diff --git a/.ci/build b/.ci/build index cd41bf180..b1b485595 100755 --- a/.ci/build +++ b/.ci/build @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -set -ex +set -e # For the build step concourse will set the following environment variables: # SOURCE_PATH - path to component repository root directory. diff --git a/Makefile b/Makefile index dbd4046d6..c97ecb8e3 100644 --- a/Makefile +++ b/Makefile @@ -15,7 +15,7 @@ # Image URL to use all building/pushing image targets VERSION := $(shell cat VERSION) REGISTRY := eu.gcr.io/gardener-project/gardener -IMAGE_REPOSITORY := $(REGISTRY)/druid +IMAGE_REPOSITORY := $(REGISTRY)/etcd-druid IMAGE_TAG := $(VERSION) BUILD_DIR := build BIN_DIR := bin @@ -84,7 +84,7 @@ docker-push: # download controller-gen if necessary controller-gen: ifeq (, $(shell which controller-gen)) - go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.2.0-beta.2 + go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.2.4 CONTROLLER_GEN=$(shell go env GOPATH)/bin/controller-gen else CONTROLLER_GEN=$(shell which controller-gen) diff --git a/api/v1alpha1/etcd_types.go b/api/v1alpha1/etcd_types.go index bbf3178ae..c723064ab 100644 --- a/api/v1alpha1/etcd_types.go +++ b/api/v1alpha1/etcd_types.go @@ -47,14 +47,14 @@ type StorageProvider string // StoreSpec defines parameters related to ObjectStore persisting backups type StoreSpec struct { - // +required - Container string `json:"container"` // +optional - Prefix *string `json:"prefix,omitempty"` - // +required - Provider StorageProvider `json:"provider"` + Container string `json:"container,omitempty"` // +required - SecretRef corev1.SecretReference `json:"secretRef"` + Prefix *string `json:"prefix"` + // +optional + Provider StorageProvider `json:"provider,omitempty"` + // +optional + SecretRef corev1.SecretReference `json:"secretRef,omitempty"` } // TLSConfig hold the TLS configuration details. @@ -63,6 +63,8 @@ type TLSConfig struct { ServerTLSSecretRef corev1.SecretReference `json:"serverTLSSecretRef"` // +required ClientTLSSecretRef corev1.SecretReference `json:"clientTLSSecretRef"` + // +required + TLSCASecretRef corev1.SecretReference `json:"tlsCASecretRef"` } // BackupSpec defines parametes associated with the full and delta snapshots of etcd @@ -72,9 +74,9 @@ type BackupSpec struct { Port *int `json:"port,omitempty"` // +optional TLS *TLSConfig `json:"tls,omitempty"` - // Version defines the etcd-backup-restore container image version + // Image defines the etcd container image and tag // +required - Version string `json:"version"` + Image string `json:"image"` // Store defines the specification of object store provider for storing backups. // +optional Store *StoreSpec `json:"store,omitempty"` @@ -111,9 +113,9 @@ type EtcdConfig struct { ServerPort *int `json:"serverPort,omitempty"` // +optional ClientPort *int `json:"clientPort,omitempty"` - // Version defines the etcd container image version + // Image defines the etcd container image and tag // +required - Version string `json:"version"` + Image string `json:"image"` // +optional AuthSecretRef *corev1.SecretReference `json:"authSecretRef,omitempty"` // Metrics defines the level of detail for exported metrics of etcd, specify 'extensive' to include histogram metrics. @@ -129,6 +131,10 @@ type EtcdConfig struct { // EtcdSpec defines the desired state of Etcd type EtcdSpec struct { + // selector is a label query over pods that should match the replica count. 
+ // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + Selector *metav1.LabelSelector `json:"selector"` // +optional Labels map[string]string `json:"labels,omitempty"` // +optional @@ -146,6 +152,9 @@ type EtcdSpec struct { // StorageCapacity defines the size of persistent volume. // +optional StorageCapacity *resource.Quantity `json:"storageCapacity,omitempty"` + // VolumeClaimTemplate defines the volume claim template to be created + // +required + VolumeClaimTemplate *string `json:"volumeClaimTemplate,omitempty"` } // CrossVersionObjectReference contains enough information to let you identify the referred resource. @@ -278,13 +287,13 @@ type EtcdStatus struct { //LastOperation LastOperation `json:"lastOperation,omitempty"` } +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.labelSelector -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - // Etcd is the Schema for the etcds API type Etcd struct { metav1.TypeMeta `json:",inline"` @@ -294,6 +303,9 @@ type Etcd struct { Status EtcdStatus `json:"status,omitempty"` } +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // +kubebuilder:object:root=true // EtcdList contains a list of Etcd diff --git a/api/v1alpha1/etcd_types_test.go b/api/v1alpha1/etcd_types_test.go index c27842c78..e01300548 100644 --- a/api/v1alpha1/etcd_types_test.go +++ b/api/v1alpha1/etcd_types_test.go @@ -20,9 +20,15 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" - "golang.org/x/net/context" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + + "context" + "time" + + "k8s.io/apimachinery/pkg/api/resource" + + corev1 "k8s.io/api/core/v1" ) // These tests are written in BDD-style using Ginkgo framework. 
Refer to @@ -54,11 +60,7 @@ var _ = Describe("Etcd", func() { Name: "foo", Namespace: "default", } - created = &Etcd{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }} + created = getEtcd("foo", "default") By("creating an API obj") Expect(k8sClient.Create(context.TODO(), created)).To(Succeed()) @@ -75,3 +77,116 @@ var _ = Describe("Etcd", func() { }) }) + +func getEtcd(name, namespace string) *Etcd { + clientPort := 2379 + serverPort := 2380 + port := 8080 + garbageCollectionPeriod := metav1.Duration{ + Duration: 43200 * time.Second, + } + deltaSnapshotPeriod := metav1.Duration{ + Duration: 300 * time.Second, + } + snapshotSchedule := "0 */24 * * *" + defragSchedule := "0 */24 * * *" + storageCapacity := resource.MustParse("80Gi") + deltaSnapShotMemLimit := resource.MustParse("100Mi") + quota := resource.MustParse("8Gi") + storageClass := "gardener.cloud-fast" + prefix := "etcd-test" + garbageCollectionPolicy := GarbageCollectionPolicy(GarbageCollectionPolicyExponential) + + tlsConfig := &TLSConfig{ + ClientTLSSecretRef: corev1.SecretReference{ + Name: "etcd-client-tls", + }, + ServerTLSSecretRef: corev1.SecretReference{ + Name: "etcd-server-tls", + }, + TLSCASecretRef: corev1.SecretReference{ + Name: "ca-etcd", + }, + } + + instance := &Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: EtcdSpec{ + Annotations: map[string]string{ + "app": "etcd-statefulset", + "role": "test", + }, + Labels: map[string]string{ + "app": "etcd-statefulset", + "role": "test", + "name": name, + }, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "etcd-statefulset", + "name": name, + }, + }, + Replicas: 1, + StorageClass: &storageClass, + StorageCapacity: &storageCapacity, + + Backup: BackupSpec{ + Image: "eu.gcr.io/gardener-project/gardener/etcdbrctl:0.8.0-dev", + Port: &port, + FullSnapshotSchedule: &snapshotSchedule, + GarbageCollectionPolicy: &garbageCollectionPolicy, + GarbageCollectionPeriod: &garbageCollectionPeriod, + DeltaSnapshotPeriod: &deltaSnapshotPeriod, + DeltaSnapshotMemoryLimit: &deltaSnapShotMemLimit, + + Resources: &corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + "cpu": parseQuantity("500m"), + "memory": parseQuantity("2Gi"), + }, + Requests: corev1.ResourceList{ + "cpu": parseQuantity("23m"), + "memory": parseQuantity("128Mi"), + }, + }, + Store: &StoreSpec{ + SecretRef: corev1.SecretReference{ + Name: "etcd-backup", + }, + Container: "shoot--dev--i308301-1--b3caa", + Provider: StorageProvider("aws"), + Prefix: &prefix, + }, + }, + Etcd: EtcdConfig{ + Quota: &quota, + Metrics: Basic, + Image: "quay.io/coreos/etcd:v3.3.13", + DefragmentationSchedule: &defragSchedule, + Resources: &corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + "cpu": parseQuantity("2500m"), + "memory": parseQuantity("4Gi"), + }, + Requests: corev1.ResourceList{ + "cpu": parseQuantity("500m"), + "memory": parseQuantity("1000Mi"), + }, + }, + ClientPort: &clientPort, + ServerPort: &serverPort, + TLS: tlsConfig, + }, + }, + } + return instance +} + +func parseQuantity(q string) resource.Quantity { + val, _ := resource.ParseQuantity(q) + return val +} diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 447ff0ef0..1fd765857 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -14,12 +14,12 @@ // See the License for the specific language governing permissions and // limitations under the License.
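For reference, the Go fixture above corresponds to an Etcd manifest roughly like the following — a minimal sketch against the reworked EtcdSpec, with field names taken from the updated types; the concrete values mirror the fixture and are only illustrative, and the backup section is omitted here:

apiVersion: druid.gardener.cloud/v1alpha1
kind: Etcd
metadata:
  name: etcd-test
  namespace: default
spec:
  replicas: 1
  # selector is new and must match the labels put on the pod template
  selector:
    matchLabels:
      app: etcd-statefulset
      name: etcd-test
  labels:
    app: etcd-statefulset
    name: etcd-test
  # volumeClaimTemplate names the volume claim template to be created
  volumeClaimTemplate: etcd-test
  storageClass: gardener.cloud-fast
  storageCapacity: 80Gi
  etcd:
    # image replaces the former version field
    image: quay.io/coreos/etcd:v3.3.13
    quota: 8Gi
    metrics: basic
    clientPort: 2379
    serverPort: 2380
    defragmentationSchedule: "0 */24 * * *"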
-// autogenerated by controller-gen object, do not modify manually +// Code generated by controller-gen. DO NOT EDIT. package v1alpha1 import ( - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -197,7 +197,7 @@ func (in *EtcdConfig) DeepCopy() *EtcdConfig { func (in *EtcdList) DeepCopyInto(out *EtcdList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Etcd, len(*in)) @@ -228,6 +228,11 @@ func (in *EtcdList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EtcdSpec) DeepCopyInto(out *EtcdSpec) { *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } if in.Labels != nil { in, out := &in.Labels, &out.Labels *out = make(map[string]string, len(*in)) @@ -254,6 +259,11 @@ func (in *EtcdSpec) DeepCopyInto(out *EtcdSpec) { x := (*in).DeepCopy() *out = &x } + if in.VolumeClaimTemplate != nil { + in, out := &in.VolumeClaimTemplate, &out.VolumeClaimTemplate + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdSpec. @@ -353,6 +363,7 @@ func (in *TLSConfig) DeepCopyInto(out *TLSConfig) { *out = *in out.ServerTLSSecretRef = in.ServerTLSSecretRef out.ClientTLSSecretRef = in.ClientTLSSecretRef + out.TLSCASecretRef = in.TLSCASecretRef } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig. diff --git a/charts/etcd/templates/configmap-etcd-bootstrap.yaml b/charts/etcd/templates/configmap-etcd-bootstrap.yaml index b0caff503..5794d4f60 100644 --- a/charts/etcd/templates/configmap-etcd-bootstrap.yaml +++ b/charts/etcd/templates/configmap-etcd-bootstrap.yaml @@ -122,10 +122,10 @@ data: {{- if .Values.etcd.enableTLS }} client-transport-security: # Path to the client server TLS cert file. - cert-file: /var/etcd/ssl/tls/tls.crt + cert-file: /var/etcd/ssl/server/tls.crt # Path to the client server TLS key file. - key-file: /var/etcd/ssl/tls/tls.key + key-file: /var/etcd/ssl/server/tls.key # Enable client cert authentication. 
client-cert-auth: true diff --git a/charts/etcd/templates/etcd-statefulset.yaml b/charts/etcd/templates/etcd-statefulset.yaml index 2ced45e47..82c3f9170 100644 --- a/charts/etcd/templates/etcd-statefulset.yaml +++ b/charts/etcd/templates/etcd-statefulset.yaml @@ -1,14 +1,14 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: etcd-sts-{{ printf "%.6s" .Values.uid }} + name: {{ .Values.name }} namespace: {{ .Release.Namespace }} annotations: {{- if .Values.annotations }} {{ toYaml .Values.annotations | indent 4 }} {{- end }} labels: - name: etcd-{{ .Values.uid }} + name: etcd instance: {{ .Values.name }} {{- if .Values.labels }} {{ toYaml .Values.labels | indent 4 }} @@ -27,7 +27,7 @@ spec: replicas: {{ .Values.statefulsetReplicas }} selector: matchLabels: - name: etcd-{{ .Values.uid }} + name: etcd instance: {{ .Values.name }} {{- if .Values.labels }} {{ toYaml .Values.labels | indent 6 }} @@ -41,15 +41,19 @@ spec: {{ toYaml .Values.annotations | indent 8 }} {{- end }} labels: - name: etcd-{{ .Values.uid }} + name: etcd instance: {{ .Values.name }} {{- if .Values.labels }} {{ toYaml .Values.labels | indent 8 }} {{- end }} spec: + hostAliases: + - ip: "127.0.0.1" + hostnames: + - {{ .Values.name }}-local containers: - name: etcd - image: {{ .Values.etcd.imageRepository }}:{{ .Values.etcd.imageVersion }} + image: {{ .Values.etcd.image }} imagePullPolicy: {{ .Values.etcd.pullPolicy }} command: - /var/etcd/bin/bootstrap.sh @@ -70,9 +74,9 @@ spec: - --cert=/var/etcd/ssl/client/tls.crt - --key=/var/etcd/ssl/client/tls.key - --cacert=/var/etcd/ssl/ca/ca.crt - - --endpoints=https://etcd-statefulset-{{ printf "%.6s" .Values.uid }}-0:{{ .Values.etcd.clientPort }} + - --endpoints=https://{{ .Values.name }}-local:{{ .Values.etcd.clientPort }} {{ else }} - - --endpoints=http://etcd-statefulset-{{ printf "%.6s" .Values.uid }}-0:{{ .Values.etcd.clientPort }} + - --endpoints=http://{{ .Values.name }}-local:{{ .Values.etcd.clientPort }} {{ end }} # {{- if and .Values.etcd.username .Values.etcd.password }} # - --user={{ .Values.etcd.username }}:{{ .Values.etcd.password }} @@ -91,7 +95,7 @@ spec: resources: {{ toYaml .Values.etcd.resources | indent 10 }} volumeMounts: - - name: etcd-{{ .Values.name }} + - name: {{ .Values.volumeClaimTemplateName }} mountPath: /var/etcd/data/ - name: etcd-bootstrap-sh mountPath: /var/etcd/bin/ @@ -117,21 +121,21 @@ spec: {{- end }} - --data-dir=/var/etcd/data/new.etcd - --storage-provider={{ .Values.store.storageProvider }} - - --store-prefix={{ .Release.Name }}-etcd + - --store-prefix={{ .Values.store.storePrefix }} {{- if .Values.backup.etcdQuotaBytes }} - --embedded-etcd-quota-bytes={{ int $.Values.backup.etcdQuotaBytes }} {{- end }} {{- if .Values.etcd.enableTLS }} - - --cert=/var/etcd/ssl/tls/tls.crt - - --key=/var/etcd/ssl/tls/tls.key + - --cert=/var/etcd/ssl/client/tls.crt + - --key=/var/etcd/ssl/client/tls.key - --cacert=/var/etcd/ssl/ca/ca.crt - --insecure-transport=false - --insecure-skip-tls-verify=false - - --endpoints=https://etcd-statefulset-{{ printf "%.6s" .Values.uid }}-0:{{ .Values.etcd.clientPort }} + - --endpoints=https://{{ .Values.name }}-local:{{ .Values.etcd.clientPort }} {{ else }} - --insecure-transport=true - --insecure-skip-tls-verify=true - - --endpoints=http://etcd-statefulset-{{ printf "%.6s" .Values.uid }}-0:{{ .Values.etcd.clientPort }} + - --endpoints=http://{{ .Values.name }}-local:{{ .Values.etcd.clientPort }} {{- end }} - --etcd-connection-timeout={{ .Values.backup.etcdConnectionTimeout }} - --delta-snapshot-period={{ int 
$.Values.backup.deltaSnapshotPeriod }} @@ -142,7 +146,7 @@ spec: # {{- end }} - --garbage-collection-period={{int $.Values.backup.garbageCollectionPeriod }} - --snapstore-temp-directory={{ .Values.backup.snapstoreTempDir }} - image: {{ .Values.backup.imageRepository }}:{{ .Values.backup.imageVersion }} + image: {{ .Values.backup.image }} imagePullPolicy: {{ .Values.backup.pullPolicy }} ports: - containerPort: {{ .Values.backup.port }} @@ -227,7 +231,7 @@ spec: key: "accessKeyID" {{- end }} volumeMounts: - - name: etcd-{{ .Values.name }} + - name: {{ .Values.volumeClaimTemplateName }} mountPath: /var/etcd/data - name: etcd-config-file mountPath: /var/etcd/config/ @@ -265,20 +269,22 @@ spec: secretName: {{ .Values.tlsClientSecret }} - name: ca-etcd secret: - secretName: ca-etcd + secretName: {{ .Values.tlsCASecret }} {{- end }} -{{- if and .Values.store.storageProvider (not (eq .Values.store.storageProvider "Local")) }} +{{- if eq .Values.store.storageProvider "GCS" }} - name: etcd-backup secret: secretName: {{ .Values.store.storeSecret }} {{- end }} volumeClaimTemplates: - metadata: - name: etcd-{{ .Values.name }} + name: {{ .Values.volumeClaimTemplateName }} spec: accessModes: - "ReadWriteOnce" +{{ if .Values.storageClass }} storageClassName: {{ .Values.storageClass }} +{{ end }} resources: requests: storage: {{ .Values.storageCapacity }} diff --git a/charts/etcd/values.yaml b/charts/etcd/values.yaml index 4396102b9..4ac29f6c3 100644 --- a/charts/etcd/values.yaml +++ b/charts/etcd/values.yaml @@ -4,6 +4,7 @@ storageCapacity: 16Gi tlsServerSecret: etcd-server-tls tlsClientSecret: etcd-client-tls +tlsCASecret: ca-etcd podAnnotations: {} etcd: diff --git a/charts/images.yaml b/charts/images.yaml deleted file mode 100644 index 39c21cd95..000000000 --- a/charts/images.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# List of all container images which are deployed by the Etcd-druid. -images: -- name: etcd - sourceRepository: github.com/coreos/etcd - repository: quay.io/coreos/etcd - tag: v3.3.13 -- name: backup-restore - sourceRepository: github.com/gardener/etcd-backup-restore - repository: eu.gcr.io/gardener-project/gardener/etcdbrctl - tag: "0.7.3" \ No newline at end of file diff --git a/config/crd/bases/druid.gardener.cloud_etcds.yaml b/config/crd/bases/druid.gardener.cloud_etcds.yaml index f50f44e22..11bf56d88 100644 --- a/config/crd/bases/druid.gardener.cloud_etcds.yaml +++ b/config/crd/bases/druid.gardener.cloud_etcds.yaml @@ -3,14 +3,24 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.4 creationTimestamp: null name: etcds.druid.gardener.cloud spec: group: druid.gardener.cloud names: kind: Etcd + listKind: EtcdList plural: etcds - scope: "" + singular: etcd + scope: Namespaced + subresources: + scale: + labelSelectorPath: .status.labelSelector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} validation: openAPIV3Schema: description: Etcd is the Schema for the etcds API @@ -18,378 +28,25 @@ spec: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: - properties: - annotations: - additionalProperties: - type: string - description: 'Annotations is an unstructured key value map stored with - a resource that may be set by external tools to store and retrieve - arbitrary metadata. They are not queryable and should be preserved - when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' - type: object - clusterName: - description: The name of the cluster which the object belongs to. This - is used to distinguish resources with same name and namespace in different - clusters. This field is not set anywhere right now and apiserver is - going to ignore it if set in create or update request. - type: string - creationTimestamp: - description: "CreationTimestamp is a timestamp representing the server - time when this object was created. It is not guaranteed to be set - in happens-before order across separate operations. Clients may not - set this value. It is represented in RFC3339 form and is in UTC. \n - Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" - format: date-time - type: string - deletionGracePeriodSeconds: - description: Number of seconds allowed for this object to gracefully - terminate before it will be removed from the system. Only set when - deletionTimestamp is also set. May only be shortened. Read-only. - format: int64 - type: integer - deletionTimestamp: - description: "DeletionTimestamp is RFC 3339 date and time at which this - resource will be deleted. This field is set by the server when a graceful - deletion is requested by the user, and is not directly settable by - a client. The resource is expected to be deleted (no longer visible - from resource lists, and not reachable by name) after the time in - this field, once the finalizers list is empty. As long as the finalizers - list contains items, deletion is blocked. Once the deletionTimestamp - is set, this value may not be unset or be set further into the future, - although it may be shortened or the resource may be deleted prior - to this time. For example, a user may request that a pod is deleted - in 30 seconds. The Kubelet will react by sending a graceful termination - signal to the containers in the pod. After that 30 seconds, the Kubelet - will send a hard termination signal (SIGKILL) to the container and - after cleanup, remove the pod from the API. In the presence of network - partitions, this object may still exist after this timestamp, until - an administrator or automated process can determine the resource is - fully terminated. If not set, graceful deletion of the object has - not been requested. \n Populated by the system when a graceful deletion - is requested. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" - format: date-time - type: string - finalizers: - description: Must be empty before the object is deleted from the registry. - Each entry is an identifier for the responsible component that will - remove the entry from the list. If the deletionTimestamp of the object - is non-nil, entries in this list can only be removed. - items: - type: string - type: array - generateName: - description: "GenerateName is an optional prefix, used by the server, - to generate a unique name ONLY IF the Name field has not been provided. - If this field is used, the name returned to the client will be different - than the name passed. This value will also be combined with a unique - suffix. The provided value has the same validation rules as the Name - field, and may be truncated by the length of the suffix required to - make the value unique on the server. \n If this field is specified - and the generated name exists, the server will NOT return a 409 - - instead, it will either return 201 Created or 500 with Reason ServerTimeout - indicating a unique name could not be found in the time allotted, - and the client should retry (optionally after the time indicated in - the Retry-After header). \n Applied only if Name is not specified. - More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency" - type: string - generation: - description: A sequence number representing a specific generation of - the desired state. Populated by the system. Read-only. - format: int64 - type: integer - initializers: - description: "An initializer is a controller which enforces some system - invariant at object creation time. This field is a list of initializers - that have not yet acted on this object. If nil or empty, this object - has been completely initialized. Otherwise, the object is considered - uninitialized and is hidden (in list/watch and get calls) from clients - that haven't explicitly asked to observe uninitialized objects. \n - When an object is created, the system will populate this list with - the current set of initializers. Only privileged users may set or - modify this list. Once it is empty, it may not be modified further - by any user. \n DEPRECATED - initializers are an alpha field and will - be removed in v1.15." - properties: - pending: - description: Pending is a list of initializers that must execute - in order before this object is visible. When the last pending - initializer is removed, and no failing result is set, the initializers - struct will be set to nil and the object is considered as initialized - and visible to all clients. - items: - properties: - name: - description: name of the process that is responsible for initializing - this object. - type: string - required: - - name - type: object - type: array - result: - description: If result is set with the Failure field, the object - will be persisted to storage and then deleted, ensuring that other - clients can observe the deletion. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - code: - description: Suggested HTTP return code for this status, 0 if - not set. 
- format: int32 - type: integer - details: - description: Extended data associated with the reason. Each - reason may define its own extended details. This field is - optional and the data returned is not guaranteed to conform - to any schema except that defined by the reason type. - properties: - causes: - description: The Causes array includes more details associated - with the StatusReason failure. Not all StatusReasons may - provide detailed causes. - items: - properties: - field: - description: "The field of the resource that has caused - this error, as named by its JSON serialization. - May include dot and postfix notation for nested - attributes. Arrays are zero-indexed. Fields may - appear more than once in an array of causes due - to fields having multiple errors. Optional. \n Examples: - \ \"name\" - the field \"name\" on the current - resource \"items[0].name\" - the field \"name\" - on the first array entry in \"items\"" - type: string - message: - description: A human-readable description of the cause - of the error. This field may be presented as-is - to a reader. - type: string - reason: - description: A machine-readable description of the - cause of the error. If this value is empty there - is no information available. - type: string - type: object - type: array - group: - description: The group attribute of the resource associated - with the status StatusReason. - type: string - kind: - description: 'The kind attribute of the resource associated - with the status StatusReason. On some operations may differ - from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - name: - description: The name attribute of the resource associated - with the status StatusReason (when there is a single name - which can be described). - type: string - retryAfterSeconds: - description: If specified, the time in seconds before the - operation should be retried. Some errors may indicate - the client must take an alternate action - for those errors - this field may indicate how long to wait before taking - the alternate action. - format: int32 - type: integer - uid: - description: 'UID of the resource. (when there is a single - resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids' - type: string - type: object - kind: - description: 'Kind is a string value representing the REST resource - this object represents. Servers may infer this from the endpoint - the client submits requests to. Cannot be updated. In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - message: - description: A human-readable description of the status of this - operation. - type: string - metadata: - description: 'Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - properties: - continue: - description: continue may be set if the user set a limit - on the number of items returned, and indicates that the - server has more data available. The value is opaque and - may be used to issue another request to the endpoint that - served this list to retrieve the next set of available - objects. Continuing a consistent list may not be possible - if the server configuration has changed or more than a - few minutes have passed. 
The resourceVersion field returned - when using this continue value will be identical to the - value in the first response, unless you have received - this token from an error message. - type: string - resourceVersion: - description: 'String that identifies the server''s internal - version of this object that can be used by clients to - determine when objects have changed. Value must be treated - as opaque by clients and passed unmodified back to the - server. Populated by the system. Read-only. More info: - https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' - type: string - selfLink: - description: selfLink is a URL representing this object. - Populated by the system. Read-only. - type: string - type: object - reason: - description: A machine-readable description of why this operation - is in the "Failure" status. If this value is empty there is - no information available. A Reason clarifies an HTTP status - code but does not override it. - type: string - status: - description: 'Status of the operation. One of: "Success" or - "Failure". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status' - type: string - type: object - required: - - pending - type: object - labels: - additionalProperties: - type: string - description: 'Map of string keys and values that can be used to organize - and categorize (scope and select) objects. May match selectors of - replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' - type: object - managedFields: - description: "ManagedFields maps workflow-id and version to the set - of fields that are managed by that workflow. This is mostly for internal - housekeeping, and users typically shouldn't need to set or understand - this field. A workflow can be the user's name, a controller's name, - or the name of a specific apply path like \"ci-cd\". The set of fields - is always in the version that the workflow used when modifying the - object. \n This field is alpha and can be changed or removed without - notice." - items: - properties: - apiVersion: - description: APIVersion defines the version of this resource that - this field set applies to. The format is "group/version" just - like the top-level APIVersion field. It is necessary to track - the version of a field set because it cannot be automatically - converted. - type: string - fields: - additionalProperties: true - description: Fields identifies a set of fields. - type: object - manager: - description: Manager is an identifier of the workflow managing - these fields. - type: string - operation: - description: Operation is the type of operation which lead to - this ManagedFieldsEntry being created. The only valid values - for this field are 'Apply' and 'Update'. - type: string - time: - description: Time is timestamp of when these fields were set. - It should always be empty if Operation is 'Apply' - format: date-time - type: string - type: object - type: array - name: - description: 'Name must be unique within a namespace. Is required when - creating resources, although some resources may allow a client to - request the generation of an appropriate name automatically. Name - is primarily intended for creation idempotence and configuration definition. - Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' - type: string - namespace: - description: "Namespace defines the space within each name must be unique. 
- An empty namespace is equivalent to the \"default\" namespace, but - \"default\" is the canonical representation. Not all objects are required - to be scoped to a namespace - the value of this field for those objects - will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: - http://kubernetes.io/docs/user-guide/namespaces" - type: string - ownerReferences: - description: List of objects depended by this object. If ALL objects - in the list have been deleted, this object will be garbage collected. - If this object is managed by a controller, then an entry in this list - will point to this controller, with the controller field set to true. - There cannot be more than one managing controller. - items: - properties: - apiVersion: - description: API version of the referent. - type: string - blockOwnerDeletion: - description: If true, AND if the owner has the "foregroundDeletion" - finalizer, then the owner cannot be deleted from the key-value - store until this reference is removed. Defaults to false. To - set this field, a user needs "delete" permission of the owner, - otherwise 422 (Unprocessable Entity) will be returned. - type: boolean - controller: - description: If true, this reference points to the managing controller. - type: boolean - kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - name: - description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' - type: string - uid: - description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' - type: string - required: - - apiVersion - - kind - - name - - uid - type: object - type: array - resourceVersion: - description: "An opaque value that represents the internal version of - this object that can be used by clients to determine when objects - have changed. May be used for optimistic concurrency, change detection, - and the watch operation on a resource or set of resources. Clients - must treat these values as opaque and passed unmodified back to the - server. They may only be valid for a particular resource or set of - resources. \n Populated by the system. Read-only. Value must be treated - as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency" - type: string - selfLink: - description: SelfLink is a URL representing this object. Populated by - the system. Read-only. - type: string - uid: - description: "UID is the unique in time and space value for this object. - It is typically generated by the server on successful creation of - a resource and is not allowed to change on PUT operations. \n Populated - by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" - type: string type: object spec: + description: EtcdSpec defines the desired state of Etcd properties: annotations: additionalProperties: type: string type: object backup: + description: BackupSpec defines parametes associated with the full and + delta snapshots of etcd properties: deltaSnapshotMemoryLimit: description: DeltaSnapshotMemoryLimit defines the memory limit after @@ -414,6 +71,9 @@ spec: - Exponential - LimitBased type: string + image: + description: Image defines the etcd container image and tag + type: string port: description: Port define the port on which etcd-backup-restore server will exposed. 
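The hunks around this point regenerate the schema for spec.backup; a hedged sketch of that stanza, using the field names from the updated BackupSpec, StoreSpec and TLSConfig (container, prefix and secret names are placeholders):

spec:
  backup:
    # image replaces the former version field
    image: eu.gcr.io/gardener-project/gardener/etcdbrctl:0.8.0-dev
    port: 8080
    fullSnapshotSchedule: "0 */24 * * *"
    garbageCollectionPolicy: Exponential
    garbageCollectionPeriod: 12h
    deltaSnapshotPeriod: 5m
    deltaSnapshotMemoryLimit: 100Mi
    store:
      container: my-backup-bucket   # placeholder bucket/container name
      provider: aws
      prefix: etcd-test
      secretRef:
        name: etcd-backup
    tls:
      serverTLSSecretRef:
        name: etcd-server-tls
      clientTLSSecretRef:
        name: etcd-client-tls
      # newly added CA secret reference
      tlsCASecretRef:
        name: ca-etcd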
@@ -446,6 +106,8 @@ spec: prefix: type: string provider: + description: StorageProvider defines the type of object store + provider for storing backups. enum: - aws - gcp @@ -454,6 +116,8 @@ spec: - alicloud type: string secretRef: + description: SecretReference represents a Secret Reference. + It has enough information to retrieve secret in any namespace properties: name: description: Name is unique within a namespace to reference @@ -465,13 +129,14 @@ spec: type: string type: object required: - - container - - provider - - secretRef + - prefix type: object tls: + description: TLSConfig hold the TLS configuration details. properties: clientTLSSecretRef: + description: SecretReference represents a Secret Reference. + It has enough information to retrieve secret in any namespace properties: name: description: Name is unique within a namespace to reference @@ -483,6 +148,21 @@ spec: type: string type: object serverTLSSecretRef: + description: SecretReference represents a Secret Reference. + It has enough information to retrieve secret in any namespace + properties: + name: + description: Name is unique within a namespace to reference + a secret resource. + type: string + namespace: + description: Namespace defines the space within which the + secret name must be unique. + type: string + type: object + tlsCASecretRef: + description: SecretReference represents a Secret Reference. + It has enough information to retrieve secret in any namespace properties: name: description: Name is unique within a namespace to reference @@ -494,19 +174,19 @@ spec: type: string type: object required: - - serverTLSSecretRef - clientTLSSecretRef + - serverTLSSecretRef + - tlsCASecretRef type: object - version: - description: Version defines the etcd-backup-restore container image - version - type: string required: - - version + - image type: object etcd: + description: EtcdConfig defines parametes associated etcd deployed properties: authSecretRef: + description: SecretReference represents a Secret Reference. It has + enough information to retrieve secret in any namespace properties: name: description: Name is unique within a namespace to reference @@ -523,6 +203,9 @@ spec: description: DefragmentationSchedule defines the cron standard schedule for defragmentation of etcd. type: string + image: + description: Image defines the etcd container image and tag + type: string metrics: description: Metrics defines the level of detail for exported metrics of etcd, specify 'extensive' to include histogram metrics. @@ -555,8 +238,11 @@ spec: serverPort: type: integer tls: + description: TLSConfig hold the TLS configuration details. properties: clientTLSSecretRef: + description: SecretReference represents a Secret Reference. + It has enough information to retrieve secret in any namespace properties: name: description: Name is unique within a namespace to reference @@ -568,6 +254,21 @@ spec: type: string type: object serverTLSSecretRef: + description: SecretReference represents a Secret Reference. + It has enough information to retrieve secret in any namespace + properties: + name: + description: Name is unique within a namespace to reference + a secret resource. + type: string + namespace: + description: Namespace defines the space within which the + secret name must be unique. + type: string + type: object + tlsCASecretRef: + description: SecretReference represents a Secret Reference. 
+ It has enough information to retrieve secret in any namespace properties: name: description: Name is unique within a namespace to reference @@ -579,14 +280,12 @@ spec: type: string type: object required: - - serverTLSSecretRef - clientTLSSecretRef + - serverTLSSecretRef + - tlsCASecretRef type: object - version: - description: Version defines the etcd container image version - type: string required: - - version + - image type: object labels: additionalProperties: @@ -594,6 +293,51 @@ spec: type: object replicas: type: integer + selector: + description: 'selector is a label query over pods that should match + the replica count. It must match the pod template''s labels. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors' + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object storageCapacity: description: StorageCapacity defines the size of persistent volume. type: string @@ -601,15 +345,23 @@ spec: description: 'StorageClass defines the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' type: string + volumeClaimTemplate: + description: VolumeClaimTemplate defines the volume claim template to + be created + type: string required: - - etcd - backup + - etcd - replicas + - selector type: object status: + description: EtcdStatus defines the observed state of Etcd properties: conditions: items: + description: Condition holds the information about the state of a + resource. properties: lastTransitionTime: description: Last time the condition transitioned from one status @@ -640,412 +392,27 @@ spec: type: integer endpoints: items: + description: 'Endpoints is a collection of endpoints that implement + the actual service. Example: Name: "mysvc", Subsets: [ { Addresses: + [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], Ports: [{"name": + "a", "port": 8675}, {"name": "b", "port": 309}] }, { Addresses: + [{"ip": "10.10.3.3"}], Ports: [{"name": "a", "port": 93}, + {"name": "b", "port": 76}] }, ]' properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized - values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' - properties: - annotations: - additionalProperties: - type: string - description: 'Annotations is an unstructured key value map - stored with a resource that may be set by external tools - to store and retrieve arbitrary metadata. They are not queryable - and should be preserved when modifying objects. More info: - http://kubernetes.io/docs/user-guide/annotations' - type: object - clusterName: - description: The name of the cluster which the object belongs - to. This is used to distinguish resources with same name - and namespace in different clusters. This field is not set - anywhere right now and apiserver is going to ignore it if - set in create or update request. - type: string - creationTimestamp: - description: "CreationTimestamp is a timestamp representing - the server time when this object was created. It is not - guaranteed to be set in happens-before order across separate - operations. Clients may not set this value. It is represented - in RFC3339 form and is in UTC. \n Populated by the system. - Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" - format: date-time - type: string - deletionGracePeriodSeconds: - description: Number of seconds allowed for this object to - gracefully terminate before it will be removed from the - system. Only set when deletionTimestamp is also set. May - only be shortened. Read-only. - format: int64 - type: integer - deletionTimestamp: - description: "DeletionTimestamp is RFC 3339 date and time - at which this resource will be deleted. This field is set - by the server when a graceful deletion is requested by the - user, and is not directly settable by a client. The resource - is expected to be deleted (no longer visible from resource - lists, and not reachable by name) after the time in this - field, once the finalizers list is empty. As long as the - finalizers list contains items, deletion is blocked. Once - the deletionTimestamp is set, this value may not be unset - or be set further into the future, although it may be shortened - or the resource may be deleted prior to this time. For example, - a user may request that a pod is deleted in 30 seconds. - The Kubelet will react by sending a graceful termination - signal to the containers in the pod. After that 30 seconds, - the Kubelet will send a hard termination signal (SIGKILL) - to the container and after cleanup, remove the pod from - the API. In the presence of network partitions, this object - may still exist after this timestamp, until an administrator - or automated process can determine the resource is fully - terminated. If not set, graceful deletion of the object - has not been requested. \n Populated by the system when - a graceful deletion is requested. 
Read-only. More info: - https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" - format: date-time - type: string - finalizers: - description: Must be empty before the object is deleted from - the registry. Each entry is an identifier for the responsible - component that will remove the entry from the list. If the - deletionTimestamp of the object is non-nil, entries in this - list can only be removed. - items: - type: string - type: array - generateName: - description: "GenerateName is an optional prefix, used by - the server, to generate a unique name ONLY IF the Name field - has not been provided. If this field is used, the name returned - to the client will be different than the name passed. This - value will also be combined with a unique suffix. The provided - value has the same validation rules as the Name field, and - may be truncated by the length of the suffix required to - make the value unique on the server. \n If this field is - specified and the generated name exists, the server will - NOT return a 409 - instead, it will either return 201 Created - or 500 with Reason ServerTimeout indicating a unique name - could not be found in the time allotted, and the client - should retry (optionally after the time indicated in the - Retry-After header). \n Applied only if Name is not specified. - More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency" - type: string - generation: - description: A sequence number representing a specific generation - of the desired state. Populated by the system. Read-only. - format: int64 - type: integer - initializers: - description: "An initializer is a controller which enforces - some system invariant at object creation time. This field - is a list of initializers that have not yet acted on this - object. If nil or empty, this object has been completely - initialized. Otherwise, the object is considered uninitialized - and is hidden (in list/watch and get calls) from clients - that haven't explicitly asked to observe uninitialized objects. - \n When an object is created, the system will populate this - list with the current set of initializers. Only privileged - users may set or modify this list. Once it is empty, it - may not be modified further by any user. \n DEPRECATED - - initializers are an alpha field and will be removed in v1.15." - properties: - pending: - description: Pending is a list of initializers that must - execute in order before this object is visible. When - the last pending initializer is removed, and no failing - result is set, the initializers struct will be set to - nil and the object is considered as initialized and - visible to all clients. - items: - properties: - name: - description: name of the process that is responsible - for initializing this object. - type: string - required: - - name - type: object - type: array - result: - description: If result is set with the Failure field, - the object will be persisted to storage and then deleted, - ensuring that other clients can observe the deletion. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema - of this representation of an object. Servers should - convert recognized schemas to the latest internal - value, and may reject unrecognized values. More - info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - code: - description: Suggested HTTP return code for this status, - 0 if not set. 
- format: int32 - type: integer - details: - description: Extended data associated with the reason. Each - reason may define its own extended details. This - field is optional and the data returned is not guaranteed - to conform to any schema except that defined by - the reason type. - properties: - causes: - description: The Causes array includes more details - associated with the StatusReason failure. Not - all StatusReasons may provide detailed causes. - items: - properties: - field: - description: "The field of the resource - that has caused this error, as named by - its JSON serialization. May include dot - and postfix notation for nested attributes. - Arrays are zero-indexed. Fields may appear - more than once in an array of causes due - to fields having multiple errors. Optional. - \n Examples: \"name\" - the field \"name\" - on the current resource \"items[0].name\" - - the field \"name\" on the first array - entry in \"items\"" - type: string - message: - description: A human-readable description - of the cause of the error. This field - may be presented as-is to a reader. - type: string - reason: - description: A machine-readable description - of the cause of the error. If this value - is empty there is no information available. - type: string - type: object - type: array - group: - description: The group attribute of the resource - associated with the status StatusReason. - type: string - kind: - description: 'The kind attribute of the resource - associated with the status StatusReason. On - some operations may differ from the requested - resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - name: - description: The name attribute of the resource - associated with the status StatusReason (when - there is a single name which can be described). - type: string - retryAfterSeconds: - description: If specified, the time in seconds - before the operation should be retried. Some - errors may indicate the client must take an - alternate action - for those errors this field - may indicate how long to wait before taking - the alternate action. - format: int32 - type: integer - uid: - description: 'UID of the resource. (when there - is a single resource which can be described). - More info: http://kubernetes.io/docs/user-guide/identifiers#uids' - type: string - type: object - kind: - description: 'Kind is a string value representing - the REST resource this object represents. Servers - may infer this from the endpoint the client submits - requests to. Cannot be updated. In CamelCase. More - info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - message: - description: A human-readable description of the status - of this operation. - type: string - metadata: - description: 'Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - properties: - continue: - description: continue may be set if the user set - a limit on the number of items returned, and - indicates that the server has more data available. - The value is opaque and may be used to issue - another request to the endpoint that served - this list to retrieve the next set of available - objects. Continuing a consistent list may not - be possible if the server configuration has - changed or more than a few minutes have passed. 
- The resourceVersion field returned when using - this continue value will be identical to the - value in the first response, unless you have - received this token from an error message. - type: string - resourceVersion: - description: 'String that identifies the server''s - internal version of this object that can be - used by clients to determine when objects have - changed. Value must be treated as opaque by - clients and passed unmodified back to the server. - Populated by the system. Read-only. More info: - https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' - type: string - selfLink: - description: selfLink is a URL representing this - object. Populated by the system. Read-only. - type: string - type: object - reason: - description: A machine-readable description of why - this operation is in the "Failure" status. If this - value is empty there is no information available. - A Reason clarifies an HTTP status code but does - not override it. - type: string - status: - description: 'Status of the operation. One of: "Success" - or "Failure". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status' - type: string - type: object - required: - - pending - type: object - labels: - additionalProperties: - type: string - description: 'Map of string keys and values that can be used - to organize and categorize (scope and select) objects. May - match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels' - type: object - managedFields: - description: "ManagedFields maps workflow-id and version to - the set of fields that are managed by that workflow. This - is mostly for internal housekeeping, and users typically - shouldn't need to set or understand this field. A workflow - can be the user's name, a controller's name, or the name - of a specific apply path like \"ci-cd\". The set of fields - is always in the version that the workflow used when modifying - the object. \n This field is alpha and can be changed or - removed without notice." - items: - properties: - apiVersion: - description: APIVersion defines the version of this - resource that this field set applies to. The format - is "group/version" just like the top-level APIVersion - field. It is necessary to track the version of a field - set because it cannot be automatically converted. - type: string - fields: - additionalProperties: true - description: Fields identifies a set of fields. - type: object - manager: - description: Manager is an identifier of the workflow - managing these fields. - type: string - operation: - description: Operation is the type of operation which - lead to this ManagedFieldsEntry being created. The - only valid values for this field are 'Apply' and 'Update'. - type: string - time: - description: Time is timestamp of when these fields - were set. It should always be empty if Operation is - 'Apply' - format: date-time - type: string - type: object - type: array - name: - description: 'Name must be unique within a namespace. Is required - when creating resources, although some resources may allow - a client to request the generation of an appropriate name - automatically. Name is primarily intended for creation idempotence - and configuration definition. Cannot be updated. More info: - http://kubernetes.io/docs/user-guide/identifiers#names' - type: string - namespace: - description: "Namespace defines the space within each name - must be unique. 
An empty namespace is equivalent to the - \"default\" namespace, but \"default\" is the canonical - representation. Not all objects are required to be scoped - to a namespace - the value of this field for those objects - will be empty. \n Must be a DNS_LABEL. Cannot be updated. - More info: http://kubernetes.io/docs/user-guide/namespaces" - type: string - ownerReferences: - description: List of objects depended by this object. If ALL - objects in the list have been deleted, this object will - be garbage collected. If this object is managed by a controller, - then an entry in this list will point to this controller, - with the controller field set to true. There cannot be more - than one managing controller. - items: - properties: - apiVersion: - description: API version of the referent. - type: string - blockOwnerDeletion: - description: If true, AND if the owner has the "foregroundDeletion" - finalizer, then the owner cannot be deleted from the - key-value store until this reference is removed. Defaults - to false. To set this field, a user needs "delete" - permission of the owner, otherwise 422 (Unprocessable - Entity) will be returned. - type: boolean - controller: - description: If true, this reference points to the managing - controller. - type: boolean - kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - name: - description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' - type: string - uid: - description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' - type: string - required: - - apiVersion - - kind - - name - - uid - type: object - type: array - resourceVersion: - description: "An opaque value that represents the internal - version of this object that can be used by clients to determine - when objects have changed. May be used for optimistic concurrency, - change detection, and the watch operation on a resource - or set of resources. Clients must treat these values as - opaque and passed unmodified back to the server. They may - only be valid for a particular resource or set of resources. - \n Populated by the system. Read-only. Value must be treated - as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency" - type: string - selfLink: - description: SelfLink is a URL representing this object. Populated - by the system. Read-only. - type: string - uid: - description: "UID is the unique in time and space value for - this object. It is typically generated by the server on - successful creation of a resource and is not allowed to - change on PUT operations. \n Populated by the system. Read-only. - More info: http://kubernetes.io/docs/user-guide/identifiers#uids" - type: string + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' type: object subsets: description: The set of all endpoints is the union of all subsets. @@ -1057,12 +424,21 @@ spec: Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service. items: + description: 'EndpointSubset is a group of addresses with a + common set of ports. The expanded set of endpoints is the + Cartesian product of Addresses x Ports. 
For example, given: { Addresses: + [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], Ports: [{"name": + "a", "port": 8675}, {"name": "b", "port": 309}] } The resulting + set of endpoints can be viewed as: a: [ 10.10.1.1:8675, + 10.10.2.2:8675 ], b: [ 10.10.1.1:309, 10.10.2.2:309 ]' properties: addresses: description: IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize. items: + description: EndpointAddress is a tuple that describes + single IP address. properties: hostname: description: The Hostname of this endpoint @@ -1106,7 +482,7 @@ spec: type: string kind: description: 'Kind of the referent. More info: - https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string name: description: 'Name of the referent. More info: @@ -1118,7 +494,7 @@ spec: type: string resourceVersion: description: 'Specific resourceVersion to which - this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' type: string uid: description: 'UID of the referent. More info: @@ -1135,6 +511,8 @@ spec: not yet finished starting, have recently failed a readiness check, or have recently failed a liveness check. items: + description: EndpointAddress is a tuple that describes + single IP address. properties: hostname: description: The Hostname of this endpoint @@ -1178,7 +556,7 @@ spec: type: string kind: description: 'Kind of the referent. More info: - https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string name: description: 'Name of the referent. More info: @@ -1190,7 +568,7 @@ spec: type: string resourceVersion: description: 'Specific resourceVersion to which - this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' type: string uid: description: 'UID of the referent. More info: @@ -1204,11 +582,14 @@ spec: ports: description: Port numbers available on the related IP addresses. items: + description: EndpointPort is a tuple that describes a + single port. properties: name: - description: The name of this port (corresponds to - ServicePort.Name). Must be a DNS_LABEL. Optional - only if one port is defined. + description: The name of this port. This must match + the 'name' field in the corresponding ServicePort. + Must be a DNS_LABEL. Optional only if one port is + defined. type: string port: description: The port number of the endpoint. @@ -1227,6 +608,8 @@ spec: type: object type: array etcd: + description: CrossVersionObjectReference contains enough information + to let you identify the referred resource. properties: apiVersion: description: API version of the referent @@ -1246,6 +629,8 @@ spec: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
items: + description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. properties: key: description: key is the label key that the selector applies @@ -1297,6 +682,7 @@ spec: type: integer type: object type: object + version: v1alpha1 versions: - name: v1alpha1 served: true diff --git a/config/crd/druid.gardener.cloud_etcds.yaml b/config/crd/druid.gardener.cloud_etcds.yaml deleted file mode 100644 index aa881e387..000000000 --- a/config/crd/druid.gardener.cloud_etcds.yaml +++ /dev/null @@ -1,1325 +0,0 @@ - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - creationTimestamp: null - name: etcds.druid.gardener.cloud -spec: - group: druid.gardener.cloud - names: - kind: Etcd - plural: etcds - scope: "" - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: Etcd is the Schema for the etcds API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - metadata: - properties: - annotations: - additionalProperties: - type: string - description: 'Annotations is an unstructured key value map stored - with a resource that may be set by external tools to store and retrieve - arbitrary metadata. They are not queryable and should be preserved - when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' - type: object - clusterName: - description: The name of the cluster which the object belongs to. - This is used to distinguish resources with same name and namespace - in different clusters. This field is not set anywhere right now - and apiserver is going to ignore it if set in create or update request. - type: string - creationTimestamp: - description: "CreationTimestamp is a timestamp representing the server - time when this object was created. It is not guaranteed to be set - in happens-before order across separate operations. Clients may - not set this value. It is represented in RFC3339 form and is in - UTC. \n Populated by the system. Read-only. Null for lists. More - info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" - format: date-time - type: string - deletionGracePeriodSeconds: - description: Number of seconds allowed for this object to gracefully - terminate before it will be removed from the system. Only set when - deletionTimestamp is also set. May only be shortened. Read-only. - format: int64 - type: integer - deletionTimestamp: - description: "DeletionTimestamp is RFC 3339 date and time at which - this resource will be deleted. This field is set by the server when - a graceful deletion is requested by the user, and is not directly - settable by a client. The resource is expected to be deleted (no - longer visible from resource lists, and not reachable by name) after - the time in this field, once the finalizers list is empty. As long - as the finalizers list contains items, deletion is blocked. 
Once - the deletionTimestamp is set, this value may not be unset or be - set further into the future, although it may be shortened or the - resource may be deleted prior to this time. For example, a user - may request that a pod is deleted in 30 seconds. The Kubelet will - react by sending a graceful termination signal to the containers - in the pod. After that 30 seconds, the Kubelet will send a hard - termination signal (SIGKILL) to the container and after cleanup, - remove the pod from the API. In the presence of network partitions, - this object may still exist after this timestamp, until an administrator - or automated process can determine the resource is fully terminated. - If not set, graceful deletion of the object has not been requested. - \n Populated by the system when a graceful deletion is requested. - Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" - format: date-time - type: string - finalizers: - description: Must be empty before the object is deleted from the registry. - Each entry is an identifier for the responsible component that will - remove the entry from the list. If the deletionTimestamp of the - object is non-nil, entries in this list can only be removed. - items: - type: string - type: array - generateName: - description: "GenerateName is an optional prefix, used by the server, - to generate a unique name ONLY IF the Name field has not been provided. - If this field is used, the name returned to the client will be different - than the name passed. This value will also be combined with a unique - suffix. The provided value has the same validation rules as the - Name field, and may be truncated by the length of the suffix required - to make the value unique on the server. \n If this field is specified - and the generated name exists, the server will NOT return a 409 - - instead, it will either return 201 Created or 500 with Reason - ServerTimeout indicating a unique name could not be found in the - time allotted, and the client should retry (optionally after the - time indicated in the Retry-After header). \n Applied only if Name - is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency" - type: string - generation: - description: A sequence number representing a specific generation - of the desired state. Populated by the system. Read-only. - format: int64 - type: integer - initializers: - description: "An initializer is a controller which enforces some system - invariant at object creation time. This field is a list of initializers - that have not yet acted on this object. If nil or empty, this object - has been completely initialized. Otherwise, the object is considered - uninitialized and is hidden (in list/watch and get calls) from clients - that haven't explicitly asked to observe uninitialized objects. - \n When an object is created, the system will populate this list - with the current set of initializers. Only privileged users may - set or modify this list. Once it is empty, it may not be modified - further by any user. \n DEPRECATED - initializers are an alpha field - and will be removed in v1.15." - properties: - pending: - description: Pending is a list of initializers that must execute - in order before this object is visible. When the last pending - initializer is removed, and no failing result is set, the initializers - struct will be set to nil and the object is considered as initialized - and visible to all clients. 
- items: - properties: - name: - description: name of the process that is responsible for - initializing this object. - type: string - required: - - name - type: object - type: array - result: - description: If result is set with the Failure field, the object - will be persisted to storage and then deleted, ensuring that - other clients can observe the deletion. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - code: - description: Suggested HTTP return code for this status, 0 - if not set. - format: int32 - type: integer - details: - description: Extended data associated with the reason. Each - reason may define its own extended details. This field is - optional and the data returned is not guaranteed to conform - to any schema except that defined by the reason type. - properties: - causes: - description: The Causes array includes more details associated - with the StatusReason failure. Not all StatusReasons - may provide detailed causes. - items: - properties: - field: - description: "The field of the resource that has - caused this error, as named by its JSON serialization. - May include dot and postfix notation for nested - attributes. Arrays are zero-indexed. Fields may - appear more than once in an array of causes due - to fields having multiple errors. Optional. \n - Examples: \"name\" - the field \"name\" on the - current resource \"items[0].name\" - the field - \"name\" on the first array entry in \"items\"" - type: string - message: - description: A human-readable description of the - cause of the error. This field may be presented - as-is to a reader. - type: string - reason: - description: A machine-readable description of the - cause of the error. If this value is empty there - is no information available. - type: string - type: object - type: array - group: - description: The group attribute of the resource associated - with the status StatusReason. - type: string - kind: - description: 'The kind attribute of the resource associated - with the status StatusReason. On some operations may - differ from the requested resource Kind. More info: - https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - name: - description: The name attribute of the resource associated - with the status StatusReason (when there is a single - name which can be described). - type: string - retryAfterSeconds: - description: If specified, the time in seconds before - the operation should be retried. Some errors may indicate - the client must take an alternate action - for those - errors this field may indicate how long to wait before - taking the alternate action. - format: int32 - type: integer - uid: - description: 'UID of the resource. (when there is a single - resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids' - type: string - type: object - kind: - description: 'Kind is a string value representing the REST - resource this object represents. Servers may infer this - from the endpoint the client submits requests to. Cannot - be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - message: - description: A human-readable description of the status of - this operation. - type: string - metadata: - description: 'Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - properties: - continue: - description: continue may be set if the user set a limit - on the number of items returned, and indicates that - the server has more data available. The value is opaque - and may be used to issue another request to the endpoint - that served this list to retrieve the next set of available - objects. Continuing a consistent list may not be possible - if the server configuration has changed or more than - a few minutes have passed. The resourceVersion field - returned when using this continue value will be identical - to the value in the first response, unless you have - received this token from an error message. - type: string - resourceVersion: - description: 'String that identifies the server''s internal - version of this object that can be used by clients to - determine when objects have changed. Value must be treated - as opaque by clients and passed unmodified back to the - server. Populated by the system. Read-only. More info: - https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' - type: string - selfLink: - description: selfLink is a URL representing this object. - Populated by the system. Read-only. - type: string - type: object - reason: - description: A machine-readable description of why this operation - is in the "Failure" status. If this value is empty there - is no information available. A Reason clarifies an HTTP - status code but does not override it. - type: string - status: - description: 'Status of the operation. One of: "Success" or - "Failure". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status' - type: string - type: object - required: - - pending - type: object - labels: - additionalProperties: - type: string - description: 'Map of string keys and values that can be used to organize - and categorize (scope and select) objects. May match selectors of - replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' - type: object - managedFields: - description: "ManagedFields maps workflow-id and version to the set - of fields that are managed by that workflow. This is mostly for - internal housekeeping, and users typically shouldn't need to set - or understand this field. A workflow can be the user's name, a controller's - name, or the name of a specific apply path like \"ci-cd\". The set - of fields is always in the version that the workflow used when modifying - the object. \n This field is alpha and can be changed or removed - without notice." - items: - properties: - apiVersion: - description: APIVersion defines the version of this resource - that this field set applies to. The format is "group/version" - just like the top-level APIVersion field. It is necessary - to track the version of a field set because it cannot be automatically - converted. - type: string - fields: - additionalProperties: true - description: Fields identifies a set of fields. - type: object - manager: - description: Manager is an identifier of the workflow managing - these fields. 
- type: string - operation: - description: Operation is the type of operation which lead to - this ManagedFieldsEntry being created. The only valid values - for this field are 'Apply' and 'Update'. - type: string - time: - description: Time is timestamp of when these fields were set. - It should always be empty if Operation is 'Apply' - format: date-time - type: string - type: object - type: array - name: - description: 'Name must be unique within a namespace. Is required - when creating resources, although some resources may allow a client - to request the generation of an appropriate name automatically. - Name is primarily intended for creation idempotence and configuration - definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' - type: string - namespace: - description: "Namespace defines the space within each name must be - unique. An empty namespace is equivalent to the \"default\" namespace, - but \"default\" is the canonical representation. Not all objects - are required to be scoped to a namespace - the value of this field - for those objects will be empty. \n Must be a DNS_LABEL. Cannot - be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" - type: string - ownerReferences: - description: List of objects depended by this object. If ALL objects - in the list have been deleted, this object will be garbage collected. - If this object is managed by a controller, then an entry in this - list will point to this controller, with the controller field set - to true. There cannot be more than one managing controller. - items: - properties: - apiVersion: - description: API version of the referent. - type: string - blockOwnerDeletion: - description: If true, AND if the owner has the "foregroundDeletion" - finalizer, then the owner cannot be deleted from the key-value - store until this reference is removed. Defaults to false. - To set this field, a user needs "delete" permission of the - owner, otherwise 422 (Unprocessable Entity) will be returned. - type: boolean - controller: - description: If true, this reference points to the managing - controller. - type: boolean - kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - name: - description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' - type: string - uid: - description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' - type: string - required: - - apiVersion - - kind - - name - - uid - type: object - type: array - resourceVersion: - description: "An opaque value that represents the internal version - of this object that can be used by clients to determine when objects - have changed. May be used for optimistic concurrency, change detection, - and the watch operation on a resource or set of resources. Clients - must treat these values as opaque and passed unmodified back to - the server. They may only be valid for a particular resource or - set of resources. \n Populated by the system. Read-only. Value must - be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency" - type: string - selfLink: - description: SelfLink is a URL representing this object. Populated - by the system. Read-only. - type: string - uid: - description: "UID is the unique in time and space value for this object. 
- It is typically generated by the server on successful creation of - a resource and is not allowed to change on PUT operations. \n Populated - by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" - type: string - type: object - spec: - properties: - annotations: - additionalProperties: - type: string - type: object - backup: - properties: - deltaSnapshotMemoryLimit: - description: DeltaSnapshotMemoryLimit defines the memory limit - after which delta snapshots will be taken - type: string - deltaSnapshotPeriod: - description: DeltaSnapshotPeriod defines the period after which - delta snapshots will be taken - type: string - fullSnapshotSchedule: - description: FullSnapshotSchedule defines the cron standard schedule - for full snapshots. - type: string - garbageCollectionPeriod: - description: GarbageCollectionPeriod defines the period for garbage - collecting old backups - type: string - garbageCollectionPolicy: - description: GarbageCollectionPolicy defines the policy for garbage - collecting old backups - enum: - - Exponential - - LimitBased - type: string - port: - description: Port define the port on which etcd-backup-restore - server will exposed. - type: integer - resources: - description: 'Resources defines the compute Resources required - by backup-restore container. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - properties: - limits: - additionalProperties: - type: string - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - type: string - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - store: - description: Store defines the specification of object store provider - for storing backups. - properties: - container: - type: string - prefix: - type: string - provider: - enum: - - aws - - gcp - - os - - azure - - alicloud - type: string - secretRef: - properties: - name: - description: Name is unique within a namespace to reference - a secret resource. - type: string - namespace: - description: Namespace defines the space within which - the secret name must be unique. - type: string - type: object - required: - - container - - provider - - secretRef - type: object - tls: - properties: - clientTLSSecretRef: - properties: - name: - description: Name is unique within a namespace to reference - a secret resource. - type: string - namespace: - description: Namespace defines the space within which - the secret name must be unique. - type: string - type: object - serverTLSSecretRef: - properties: - name: - description: Name is unique within a namespace to reference - a secret resource. - type: string - namespace: - description: Namespace defines the space within which - the secret name must be unique. 
- type: string - type: object - required: - - serverTLSSecretRef - - clientTLSSecretRef - type: object - version: - description: Version defines the etcd-backup-restore container - image version - pattern: ^(v)?(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - type: string - required: - - version - type: object - etcd: - properties: - authSecretRef: - properties: - name: - description: Name is unique within a namespace to reference - a secret resource. - type: string - namespace: - description: Namespace defines the space within which the - secret name must be unique. - type: string - type: object - clientPort: - type: integer - defragmentationSchedule: - description: DefragmentationSchedule defines the cron standard - schedule for defragmentation of etcd. - type: string - metrics: - description: Metrics defines the level of detail for exported - metrics of etcd, specify 'extensive' to include histogram metrics. - enum: - - basic - - extensive - type: string - quota: - description: Quota defines the etcd DB quota. - type: string - resources: - description: 'Resources defines the compute Resources required - by etcd container. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - properties: - limits: - additionalProperties: - type: string - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - type: string - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - serverPort: - type: integer - tls: - properties: - clientTLSSecretRef: - properties: - name: - description: Name is unique within a namespace to reference - a secret resource. - type: string - namespace: - description: Namespace defines the space within which - the secret name must be unique. - type: string - type: object - serverTLSSecretRef: - properties: - name: - description: Name is unique within a namespace to reference - a secret resource. - type: string - namespace: - description: Namespace defines the space within which - the secret name must be unique. - type: string - type: object - required: - - serverTLSSecretRef - - clientTLSSecretRef - type: object - version: - description: Version defines the etcd container image version - pattern: ^(v)?(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - type: string - required: - - version - type: object - labels: - additionalProperties: - type: string - type: object - replicas: - type: integer - storageCapacity: - description: StorageCapacity defines the size of persistent volume. - type: string - storageClass: - description: 'StorageClass defines the name of the StorageClass required - by the claim. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - required: - - etcd - - backup - - replicas - type: object - status: - properties: - conditions: - items: - properties: - lastTransitionTime: - description: Last time the condition transitioned from one status - to another. - format: date-time - type: string - lastUpdateTime: - description: Last time the condition was updated. - format: date-time - type: string - message: - description: A human readable message indicating details about - the transition. - type: string - reason: - description: The reason for the condition's last transition. - type: string - status: - description: Status of the condition, one of True, False, Unknown. - type: string - type: - description: Type of the Shoot condition. - type: string - type: object - type: array - currentReplicas: - format: int32 - type: integer - endpoints: - items: - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource - this object represents. Servers may infer this from the endpoint - the client submits requests to. Cannot be updated. In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' - properties: - annotations: - additionalProperties: - type: string - description: 'Annotations is an unstructured key value map - stored with a resource that may be set by external tools - to store and retrieve arbitrary metadata. They are not - queryable and should be preserved when modifying objects. - More info: http://kubernetes.io/docs/user-guide/annotations' - type: object - clusterName: - description: The name of the cluster which the object belongs - to. This is used to distinguish resources with same name - and namespace in different clusters. This field is not - set anywhere right now and apiserver is going to ignore - it if set in create or update request. - type: string - creationTimestamp: - description: "CreationTimestamp is a timestamp representing - the server time when this object was created. It is not - guaranteed to be set in happens-before order across separate - operations. Clients may not set this value. It is represented - in RFC3339 form and is in UTC. \n Populated by the system. - Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" - format: date-time - type: string - deletionGracePeriodSeconds: - description: Number of seconds allowed for this object to - gracefully terminate before it will be removed from the - system. Only set when deletionTimestamp is also set. May - only be shortened. Read-only. - format: int64 - type: integer - deletionTimestamp: - description: "DeletionTimestamp is RFC 3339 date and time - at which this resource will be deleted. This field is - set by the server when a graceful deletion is requested - by the user, and is not directly settable by a client. 
- The resource is expected to be deleted (no longer visible - from resource lists, and not reachable by name) after - the time in this field, once the finalizers list is empty. - As long as the finalizers list contains items, deletion - is blocked. Once the deletionTimestamp is set, this value - may not be unset or be set further into the future, although - it may be shortened or the resource may be deleted prior - to this time. For example, a user may request that a pod - is deleted in 30 seconds. The Kubelet will react by sending - a graceful termination signal to the containers in the - pod. After that 30 seconds, the Kubelet will send a hard - termination signal (SIGKILL) to the container and after - cleanup, remove the pod from the API. In the presence - of network partitions, this object may still exist after - this timestamp, until an administrator or automated process - can determine the resource is fully terminated. If not - set, graceful deletion of the object has not been requested. - \n Populated by the system when a graceful deletion is - requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" - format: date-time - type: string - finalizers: - description: Must be empty before the object is deleted - from the registry. Each entry is an identifier for the - responsible component that will remove the entry from - the list. If the deletionTimestamp of the object is non-nil, - entries in this list can only be removed. - items: - type: string - type: array - generateName: - description: "GenerateName is an optional prefix, used by - the server, to generate a unique name ONLY IF the Name - field has not been provided. If this field is used, the - name returned to the client will be different than the - name passed. This value will also be combined with a unique - suffix. The provided value has the same validation rules - as the Name field, and may be truncated by the length - of the suffix required to make the value unique on the - server. \n If this field is specified and the generated - name exists, the server will NOT return a 409 - instead, - it will either return 201 Created or 500 with Reason ServerTimeout - indicating a unique name could not be found in the time - allotted, and the client should retry (optionally after - the time indicated in the Retry-After header). \n Applied - only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency" - type: string - generation: - description: A sequence number representing a specific generation - of the desired state. Populated by the system. Read-only. - format: int64 - type: integer - initializers: - description: "An initializer is a controller which enforces - some system invariant at object creation time. This field - is a list of initializers that have not yet acted on this - object. If nil or empty, this object has been completely - initialized. Otherwise, the object is considered uninitialized - and is hidden (in list/watch and get calls) from clients - that haven't explicitly asked to observe uninitialized - objects. \n When an object is created, the system will - populate this list with the current set of initializers. - Only privileged users may set or modify this list. Once - it is empty, it may not be modified further by any user. - \n DEPRECATED - initializers are an alpha field and will - be removed in v1.15." 
- properties: - pending: - description: Pending is a list of initializers that - must execute in order before this object is visible. - When the last pending initializer is removed, and - no failing result is set, the initializers struct - will be set to nil and the object is considered as - initialized and visible to all clients. - items: - properties: - name: - description: name of the process that is responsible - for initializing this object. - type: string - required: - - name - type: object - type: array - result: - description: If result is set with the Failure field, - the object will be persisted to storage and then deleted, - ensuring that other clients can observe the deletion. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema - of this representation of an object. Servers should - convert recognized schemas to the latest internal - value, and may reject unrecognized values. More - info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - code: - description: Suggested HTTP return code for this - status, 0 if not set. - format: int32 - type: integer - details: - description: Extended data associated with the reason. Each - reason may define its own extended details. This - field is optional and the data returned is not - guaranteed to conform to any schema except that - defined by the reason type. - properties: - causes: - description: The Causes array includes more - details associated with the StatusReason failure. - Not all StatusReasons may provide detailed - causes. - items: - properties: - field: - description: "The field of the resource - that has caused this error, as named - by its JSON serialization. May include - dot and postfix notation for nested - attributes. Arrays are zero-indexed. - \ Fields may appear more than once in - an array of causes due to fields having - multiple errors. Optional. \n Examples: - \ \"name\" - the field \"name\" on - the current resource \"items[0].name\" - - the field \"name\" on the first array - entry in \"items\"" - type: string - message: - description: A human-readable description - of the cause of the error. This field - may be presented as-is to a reader. - type: string - reason: - description: A machine-readable description - of the cause of the error. If this value - is empty there is no information available. - type: string - type: object - type: array - group: - description: The group attribute of the resource - associated with the status StatusReason. - type: string - kind: - description: 'The kind attribute of the resource - associated with the status StatusReason. On - some operations may differ from the requested - resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - name: - description: The name attribute of the resource - associated with the status StatusReason (when - there is a single name which can be described). - type: string - retryAfterSeconds: - description: If specified, the time in seconds - before the operation should be retried. Some - errors may indicate the client must take an - alternate action - for those errors this field - may indicate how long to wait before taking - the alternate action. - format: int32 - type: integer - uid: - description: 'UID of the resource. (when there - is a single resource which can be described). 
- More info: http://kubernetes.io/docs/user-guide/identifiers#uids' - type: string - type: object - kind: - description: 'Kind is a string value representing - the REST resource this object represents. Servers - may infer this from the endpoint the client submits - requests to. Cannot be updated. In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - message: - description: A human-readable description of the - status of this operation. - type: string - metadata: - description: 'Standard list metadata. More info: - https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - properties: - continue: - description: continue may be set if the user - set a limit on the number of items returned, - and indicates that the server has more data - available. The value is opaque and may be - used to issue another request to the endpoint - that served this list to retrieve the next - set of available objects. Continuing a consistent - list may not be possible if the server configuration - has changed or more than a few minutes have - passed. The resourceVersion field returned - when using this continue value will be identical - to the value in the first response, unless - you have received this token from an error - message. - type: string - resourceVersion: - description: 'String that identifies the server''s - internal version of this object that can be - used by clients to determine when objects - have changed. Value must be treated as opaque - by clients and passed unmodified back to the - server. Populated by the system. Read-only. - More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' - type: string - selfLink: - description: selfLink is a URL representing - this object. Populated by the system. Read-only. - type: string - type: object - reason: - description: A machine-readable description of why - this operation is in the "Failure" status. If - this value is empty there is no information available. - A Reason clarifies an HTTP status code but does - not override it. - type: string - status: - description: 'Status of the operation. One of: "Success" - or "Failure". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status' - type: string - type: object - required: - - pending - type: object - labels: - additionalProperties: - type: string - description: 'Map of string keys and values that can be - used to organize and categorize (scope and select) objects. - May match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels' - type: object - managedFields: - description: "ManagedFields maps workflow-id and version - to the set of fields that are managed by that workflow. - This is mostly for internal housekeeping, and users typically - shouldn't need to set or understand this field. A workflow - can be the user's name, a controller's name, or the name - of a specific apply path like \"ci-cd\". The set of fields - is always in the version that the workflow used when modifying - the object. \n This field is alpha and can be changed - or removed without notice." - items: - properties: - apiVersion: - description: APIVersion defines the version of this - resource that this field set applies to. The format - is "group/version" just like the top-level APIVersion - field. 
It is necessary to track the version of a - field set because it cannot be automatically converted. - type: string - fields: - additionalProperties: true - description: Fields identifies a set of fields. - type: object - manager: - description: Manager is an identifier of the workflow - managing these fields. - type: string - operation: - description: Operation is the type of operation which - lead to this ManagedFieldsEntry being created. The - only valid values for this field are 'Apply' and - 'Update'. - type: string - time: - description: Time is timestamp of when these fields - were set. It should always be empty if Operation - is 'Apply' - format: date-time - type: string - type: object - type: array - name: - description: 'Name must be unique within a namespace. Is - required when creating resources, although some resources - may allow a client to request the generation of an appropriate - name automatically. Name is primarily intended for creation - idempotence and configuration definition. Cannot be updated. - More info: http://kubernetes.io/docs/user-guide/identifiers#names' - type: string - namespace: - description: "Namespace defines the space within each name - must be unique. An empty namespace is equivalent to the - \"default\" namespace, but \"default\" is the canonical - representation. Not all objects are required to be scoped - to a namespace - the value of this field for those objects - will be empty. \n Must be a DNS_LABEL. Cannot be updated. - More info: http://kubernetes.io/docs/user-guide/namespaces" - type: string - ownerReferences: - description: List of objects depended by this object. If - ALL objects in the list have been deleted, this object - will be garbage collected. If this object is managed by - a controller, then an entry in this list will point to - this controller, with the controller field set to true. - There cannot be more than one managing controller. - items: - properties: - apiVersion: - description: API version of the referent. - type: string - blockOwnerDeletion: - description: If true, AND if the owner has the "foregroundDeletion" - finalizer, then the owner cannot be deleted from - the key-value store until this reference is removed. - Defaults to false. To set this field, a user needs - "delete" permission of the owner, otherwise 422 - (Unprocessable Entity) will be returned. - type: boolean - controller: - description: If true, this reference points to the - managing controller. - type: boolean - kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - name: - description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' - type: string - uid: - description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' - type: string - required: - - apiVersion - - kind - - name - - uid - type: object - type: array - resourceVersion: - description: "An opaque value that represents the internal - version of this object that can be used by clients to - determine when objects have changed. May be used for optimistic - concurrency, change detection, and the watch operation - on a resource or set of resources. Clients must treat - these values as opaque and passed unmodified back to the - server. They may only be valid for a particular resource - or set of resources. \n Populated by the system. Read-only. - Value must be treated as opaque by clients and . 
More - info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency" - type: string - selfLink: - description: SelfLink is a URL representing this object. - Populated by the system. Read-only. - type: string - uid: - description: "UID is the unique in time and space value - for this object. It is typically generated by the server - on successful creation of a resource and is not allowed - to change on PUT operations. \n Populated by the system. - Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" - type: string - type: object - subsets: - description: The set of all endpoints is the union of all subsets. - Addresses are placed into subsets according to the IPs they - share. A single address with multiple ports, some of which - are ready and some of which are not (because they come from - different containers) will result in the address being displayed - in different subsets for the different ports. No address will - appear in both Addresses and NotReadyAddresses in the same - subset. Sets of addresses and ports that comprise a service. - items: - properties: - addresses: - description: IP addresses which offer the related ports - that are marked as ready. These endpoints should be - considered safe for load balancers and clients to utilize. - items: - properties: - hostname: - description: The Hostname of this endpoint - type: string - ip: - description: 'The IP of this endpoint. May not be - loopback (127.0.0.0/8), link-local (169.254.0.0/16), - or link-local multicast ((224.0.0.0/24). IPv6 - is also accepted but not fully supported on all - platforms. Also, certain kubernetes components, - like kube-proxy, are not IPv6 ready. TODO: This - should allow hostname or IP, See #4447.' - type: string - nodeName: - description: 'Optional: Node hosting this endpoint. - This can be used to determine endpoints local - to a node.' - type: string - targetRef: - description: Reference to object providing the endpoint. - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: 'If referring to a piece of an - object instead of an entire object, this string - should contain a valid JSON/Go field access - statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to - a container within a pod, this would take - on a value like: "spec.containers{name}" (where - "name" refers to the name of the container - that triggered the event) or if no container - name is specified "spec.containers[2]" (container - with index 2 in this pod). This syntax is - chosen only to have some well-defined way - of referencing a part of an object. TODO: - this design is not final and this field is - subject to change in the future.' - type: string - kind: - description: 'Kind of the referent. More info: - https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - namespace: - description: 'Namespace of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' - type: string - resourceVersion: - description: 'Specific resourceVersion to which - this reference is made, if any. 
More info: - https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' - type: string - uid: - description: 'UID of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' - type: string - type: object - required: - - ip - type: object - type: array - notReadyAddresses: - description: IP addresses which offer the related ports - but are not currently marked as ready because they have - not yet finished starting, have recently failed a readiness - check, or have recently failed a liveness check. - items: - properties: - hostname: - description: The Hostname of this endpoint - type: string - ip: - description: 'The IP of this endpoint. May not be - loopback (127.0.0.0/8), link-local (169.254.0.0/16), - or link-local multicast ((224.0.0.0/24). IPv6 - is also accepted but not fully supported on all - platforms. Also, certain kubernetes components, - like kube-proxy, are not IPv6 ready. TODO: This - should allow hostname or IP, See #4447.' - type: string - nodeName: - description: 'Optional: Node hosting this endpoint. - This can be used to determine endpoints local - to a node.' - type: string - targetRef: - description: Reference to object providing the endpoint. - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: 'If referring to a piece of an - object instead of an entire object, this string - should contain a valid JSON/Go field access - statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to - a container within a pod, this would take - on a value like: "spec.containers{name}" (where - "name" refers to the name of the container - that triggered the event) or if no container - name is specified "spec.containers[2]" (container - with index 2 in this pod). This syntax is - chosen only to have some well-defined way - of referencing a part of an object. TODO: - this design is not final and this field is - subject to change in the future.' - type: string - kind: - description: 'Kind of the referent. More info: - https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - namespace: - description: 'Namespace of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' - type: string - resourceVersion: - description: 'Specific resourceVersion to which - this reference is made, if any. More info: - https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' - type: string - uid: - description: 'UID of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' - type: string - type: object - required: - - ip - type: object - type: array - ports: - description: Port numbers available on the related IP - addresses. - items: - properties: - name: - description: The name of this port (corresponds - to ServicePort.Name). Must be a DNS_LABEL. Optional - only if one port is defined. - type: string - port: - description: The port number of the endpoint. - format: int32 - type: integer - protocol: - description: The IP protocol for this port. Must - be UDP, TCP, or SCTP. Default is TCP. 
- type: string - required: - - port - type: object - type: array - type: object - type: array - type: object - type: array - etcd: - properties: - apiVersion: - description: API version of the referent - type: string - kind: - description: Kind of the referent - type: string - name: - description: Name of the referent - type: string - type: object - labelSelector: - description: selector is a label query over pods that should match - the replica count. It must match the pod template's labels. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. - items: - properties: - key: - description: key is the label key that the selector applies - to. - type: string - operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. - type: object - type: object - lastError: - type: string - ready: - type: boolean - readyReplicas: - format: int32 - type: integer - replicas: - format: int32 - type: integer - serviceName: - type: string - updatedReplicas: - format: int32 - type: integer - type: object - type: object - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/default/manager_image_patch.yaml b/config/default/manager_image_patch.yaml index 925da7ba5..a83263022 100644 --- a/config/default/manager_image_patch.yaml +++ b/config/default/manager_image_patch.yaml @@ -8,5 +8,5 @@ spec: spec: containers: # Change the value of image field below to your controller image URL - - image: eu.gcr.io/gardener-project/gardener/druid:0.1.0-dev + - image: eu.gcr.io/gardener-project/gardener/etcd-druid:0.1.0-dev name: druid diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 590002165..7f249764a 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -11,18 +11,18 @@ rules: resources: - etcds verbs: + - create + - delete - get - list - - watch - - create - - update - patch - - delete + - update + - watch - apiGroups: - druid.gardener.cloud resources: - etcds/status verbs: - get - - update - patch + - update diff --git a/config/samples/druid_v1alpha1_etcd.yaml b/config/samples/druid_v1alpha1_etcd.yaml index b55242c9e..85c9d1077 100644 --- a/config/samples/druid_v1alpha1_etcd.yaml +++ b/config/samples/druid_v1alpha1_etcd.yaml @@ -8,6 +8,10 @@ metadata: garden.sapcloud.io/role: controlplane role: test spec: + selector: + app: etcd-statefulset + garden.sapcloud.io/role: controlplane + role: test annotations: app: etcd-statefulset garden.sapcloud.io/role: controlplane @@ -24,7 +28,7 @@ spec: role: test etcd: metrics: basic - version: v3.3.13 + image: quay.io/coreos/etcd:v3.3.13 defragmentationSchedule: '0 */24 * * *' resources: limits: {cpu: 2500m, 
memory: 4Gi} @@ -32,7 +36,7 @@ spec: clientPort: 2379 serverPort: 2380 backup: - version: 0.8.0-dev-dd26b65a2ab4efd77805584b33e8c559dd3c6d53 + image: eu.gcr.io/gardener-project/gardener/etcdbrctl:0.8.0-dev-dd26b65a2ab4efd77805584b33e8c559dd3c6d53 port: 8080 fullSnapshotSchedule: '0 */24 * * *' resources: diff --git a/controllers/controller_ref_manager.go b/controllers/controller_ref_manager.go index 686522583..b8d51fe67 100644 --- a/controllers/controller_ref_manager.go +++ b/controllers/controller_ref_manager.go @@ -27,7 +27,6 @@ import ( "fmt" "sync" - //"github.com/gardener/hvpa-controller/api/v1alpha1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" diff --git a/controllers/etcd_controller.go b/controllers/etcd_controller.go index 3f3fd570c..808270107 100644 --- a/controllers/etcd_controller.go +++ b/controllers/etcd_controller.go @@ -39,7 +39,6 @@ import ( "github.com/gardener/etcd-druid/pkg/utils" kubernetes "github.com/gardener/etcd-druid/pkg/client/kubernetes" - "github.com/gardener/gardener/pkg/utils/imagevector" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -238,9 +237,7 @@ func (r *EtcdReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { func (r *EtcdReconciler) reconcileServices(etcd *druidv1alpha1.Etcd) (*corev1.Service, error) { logger.Infof("Reconciling etcd services for etcd:%s in namespace:%s", etcd.Name, etcd.Namespace) - selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ - MatchLabels: etcd.Spec.Labels, - }) + selector, err := metav1.LabelSelectorAsSelector(etcd.Spec.Selector) if err != nil { logger.Error(err, "Error converting etcd selector to selector") return nil, err @@ -249,17 +246,21 @@ func (r *EtcdReconciler) reconcileServices(etcd *druidv1alpha1.Etcd) (*corev1.Se // list all services to include the services that don't match the etcd`s selector // anymore but has the stale controller ref. services := &corev1.ServiceList{} - err = r.List(context.TODO(), services, client.InNamespace(etcd.Namespace)) + err = r.List(context.TODO(), services, client.InNamespace(etcd.Namespace), client.MatchingLabelsSelector{Selector: selector}) if err != nil { logger.Error(err, "Error listing statefulsets") return nil, err } + for _, s := range services.Items { + logger.Infof("Services: %s\n", s.Name) + } // NOTE: filteredStatefulSets are pointing to deepcopies of the cache, but this could change in the future. // Ref: https://github.com/kubernetes-sigs/controller-runtime/blob/release-0.2/pkg/cache/internal/cache_reader.go#L74 // if you need to modify them, you need to copy it first. filteredServices, err := r.claimServices(etcd, selector, services) if err != nil { + logger.Error(err, "Error claiming service") return nil, err } @@ -281,6 +282,7 @@ func (r *EtcdReconciler) reconcileServices(etcd *druidv1alpha1.Etcd) (*corev1.Se if err != nil { return nil, err } + // Statefulset is claimed by for this etcd. 
Just sync the specs if service, err = r.syncServiceSpec(service, etcd); err != nil { return nil, err @@ -330,6 +332,7 @@ func (r *EtcdReconciler) syncServiceSpec(ss *corev1.Service, etcd *druidv1alpha1 decoded.Spec.DeepCopyInto(&ssCopy.Spec) // Copy ClusterIP as the field is immutable ssCopy.Spec.ClusterIP = ss.Spec.ClusterIP + err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { return r.Patch(context.TODO(), ssCopy, client.MergeFrom(ss)) }) @@ -353,7 +356,7 @@ func (r *EtcdReconciler) getServiceFromEtcd(etcd *druidv1alpha1.Etcd) (*corev1.S if _, ok := r.RenderedChart.Files()[servicePath]; !ok { return nil, fmt.Errorf("missing service template file in the charts: %v", servicePath) } - //logger.Infof("%v: %v", statefulsetPath, renderer.Files()[statefulsetPath]) + decoder := yaml.NewYAMLOrJSONDecoder(bytes.NewReader([]byte(r.RenderedChart.Files()[servicePath])), 1024) if err = decoder.Decode(&decoded); err != nil { @@ -365,9 +368,7 @@ func (r *EtcdReconciler) getServiceFromEtcd(etcd *druidv1alpha1.Etcd) (*corev1.S func (r *EtcdReconciler) reconcileConfigMaps(etcd *druidv1alpha1.Etcd) (*corev1.ConfigMap, error) { logger.Infof("Reconciling etcd configmap for etcd:%s in namespace:%s", etcd.Name, etcd.Namespace) - selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ - MatchLabels: etcd.Spec.Labels, - }) + selector, err := metav1.LabelSelectorAsSelector(etcd.Spec.Selector) if err != nil { logger.Error(err, "Error converting etcd selector to selector") return nil, err @@ -376,12 +377,14 @@ func (r *EtcdReconciler) reconcileConfigMaps(etcd *druidv1alpha1.Etcd) (*corev1. // list all configmaps to include the configmaps that don't match the etcd`s selector // anymore but has the stale controller ref. cms := &corev1.ConfigMapList{} - err = r.List(context.TODO(), cms, client.InNamespace(etcd.Namespace)) + err = r.List(context.TODO(), cms, client.InNamespace(etcd.Namespace), client.MatchingLabelsSelector{Selector: selector}) if err != nil { logger.Error(err, "Error listing statefulsets") return nil, err } + logger.Infof("Configmaps: %d\n", len(cms.Items)) + // NOTE: filteredStatefulSets are pointing to deepcopies of the cache, but this could change in the future. // Ref: https://github.com/kubernetes-sigs/controller-runtime/blob/release-0.2/pkg/cache/internal/cache_reader.go#L74 // if you need to modify them, you need to copy it first. @@ -455,6 +458,7 @@ func (r *EtcdReconciler) syncConfigMapData(cm *corev1.ConfigMap, etcd *druidv1al } cmCopy := cm.DeepCopy() cmCopy.Data = decoded.Data + err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { return r.Patch(context.TODO(), cmCopy, client.MergeFrom(cm)) }) @@ -490,9 +494,7 @@ func (r *EtcdReconciler) getConfigMapFromEtcd(etcd *druidv1alpha1.Etcd) (*corev1 func (r *EtcdReconciler) reconcileStatefulSet(cm *corev1.ConfigMap, svc *corev1.Service, etcd *druidv1alpha1.Etcd) (*appsv1.StatefulSet, error) { logger.Infof("Reconciling etcd statefulset for etcd:%s in namespace:%s", etcd.Name, etcd.Namespace) - selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ - MatchLabels: etcd.Spec.Labels, - }) + selector, err := metav1.LabelSelectorAsSelector(etcd.Spec.Selector) if err != nil { logger.Error(err, "Error converting etcd selector to selector") return nil, err @@ -501,7 +503,7 @@ func (r *EtcdReconciler) reconcileStatefulSet(cm *corev1.ConfigMap, svc *corev1. // list all statefulsets to include the statefulsets that don't match the etcd`s selector // anymore but has the stale controller ref. 
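// ----------------------------------------------------------------------------
// Editor's aside (illustrative sketch, not part of this patch): the controller
// hunks above replace the label-map based selector with the new spec.selector
// field and pass it to the client's List calls. The self-contained example
// below shows that controller-runtime v0.4 listing pattern in isolation. The
// package name "sketch" and the helper listOwnedStatefulSets are hypothetical
// and do not exist in this repository; claiming of objects with stale
// controller refs still happens in the claim* functions shown in the diff.
// ----------------------------------------------------------------------------
package sketch

import (
	"context"

	druidv1alpha1 "github.com/gardener/etcd-druid/api/v1alpha1"
	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listOwnedStatefulSets returns the StatefulSets in the Etcd's namespace whose
// labels match the Etcd's spec.selector.
func listOwnedStatefulSets(ctx context.Context, c client.Client, etcd *druidv1alpha1.Etcd) (*appsv1.StatefulSetList, error) {
	// Convert the CR's *metav1.LabelSelector into a labels.Selector the client understands.
	selector, err := metav1.LabelSelectorAsSelector(etcd.Spec.Selector)
	if err != nil {
		return nil, err
	}
	statefulSets := &appsv1.StatefulSetList{}
	// Scope the cache-backed List to the namespace and the label selector.
	if err := c.List(ctx, statefulSets,
		client.InNamespace(etcd.Namespace),
		client.MatchingLabelsSelector{Selector: selector},
	); err != nil {
		return nil, err
	}
	return statefulSets, nil
}
// ----------------------------------------------------------------------------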
statefulSets := &appsv1.StatefulSetList{} - err = r.List(context.TODO(), statefulSets, client.InNamespace(etcd.Namespace)) + err = r.List(context.TODO(), statefulSets, client.InNamespace(etcd.Namespace), client.MatchingLabelsSelector{Selector: selector}) if err != nil { logger.Error(err, "Error listing statefulsets") return nil, err @@ -577,6 +579,7 @@ func (r *EtcdReconciler) syncStatefulSetSpec(ss *appsv1.StatefulSet, cm *corev1. ssCopy.Spec.Replicas = decoded.Spec.Replicas ssCopy.Spec.UpdateStrategy = decoded.Spec.UpdateStrategy ssCopy.Spec.Template = decoded.Spec.Template + err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { return r.Patch(context.TODO(), ssCopy, client.MergeFrom(ss)) }) @@ -618,7 +621,9 @@ func (r *EtcdReconciler) reconcileEtcd(etcd *druidv1alpha1.Etcd) (*corev1.Servic } chartPath := getChartPath() renderer, err := r.ChartApplier.Render(chartPath, etcd.Name, etcd.Namespace, values) - + if err != nil { + return nil, nil, err + } r.RenderedChart = renderer svc, err := r.reconcileServices(etcd) @@ -666,22 +671,12 @@ func (r *EtcdReconciler) getMapFromEtcd(etcd *druidv1alpha1.Etcd) (map[string]in if etcd.Spec.Replicas != 0 { statefulsetReplicas = 1 } - imageYAMLPath := r.getImageYAMLPath() - imageVector, err := imagevector.ReadGlobalImageVectorWithEnvOverride(imageYAMLPath) - if err != nil { - return nil, err - } - images, err := imagevector.FindImages(imageVector, []string{"etcd", "backup-restore"}) - if err != nil { - return nil, err - } etcdValues := map[string]interface{}{ "defragmentationSchedule": etcd.Spec.Etcd.DefragmentationSchedule, "serverPort": etcd.Spec.Etcd.ServerPort, "clientPort": etcd.Spec.Etcd.ClientPort, - "imageRepository": images["etcd"].Repository, - "imageVersion": etcd.Spec.Etcd.Version, + "image": etcd.Spec.Etcd.Image, "metrics": etcd.Spec.Etcd.Metrics, "resources": etcd.Spec.Etcd.Resources, "enableTLS": (etcd.Spec.Etcd.TLS != nil), @@ -695,8 +690,7 @@ func (r *EtcdReconciler) getMapFromEtcd(etcd *druidv1alpha1.Etcd) (map[string]in quota = etcd.Spec.Etcd.Quota.Value() } backupValues := map[string]interface{}{ - "imageVersion": etcd.Spec.Backup.Version, - "imageRepository": images["backup-restore"].Repository, + "image": etcd.Spec.Backup.Image, "fullSnapshotSchedule": etcd.Spec.Backup.FullSnapshotSchedule, "port": etcd.Spec.Backup.Port, "resources": etcd.Spec.Backup.Resources, @@ -718,24 +712,27 @@ func (r *EtcdReconciler) getMapFromEtcd(etcd *druidv1alpha1.Etcd) (map[string]in } values := map[string]interface{}{ - "etcd": etcdValues, - "backup": backupValues, - "store": storeValues, - "name": etcd.Name, - "replicas": etcd.Spec.Replicas, - "labels": etcd.Spec.Labels, - "annotations": etcd.Spec.Annotations, - "storageClass": etcd.Spec.StorageClass, - "storageCapacity": etcd.Spec.StorageCapacity, - "uid": etcd.UID, - "statefulsetReplicas": statefulsetReplicas, - "serviceName": fmt.Sprintf("etcd-client-%s", string(etcd.UID[:6])), - "configMapName": fmt.Sprintf("etcd-bootstrap-%s", string(etcd.UID[:6])), + "etcd": etcdValues, + "backup": backupValues, + "store": storeValues, + "name": etcd.Name, + "replicas": etcd.Spec.Replicas, + "labels": etcd.Spec.Labels, + "annotations": etcd.Spec.Annotations, + "storageClass": etcd.Spec.StorageClass, + "storageCapacity": etcd.Spec.StorageCapacity, + "uid": etcd.UID, + "statefulsetReplicas": statefulsetReplicas, + "serviceName": fmt.Sprintf("%s-client", etcd.Name), + "configMapName": fmt.Sprintf("etcd-bootstrap-%s", string(etcd.UID[:6])), + "volumeClaimTemplateName": etcd.Spec.VolumeClaimTemplate, + 
"selector": etcd.Spec.Selector, } if etcd.Spec.Etcd.TLS != nil { values["tlsServerSecret"] = etcd.Spec.Etcd.TLS.ServerTLSSecretRef.Name values["tlsClientSecret"] = etcd.Spec.Etcd.TLS.ClientTLSSecretRef.Name + values["tlsCASecret"] = etcd.Spec.Etcd.TLS.TLSCASecretRef.Name } return values, nil @@ -792,52 +789,63 @@ func (r *EtcdReconciler) addFinalizersToDependantSecrets(etcd *druidv1alpha1.Etc } func (r *EtcdReconciler) removeFinalizersToDependantSecrets(etcd *druidv1alpha1.Etcd) error { + var err error storeSecret := corev1.Secret{} - if err := r.Client.Get(context.TODO(), types.NamespacedName{ + err = r.Client.Get(context.TODO(), types.NamespacedName{ Name: etcd.Spec.Backup.Store.SecretRef.Name, Namespace: etcd.Namespace, - }, &storeSecret); err != nil { + }, &storeSecret) + if err != nil && !errors.IsNotFound(err) { return err } - if finalizers := sets.NewString(storeSecret.Finalizers...); finalizers.Has(FinalizerName) { - logger.Infof("Removing finalizer (%s) from secret %s", FinalizerName, storeSecret.GetName()) - storeSecretCopy := storeSecret.DeepCopy() - finalizers.Delete(FinalizerName) - storeSecretCopy.Finalizers = finalizers.UnsortedList() - if err := r.Update(context.TODO(), storeSecretCopy); err != nil { - return err + if err == nil { + if finalizers := sets.NewString(storeSecret.Finalizers...); finalizers.Has(FinalizerName) { + logger.Infof("Removing finalizer (%s) from secret %s", FinalizerName, storeSecret.GetName()) + storeSecretCopy := storeSecret.DeepCopy() + finalizers.Delete(FinalizerName) + storeSecretCopy.Finalizers = finalizers.UnsortedList() + if err := r.Update(context.TODO(), storeSecretCopy); err != nil { + return err + } } } if etcd.Spec.Etcd.TLS != nil { clientSecret := corev1.Secret{} - if err := r.Client.Get(context.TODO(), types.NamespacedName{ + err = r.Client.Get(context.TODO(), types.NamespacedName{ Name: etcd.Spec.Etcd.TLS.ClientTLSSecretRef.Name, Namespace: etcd.Namespace, - }, &clientSecret); err != nil { + }, &clientSecret) + if err != nil && !errors.IsNotFound(err) { return err } - if finalizers := sets.NewString(clientSecret.Finalizers...); finalizers.Has(FinalizerName) { - logger.Infof("Removing finalizer (%s) from secret %s", FinalizerName, clientSecret.GetName()) - clientSecretCopy := clientSecret.DeepCopy() - finalizers.Delete(FinalizerName) - clientSecretCopy.Finalizers = finalizers.UnsortedList() - if err := r.Update(context.TODO(), clientSecretCopy); err != nil { - return err + if err == nil { + if finalizers := sets.NewString(clientSecret.Finalizers...); finalizers.Has(FinalizerName) { + logger.Infof("Removing finalizer (%s) from secret %s", FinalizerName, clientSecret.GetName()) + clientSecretCopy := clientSecret.DeepCopy() + finalizers.Delete(FinalizerName) + clientSecretCopy.Finalizers = finalizers.UnsortedList() + if err := r.Update(context.TODO(), clientSecretCopy); err != nil { + return err + } } } - serverSecret := corev1.Secret{} - r.Client.Get(context.TODO(), types.NamespacedName{ + err = r.Client.Get(context.TODO(), types.NamespacedName{ Name: etcd.Spec.Etcd.TLS.ServerTLSSecretRef.Name, Namespace: etcd.Namespace, }, &serverSecret) - if finalizers := sets.NewString(serverSecret.Finalizers...); finalizers.Has(FinalizerName) { - logger.Infof("Removing finalizer (%s) from secret %s", FinalizerName, serverSecret.GetName()) - serverSecretCopy := serverSecret.DeepCopy() - finalizers.Delete(FinalizerName) - serverSecretCopy.Finalizers = finalizers.UnsortedList() - if err := r.Update(context.TODO(), serverSecretCopy); err != nil { - return 
err + if err != nil && !errors.IsNotFound(err) { + return err + } + if err == nil { + if finalizers := sets.NewString(serverSecret.Finalizers...); finalizers.Has(FinalizerName) { + logger.Infof("Removing finalizer (%s) from secret %s", FinalizerName, serverSecret.GetName()) + serverSecretCopy := serverSecret.DeepCopy() + finalizers.Delete(FinalizerName) + serverSecretCopy.Finalizers = finalizers.UnsortedList() + if err := r.Update(context.TODO(), serverSecretCopy); err != nil { + return err + } } } } diff --git a/controllers/etcd_controller_test.go b/controllers/etcd_controller_test.go index 30312ae48..b7b22c002 100644 --- a/controllers/etcd_controller_test.go +++ b/controllers/etcd_controller_test.go @@ -57,7 +57,8 @@ var _ = Describe("Druid", func() { storeSecret := instance.Spec.Backup.Store.SecretRef.Name tlsClientSecret := instance.Spec.Etcd.TLS.ClientTLSSecretRef.Name tlsServerName := instance.Spec.Etcd.TLS.ServerTLSSecretRef.Name - errors := createSecrets(c, instance.Namespace, storeSecret, tlsClientSecret, tlsServerName) + tlsCAName := instance.Spec.Etcd.TLS.TLSCASecretRef.Name + errors := createSecrets(c, instance.Namespace, storeSecret, tlsCAName, tlsClientSecret, tlsServerName) Expect(len(errors)).Should(BeZero()) }) It("should create statefulset", func() { @@ -88,7 +89,7 @@ var _ = Describe("Druid", func() { Expect(err).NotTo(HaveOccurred()) c = mgr.GetClient() - ss = createStatefulset("foo3", "default", instance.Labels) + ss = createStatefulset("foo3", "default", instance.Spec.Labels) storeSecret := instance.Spec.Backup.Store.SecretRef.Name tlsClientSecret := instance.Spec.Etcd.TLS.ClientTLSSecretRef.Name tlsServerName := instance.Spec.Etcd.TLS.ServerTLSSecretRef.Name @@ -120,7 +121,7 @@ func getStatefulset(c client.Client, instance *druidv1alpha1.Etcd, ss *appsv1.St defer cancel() wait.PollImmediateUntil(1*time.Second, func() (bool, error) { req := types.NamespacedName{ - Name: fmt.Sprintf("etcd-sts-%s", string(instance.UID[:6])), + Name: fmt.Sprintf("%s", instance.Name), Namespace: instance.Namespace, } if err := c.Get(ctx, req, ss); err != nil { @@ -178,12 +179,13 @@ func getEtcd(name, namespace string) *druidv1alpha1.Etcd { tlsConfig := &druidv1alpha1.TLSConfig{ ClientTLSSecretRef: corev1.SecretReference{ - Name: "etcd-client-tls", - Namespace: namespace, + Name: "etcd-client-tls", }, ServerTLSSecretRef: corev1.SecretReference{ - Name: "etcd-server-tls", - Namespace: namespace, + Name: "etcd-server-tls", + }, + TLSCASecretRef: corev1.SecretReference{ + Name: "ca-etcd", }, } @@ -202,12 +204,18 @@ func getEtcd(name, namespace string) *druidv1alpha1.Etcd { "role": "test", "name": name, }, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "etcd-statefulset", + "name": name, + }, + }, Replicas: 1, StorageClass: &storageClass, StorageCapacity: &storageCapacity, Backup: druidv1alpha1.BackupSpec{ - Version: "0.8.0-dev", + Image: "eu.gcr.io/gardener-project/gardener/etcdbrctl:0.8.0-dev", Port: &port, FullSnapshotSchedule: &snapshotSchedule, GarbageCollectionPolicy: &garbageCollectionPolicy, @@ -237,7 +245,7 @@ func getEtcd(name, namespace string) *druidv1alpha1.Etcd { Etcd: druidv1alpha1.EtcdConfig{ Quota: &quota, Metrics: druidv1alpha1.Basic, - Version: "v3.3.13", + Image: "quay.io/coreos/etcd:v3.3.13", DefragmentationSchedule: &defragSchedule, Resources: &corev1.ResourceRequirements{ Limits: corev1.ResourceList{ diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 5fb802b4b..860358f63 100644 --- a/controllers/suite_test.go +++
b/controllers/suite_test.go @@ -85,7 +85,9 @@ var _ = BeforeSuite(func(done Done) { Expect(k8sClient).ToNot(BeNil()) Expect(cfg).ToNot(BeNil()) - mgr, err = manager.New(cfg, manager.Options{}) + mgr, err = manager.New(cfg, manager.Options{ + MetricsBindAddress: "0", + }) Expect(err).NotTo(HaveOccurred()) Expect(cfg).ToNot(BeNil()) diff --git a/go.mod b/go.mod index a066170cf..64375c681 100644 --- a/go.mod +++ b/go.mod @@ -3,48 +3,55 @@ module github.com/gardener/etcd-druid go 1.12 require ( - github.com/Masterminds/goutils v1.1.0 // indirect - github.com/cyphar/filepath-securejoin v0.2.2 // indirect - github.com/gardener/external-dns-management v0.0.0-20190722114702-f6b12f6e4b43 // indirect - github.com/gardener/gardener v0.0.0-20191001105106-32539124646f - github.com/gobuffalo/logger v1.0.1 // indirect - github.com/gobuffalo/packd v0.3.0 // indirect - github.com/gobwas/glob v0.2.3 // indirect + cloud.google.com/go v0.50.0 // indirect + github.com/Masterminds/semver v1.5.0 // indirect + github.com/gardener/gardener v0.33.6 + github.com/gogo/protobuf v1.3.1 // indirect + github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 // indirect github.com/hashicorp/go-multierror v1.0.0 // indirect - github.com/karrick/godirwalk v1.10.12 // indirect - github.com/onsi/ginkgo v1.10.3 + github.com/huandu/xstrings v1.2.1 // indirect + github.com/imdario/mergo v0.3.8 // indirect + github.com/json-iterator/go v1.1.9 // indirect + github.com/mitchellh/reflectwalk v1.0.1 // indirect + github.com/onsi/ginkgo v1.8.0 github.com/onsi/gomega v1.5.0 + github.com/prometheus/client_golang v1.3.0 // indirect github.com/sirupsen/logrus v1.4.2 - github.com/ugorji/go/codec v1.1.7 // indirect - golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e // indirect - golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f // indirect - golang.org/x/net v0.0.0-20191204025024-5ee1b9f4859a - golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e // indirect - golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e // indirect - golang.org/x/tools v0.0.0-20191204193430-660eba4da30b // indirect + go.uber.org/atomic v1.5.1 // indirect + go.uber.org/multierr v1.4.0 // indirect + go.uber.org/zap v1.13.0 // indirect + golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876 // indirect + golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 + golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 // indirect + golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8 // indirect + golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect + golang.org/x/tools v0.0.0-20191230220329-2aa90c603ae3 // indirect golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 // indirect - gopkg.in/ini.v1 v1.44.0 // indirect - k8s.io/api v0.0.0-20190826194732-9f642ccb7a30 - k8s.io/apimachinery v0.0.0-20190827074644-f378a67c6af3 + google.golang.org/appengine v1.6.5 // indirect + gopkg.in/yaml.v2 v2.2.7 // indirect + k8s.io/api v0.17.0 + k8s.io/apiextensions-apiserver v0.17.0 // indirect + k8s.io/apimachinery v0.17.0 k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible - k8s.io/helm v2.14.3+incompatible - k8s.io/kube-openapi v0.0.0-20190722073852-5e22f3d471e6 // indirect - k8s.io/utils v0.0.0-20190712204705-3dccf664f023 // indirect - sigs.k8s.io/controller-runtime v0.2.0-beta.4 + k8s.io/helm v2.16.1+incompatible + k8s.io/klog v1.0.0 // indirect + k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a // indirect + k8s.io/utils v0.0.0-20191218082557-f07c713de883 // indirect + sigs.k8s.io/controller-runtime v0.4.0 + 
sigs.k8s.io/controller-tools v0.2.4 // indirect ) replace ( - k8s.io/api => k8s.io/api v0.0.0-20190313235455-40a48860b5ab //kubernetes-1.14.0 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.0.0-20190315093550-53c4693659ed // kubernetes-1.14.0 - k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1 // kubernetes-1.14.0 - k8s.io/apiserver => k8s.io/apiserver v0.0.0-20190313205120-8b27c41bdbb1 // kubernetes-1.14.0 - k8s.io/client-go => k8s.io/client-go v11.0.0+incompatible // kubernetes-1.14.0 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.0.0-20190314002537-50662da99b70 // kubernetes-1.14.0 - k8s.io/code-generator => k8s.io/code-generator v0.0.0-20190311093542-50b561225d70 // kubernetes-1.14.4 - k8s.io/component-base => k8s.io/component-base v0.0.0-20190314000054-4a91899592f4 // kubernetes-1.14.0 + github.com/gardener/gardener-extensions => github.com/gardener/gardener-extensions v0.0.0-20191028142629-438a3dcf5eca + k8s.io/api => k8s.io/api v0.0.0-20190918155943-95b840bb6a1f // kubernetes-1.16.0 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783 // kubernetes-1.16.0 + k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655 // kubernetes-1.16.0 + k8s.io/apiserver => k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad // kubernetes-1.16.0 + k8s.io/client-go => k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90 // kubernetes-1.16.0 + k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.0.0-20190918163108-da9fdfce26bb // kubernetes-1.16.0 + k8s.io/code-generator => k8s.io/code-generator v0.0.0-20190912054826-cd179ad6a269 // kubernetes-1.16.0 + k8s.io/component-base => k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090 // kubernetes-1.16.0 k8s.io/helm => k8s.io/helm v2.13.1+incompatible - k8s.io/klog => k8s.io/klog v0.1.0 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.0.0-20190314000639-da8327669ac5 // kubernetes-1.14.0 - k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20190320154901-5e45bb682580 - sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.2.0-beta.2 + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.0.0-20190918161219-8c8f079fddc3 // kubernetes-1.16.0 + k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf ) diff --git a/go.sum b/go.sum index c585d11d6..89428b984 100644 --- a/go.sum +++ b/go.sum @@ -1,24 +1,31 @@ -cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.37.1 h1:2kHhTjz+eKEI7tt3Fqf5j3APCq+z9tuY2CzeCIxTo+A= -cloud.google.com/go v0.37.1/go.mod h1:SAbnLi6YTSPKSI0dTUEOVLCkyPfKXK8n4ibqiMoj4ok= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.43.0 h1:banaiRPAM8kUVYneOSkhgcDsLzEvL25FinuiSZaH/2w= cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= -contrib.go.opencensus.io/exporter/ocagent v0.4.6/go.mod h1:YuG83h+XWwqWjvCqn7vK4KSyLKhThY3+gNGQ37iS2V0= -contrib.go.opencensus.io/exporter/ocagent v0.4.10/go.mod h1:ueLzZcP7LPhPulEBukGn4aLh7Mx9YJwpVJ9nL2FYltw= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod 
h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0 h1:0E3eE8MX426vUOs7aHfI7aN1BrIzzzf4ccKCSfSjGmc= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +contrib.go.opencensus.io/exporter/ocagent v0.2.0/go.mod h1:0fnkYHF+ORKj7HWzOExKkUHeFX79gXSKUQbpnAM+wzo= contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= +github.com/Azure/azure-sdk-for-go v26.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v32.6.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v11.5.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest/adal v0.2.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= -github.com/Azure/go-autorest/autorest/adal v0.4.0/go.mod h1:n1iM1u+ZMYni6IIY+FB82IT/DbSdC3T3gO1qFQ/3jEw= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/azure/auth v0.3.0/go.mod h1:CI4BQYBct8NS7BXNBBX+RchsFsUu5+oz+OSyR/ZIi7U= @@ -28,11 +35,8 @@ github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+v github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= -github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88= -github.com/Azure/go-autorest/tracing v0.4.0/go.mod 
h1:sVZ/n8H0f4naUjHNvSe2qjNiC3oV6+8CCqU9mhEvav8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -41,158 +45,212 @@ github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RP github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/sprig v0.0.0-20190301161902-9f8fceff796f/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/sprig v2.20.0+incompatible h1:dJTKKuUkYW3RMFdQFXPU/s6hg10RgctmTjRcbZ98Ap8= github.com/Masterminds/sprig v2.20.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= +github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= +github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/ahmetb/gen-crd-api-reference-docs v0.1.5/go.mod h1:P/XzJ+c2+khJKNKABcm2biRwk2QAuwbLf8DlXuaL7WM= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20180828111155-cad214d7d71f/go.mod h1:T9M45xf79ahXVelWoOBmH0y4aC1t5kXO5BxwyakgIGA= github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190723075400-e63e3f9dd712/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ= github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= -github.com/aliyun/aliyun-oss-go-sdk v1.9.6/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= github.com/aliyun/aliyun-oss-go-sdk v2.0.1+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= -github.com/anmitsu/go-shlex 
v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 h1:Kn3rqvbUFqSepE2OqVu0Pn1CbDw9IuMlONapol0zuwk= github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30/go.mod h1:4AJxUpXUhv4N+ziTvIcWWXgeorXpxPZOfk9HdEVr96M= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/aws/aws-sdk-go v1.12.79/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/aws/aws-sdk-go v1.13.54/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k= github.com/aws/aws-sdk-go v1.21.10/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= -github.com/census-instrumentation/opencensus-proto v0.1.0-0.20181214143942-ba49f56771b8/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/census-instrumentation/opencensus-proto v0.0.2-0.20180913191712-f303ae3f8d6a/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.1.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod 
h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/docker/docker v0.7.3-0.20180612054059-a9fbbdc8dd87/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo= github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/elazarl/goproxy v0.0.0-20190703090003-6125c262ffb0/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/elazarl/goproxy/ext v0.0.0-20190703090003-6125c262ffb0/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= +github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= -github.com/emicklei/go-restful 
v2.9.3+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.6+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.1.0+incompatible h1:K1MDoo4AZ4wU0GIU/fPmtZg7VpzLjCxu+UwBD1FvwOc= github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/frankban/quicktest v1.5.0/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/gardener/controller-manager-library v0.0.0-20190418145731-83f4bac4b55f/go.mod h1:v6cbldxmpL2fYBEB2lSnq3LSEPwIHus9En6iIhwNE1k= -github.com/gardener/controller-manager-library v0.0.0-20190715130150-315e86b01963/go.mod h1:v6cbldxmpL2fYBEB2lSnq3LSEPwIHus9En6iIhwNE1k= github.com/gardener/controller-manager-library v0.0.0-20190830161011-2626c078acef/go.mod h1:v6cbldxmpL2fYBEB2lSnq3LSEPwIHus9En6iIhwNE1k= +github.com/gardener/controller-manager-library v0.0.0-20191022090355-2f744b5822cc/go.mod h1:v6cbldxmpL2fYBEB2lSnq3LSEPwIHus9En6iIhwNE1k= github.com/gardener/etcd-backup-restore v0.0.0-20190807103447-4c8bc2972b60/go.mod h1:w55uXFfKnGZFpcfl18L1yFtd2ZhRt1NpA7bxkAAHeKg= github.com/gardener/external-dns-management v0.0.0-20190220100540-b4bbb5832a03/go.mod h1:Y3om11E865x4aQ7cmcHjknb8RMgCO153huRb/SvP+9o= -github.com/gardener/external-dns-management v0.0.0-20190722114702-f6b12f6e4b43/go.mod h1:Y3om11E865x4aQ7cmcHjknb8RMgCO153huRb/SvP+9o= -github.com/gardener/gardener v0.0.0-20190812144748-dec8f3c7f288/go.mod h1:ioI69PbfNBUp10k2OtIED7u0l6u1NePBw/wgox9uayQ= -github.com/gardener/gardener v0.0.0-20190830053951-194cf8abb797/go.mod h1:UjyQiGrjIRJQ82t2PTqtdMXzJxqH/ftQ36jbpFxAWXw= -github.com/gardener/gardener v0.0.0-20191001105106-32539124646f h1:qJzQIcNCiZjnmS0cypyuJGM4CNkGMGFsxyn+VYXjdY8= -github.com/gardener/gardener v0.0.0-20191001105106-32539124646f/go.mod h1:QNOx6rf0BO62L7RWv5G5zTAWsPcM1Jet8A4sWMqfPUE= -github.com/gardener/gardener-extensions v0.0.0-20190725050243-a80ef643c64b/go.mod h1:uXjtl3KeVdQXuGIP26+84wJY1Kwru67l0FXm7A5DiME= -github.com/gardener/gardener-extensions v0.0.0-20190820050625-a15de8a82f6b/go.mod h1:q69+1cUGSfQ8gSMWzU7GFz/R8K8MpOLQBT2wJJcCjEA= -github.com/gardener/gardener-extensions v0.0.0-20190906160200-5c329d46ae81/go.mod h1:OBUAbab8OMm8pvzr/1/cdwIQnQYuMoGDTNR2c+i9nYo= -github.com/gardener/gardener-resource-manager v0.0.0-20190802142246-6b2029430332/go.mod h1:kIPm1DlE5jChb4moAseumj1qr+IITKXPJcqyIKJp14Y= 
-github.com/gardener/gardener-resource-manager v0.0.0-20190802153254-ea0dc5872b6a/go.mod h1:kIPm1DlE5jChb4moAseumj1qr+IITKXPJcqyIKJp14Y= +github.com/gardener/gardener v0.0.0-20190913144920-5b4adb9f114d/go.mod h1:8LOLWzproKzfww5LdDo0/7FyVQR7gR1QA5LQc5buDzQ= +github.com/gardener/gardener v0.0.0-20191028054636-32cb0027c126/go.mod h1:Ow7vOXQYgQeHTRFn2HdbEIptelrSB5NLmVFIt2rgNeY= +github.com/gardener/gardener v0.33.6 h1:pb7T/zmMP4ZZqlvfzfvGQCweHwcLaMZv6S8WPJfuCpc= +github.com/gardener/gardener v0.33.6/go.mod h1:gId1y7wYKkBGHo8oRzpObfQsh14Tb6f2qs8SSt36Ydg= +github.com/gardener/gardener-extensions v0.0.0-20191028142629-438a3dcf5eca/go.mod h1:+Rhnk/Jjn0pvWi2WWpSiSnlugHGfBikMvnJI7Y56fJY= github.com/gardener/gardener-resource-manager v0.0.0-20190828115855-7ceeb3021993/go.mod h1:l18ykpXeMDrrrtiA99YTdvZPW2TOaHIR/LrtbVELIiU= +github.com/gardener/gardener-resource-manager v0.0.0-20191025075317-09173887c1a7/go.mod h1:sx7C8db4Q/gyddvFPZ+vbfCZpXmsRMpsfgqwn42PpXg= +github.com/gardener/gardener-resource-manager v0.8.1/go.mod h1:e6ORXVT0tt/rKz4CZwC/AVMCpebY0pkWcYrmShhULRw= github.com/gardener/hvpa-controller v0.0.0-20190924063424-ef5c3668949d/go.mod h1:YUvzinEboe8b9FTflj+wGvXaZBHHhQTd+R9Vk581wIE= -github.com/gardener/machine-controller-manager v0.0.0-20190228095106-36a42c48af0a/go.mod h1:/ZDmMugbfkBfOO6/nrdnqV+OMSEuF75wrF9KFIuqndM= +github.com/gardener/hvpa-controller v0.0.0-20191014062307-fad3bdf06a25/go.mod h1:yj7YJ6ijo4adcpXQKutPFZfQuKLdM5UMZZUlpbM3vig= github.com/gardener/machine-controller-manager v0.0.0-20190606071036-119056ee3fdd/go.mod h1:/ZDmMugbfkBfOO6/nrdnqV+OMSEuF75wrF9KFIuqndM= +github.com/gardener/machine-controller-manager v0.0.0-20191118095523-e30355bc7945/go.mod h1:anjeXFvK/AWZAhWT17qXFjt8SgZgbQaHbZIL7vf2lqo= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/go-ini/ini v1.42.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/go-ini/ini v1.44.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/go-ini/ini v1.46.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.36.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54= github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-logr/zapr v0.1.1 
h1:qXBXPDdNncunGs7XeEpsJt8wCjYBygluzfdLO0G5baE= github.com/go-logr/zapr v0.1.1/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-stack/stack v1.8.0/go.mod 
h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/envy v1.7.0 h1:GlXgaiBkmrYMHco6t4j7SacKO4XUjvh5pwXh0f4uxXU= github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.5 h1:xpKq9ap8MbYfhuPCF0dBH854Gp9CxZjr/IocxELFflo= +github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= -github.com/gobuffalo/logger v1.0.1/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs= github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q= github.com/gobuffalo/packr v1.25.0/go.mod h1:NqsGg8CSB2ZD+6RBIRs18G7aZqdYDlYNNvsSqP6T4/U= github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= github.com/gobuffalo/packr/v2 v2.1.0/go.mod h1:n90ZuXIc2KN2vFAOQascnPItp9A2g9QYSvYvS3AjQEM= github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod 
h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 h1:u4bArs140e9+AfE52mFHOXVFnOSBJBRlzTHrOPLOIhE= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= @@ -202,56 +260,58 @@ github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= +github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= 
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.3.0 h1:CcQijm0XKekKjP/YCz28LXVSpgguuB+nCxaSjCe09y0= github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk= +github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/gophercloud/gophercloud v0.0.0-20190212181753-892256c46858/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gophercloud/gophercloud v0.2.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/gophercloud v0.3.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/utils v0.0.0-20190527093828-25f1b77b8c03/go.mod h1:SZ9FTKibIotDtCrxAU/evccoyu1yhKST6hgBvwTB5Eg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= +github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod 
h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.3/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.11.3/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 h1:UnszMmmmm5vLwWzDjTFVIkfhvWF1NdrmChl8L2NUDCw= github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= @@ -260,30 +320,36 @@ github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.2.0 h1:yPeWdRnmynF7p+lLYz0H2tthW9lqhMJrQV/U7yy4wX0= github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= -github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= +github.com/huandu/xstrings v1.2.1 h1:v6IdmkCnDhJG/S0ivr58PeIfg+tyhqQYy4YsCsQ0Pdc= +github.com/huandu/xstrings v1.2.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod 
h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= github.com/jetstack/cert-manager v0.6.2/go.mod h1:nbddmhjWxYGt04bxvwVGUSeLhZ2PCyNvd7MpXdq+yWY= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE= +github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= -github.com/karrick/godirwalk v1.10.12/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= @@ -294,100 +360,116 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson 
v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mholt/archiver v3.1.1+incompatible/go.mod h1:Dh2dOXnSdiLxRiPoVfIr/fI1TwETms9B8CTWfeh7ROU= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= +github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= +github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20190414153302-2ae31c8b6b30/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= 
github.com/nwaples/rardecode v1.0.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= -github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.4.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3 h1:OoxbjfXVZyod1fmWYhI7SEyaD8B00ynP3T+D5GiyHOY= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= -github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/packethost/packngo v0.0.0-20181217122008-b3b45f1b4979/go.mod h1:otzZQXgoO96RTzDB/Hycg0qZcXZsWJGJRSXbmEIJ+4M= -github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c h1:MUyE44mTvnI5A0xrxIxaMqoWFzPfQvtE2IWUollMDMs= github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/pierrec/lz4 v0.0.0-20190415111752-9419d2361c8a/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.3.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pquerna/cachecontrol 
v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.0 h1:tXuTFVHC03mW0D+Ua1Q2d1EAVqLTuggX50V0VLICCzY= github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= +github.com/prometheus/client_golang v1.3.0 h1:miYCvYqFXtl/J9FIy8eNpBfYthAEFg+Ys0XyUVEcDsc= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e h1:n/3MEhJQjQxrOUCzh1Y3Re6aJUUWRp2M9+Oc3eVn/54= +github.com/prometheus/client_model v0.1.0 h1:ElTg5tNp4DqfV7UQjDqv2+RJlNzsDtvNAWccbItceIE= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.3.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo18YNX3jzl+4G6Bstwt+kqv47GS12uL0= +github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs 
v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190412120340-e22ddced7142/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/robfig/cron v0.0.0-20171101201047-2315d5715e36/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= +github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.3.0 h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -400,6 +482,7 @@ github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.3/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= @@ -408,90 +491,112 @@ github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3 github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.2 h1:Fy0orTDgHdbnzHcsOgfCN4LtHf0ec3wwtiwJqwvf3Gc= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 
h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= +github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opencensus.io v0.17.0/go.mod h1:mp1VrMQxhlqqDpKvH4UcQUa4YwlzNmymAjPrDdfxNpI= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= -go.opencensus.io v0.18.1-0.20181204023538-aab39bd6a98b/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= -go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A= -go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= +go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod 
h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.5.1 h1:rsqfU5vBkVknbhUGbAUwQKR2H4ItV8tjJ+6kJX4cxHM= +go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E= +go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= -golang.org/x/build v0.0.0-20190314133821-5284462c4bec/go.mod h1:atTaCNAy0f16Ah5aV1gMSwgiKVHwu/JncqDpuRr7lS4= -golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac h1:7d7lG9fHOLdL6jZPtnV4LpI41SbohIJ1Atq7U991dMg= +go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191128160524-b544559bb6d1 h1:anGSYQpPhQwXlwsu5wmfq0nWkCNaMEMUwAv13Y92hd8= 
-golang.org/x/crypto v0.0.0-20191128160524-b544559bb6d1/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e h1:egKlR8l7Nu9vHGWbcUV8lqR4987UfUbBd7GbhqGzNYU= -golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876 h1:sKJQZMuxjOAR/Uo2LBfU90onWEf1dF4C+0hPJCc9Mpc= +golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180112015858-5ccada7d0a7b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -499,76 +604,72 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA= +golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933 h1:e6HwijUxhDe+hPNjZQQn9bA5PW3vNmnN64U2ZW759Lk= -golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191204025024-5ee1b9f4859a h1:+HHJiFUXVOIS9mr1ThqkQD1N8vpFCfCShqADBM12KTc= -golang.org/x/net v0.0.0-20191204025024-5ee1b9f4859a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 
v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180117170059-2c42eef0765b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190429190828-d89cdac9e872/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7 h1:LepdCS8Gf/MVejFIt8lsiexZATdoGVyp5bcyS+rYoUI= -golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa h1:KIDDMLT1O0Nr7TSxp8xM5tJcdn8tgyAONntO829og1M= golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9 h1:ZBzSG/7F4eNKz2L3GE9o300RX0Az1Bw5HF7PDraD+qU= -golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e h1:9vRrk9YW2BTzLP0VCB9ZDjU4cPqkg+IDWL7XgxA1yxQ= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8 h1:JA8d3MPx/IToSyXZG/RhwYEtfrKO1Fxrqe8KrkiLXKM= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20171227012246-e19ae1496984/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 
h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190213192042-740235f6c0d8/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -582,112 +683,163 @@ golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624180213-70d37148ca0c/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0 h1:Dh6fw+p6FyRl5x/FvNswO1ji0lIGzm3KP8Y9VkS9PTE= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191204193430-660eba4da30b h1:elgBWZKAc17JKH5hb6mZMAt+QOr0+v3Pvand8OOXAJs= -golang.org/x/tools v0.0.0-20191204193430-660eba4da30b/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361 h1:RIIXAeV6GvDBuADKumTODatUqANFZ+5BPMnzsy4hulY= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20191230220329-2aa90c603ae3 h1:2+KluhQfJ1YhW+TB1KrISS2SfiG1pLEoseB0D4VF/bo= +golang.org/x/tools v0.0.0-20191230220329-2aa90c603ae3/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.0.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= +gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0= +gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= +gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.2.0/go.mod h1:IfRCZScioGtypHNTlz3gFk67J8uePVW7uDTBzXuIkhU= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto 
v0.0.0-20181219182458-5a97ab628bfb/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190227213309-4f5b463f9597/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.15.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= -google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.44.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/natefinch/lumberjack.v2 v2.0.0-20170531160350-a96e63847dc3/go.mod 
h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.0.0/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966 h1:B0J02caTR6tpSJozBJyiAzT6CtBzjclw4pgm9gg8Ys0= +gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.0.0-20190313235455-40a48860b5ab h1:DG9A67baNpoeweOy2spF1OWHhnVY5KR7/Ek/+U1lVZc= -k8s.io/api v0.0.0-20190313235455-40a48860b5ab/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= -k8s.io/apiextensions-apiserver v0.0.0-20190315093550-53c4693659ed h1:rCteec//ELIjZMfjIGQbVtZooyaofqDJwsmWwWKItNs= -k8s.io/apiextensions-apiserver v0.0.0-20190315093550-53c4693659ed/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= -k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1 h1:IS7K02iBkQXpCeieSiyJjGoLSdVOv2DbPaWHJ+ZtgKg= -k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= -k8s.io/apiserver v0.0.0-20190313205120-8b27c41bdbb1/go.mod h1:6bqaTSOSJavUIXUtfaR9Os9JtTCm8ZqH2SUl2S60C4w= -k8s.io/autoscaler v0.0.0-20190425094807-60ef58f770d8/go.mod h1:QEXezc9uKPT91dwqhSJq3GNI3B1HxFRQHiku9kmrsSA= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +k8s.io/api v0.0.0-20190918155943-95b840bb6a1f h1:8FRUST8oUkEI45WYKyD8ed7Ad0Kg5v11zHyPkEVb2xo= +k8s.io/api 
v0.0.0-20190918155943-95b840bb6a1f/go.mod h1:uWuOHnjmNrtQomJrvEBg0c0HRNyQ+8KTEERVsK0PW48= +k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783 h1:V6ndwCPoao1yZ52agqOKaUAl7DYWVGiXjV7ePA2i610= +k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY= +k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655 h1:CS1tBQz3HOXiseWZu6ZicKX361CZLT97UFnnPx0aqBw= +k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655/go.mod h1:nL6pwRT8NgfF8TT68DBI8uEePRt89cSvoXUVqbkWHq4= +k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg= k8s.io/autoscaler v0.0.0-20190805135949-100e91ba756e/go.mod h1:QEXezc9uKPT91dwqhSJq3GNI3B1HxFRQHiku9kmrsSA= -k8s.io/client-go v11.0.0+incompatible h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o= -k8s.io/client-go v11.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= -k8s.io/cluster-bootstrap v0.0.0-20190314002537-50662da99b70/go.mod h1:iBSm2nwo3OaiuW8VDvc3ySDXK5SKfUrxwPvBloKG7zg= -k8s.io/code-generator v0.0.0-20190311093542-50b561225d70/go.mod h1:MYiN+ZJZ9HkETbgVZdWw2AsuAi9PZ4V80cwfuf2axe8= -k8s.io/component-base v0.0.0-20190314000054-4a91899592f4/go.mod h1:DMaomcf3j3MM2j1FsvlLVVlc7wA2jPytEur3cP9zRxQ= +k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90 h1:mLmhKUm1X+pXu0zXMEzNsOF5E2kKFGe5o6BZBIIqA6A= +k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90/go.mod h1:J69/JveO6XESwVgG53q3Uz5OSfgsv4uxpScmmyYOOlk= +k8s.io/cluster-bootstrap v0.0.0-20190918163108-da9fdfce26bb/go.mod h1:mQVbtFRxlw/BzBqBaQwIMzjDTST1KrGtzWaR4CGlsTU= +k8s.io/code-generator v0.0.0-20190912054826-cd179ad6a269/go.mod h1:V5BD6M4CyaN5m+VthcclXWsVcT1Hu+glwa1bi3MIsyE= +k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190327210449-e17681d19d3a/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20190826232639-a874a240740c/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/helm v2.13.1+incompatible h1:qt0LBsHQ7uxCtS3F2r3XI0DNm8ml0xQeSJixUorDyn0= k8s.io/helm v2.13.1+incompatible/go.mod h1:LZzlS4LQBHfciFOurYBFkCMTaZ0D1l+p0teMg7TSULI= -k8s.io/klog v0.1.0 h1:I5HMfc/DtuVaGR1KPwUrTc476K8NCqNBldC7H4dYEzk= -k8s.io/klog v0.1.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/kube-aggregator v0.0.0-20190314000639-da8327669ac5/go.mod h1:8sbzT4QQKDEmSCIbfqjV0sd97GpUT7A4W626sBiYJmU= -k8s.io/kube-openapi v0.0.0-20190320154901-5e45bb682580 h1:fq0ZXW/BAIFZH+dazlups6JTVdwzRo5d9riFA103yuQ= -k8s.io/kube-openapi v0.0.0-20190320154901-5e45bb682580/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.3/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.4.0 h1:lCJCxf/LIowc2IGS9TPjWDyXY4nOmdGdfcwwDQCOURQ= +k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/kube-aggregator 
v0.0.0-20190918161219-8c8f079fddc3/go.mod h1:NJisPUqwlg1A99RhO1BTnNtwC4pKUyXJ2f3Xc4PxKQg= +k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf h1:EYm5AW/UUDbnmnI+gK0TJDVK9qPLhM+sRHYanNKw0EQ= +k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kubelet v0.0.0-20190314002251-f6da02f58325/go.mod h1:m6JOtVhjgs4GGnzhPpXuNF9VG+IjARwo/dHCNw4+QDA= k8s.io/metrics v0.0.0-20190816224245-c61a0d549e17/go.mod h1:a25VAbm3QT3xiVl1jtoF1ueAKQM149UdZ+L93ePfV3M= -k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 h1:VBM/0P5TWxwk+Nw6Z+lAw3DKgO76g90ETOiA6rfLV1Y= +k8s.io/metrics v0.0.0-20191004105854-2e8cf7d0888c/go.mod h1:a25VAbm3QT3xiVl1jtoF1ueAKQM149UdZ+L93ePfV3M= k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20190607212802-c55fbcfc754a/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20190712204705-3dccf664f023 h1:1H4Jyzb0z2X0GfBMTwRjnt5ejffRHrGftUgJcV/ZfDc= -k8s.io/utils v0.0.0-20190712204705-3dccf664f023/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20190801114015-581e00157fb1 h1:+ySTxfHnfzZb9ys375PXNlLhkJPLKgHajBU0N62BDvE= +k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20191218082557-f07c713de883 h1:TA8t8OLS8m3/0dtTckekO0pCQ7qMnD19fsZTQEgCSKQ= +k8s.io/utils v0.0.0-20191218082557-f07c713de883/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= +modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= +modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= sigs.k8s.io/controller-runtime v0.2.0-beta.2 h1:hOWldx1qmGI9TsU+uUsq1xTgVmUV7AZo08VAYX0dwGI= sigs.k8s.io/controller-runtime v0.2.0-beta.2/go.mod h1:TSH2R0nSz4WAlUUlNnOFcOR/VUhfwBLlmtq2X6AiQCA= +sigs.k8s.io/controller-runtime v0.2.0-beta.4/go.mod h1:HweyYKQ8fBuzdu2bdaeBJvsFgAi/OqBBnrVGXcqKhME= +sigs.k8s.io/controller-runtime v0.2.0-beta.5/go.mod h1:HweyYKQ8fBuzdu2bdaeBJvsFgAi/OqBBnrVGXcqKhME= +sigs.k8s.io/controller-runtime v0.2.2 h1:JT/vJJhUjjL9NZNwnm8AXmqCBUXSCFKmTaNjwDi28N0= +sigs.k8s.io/controller-runtime v0.2.2/go.mod h1:9dyohw3ZtoXQuV1e766PHUn+cmrRCIcBh6XIMFNMZ+I= +sigs.k8s.io/controller-runtime v0.4.0 h1:wATM6/m+3w8lj8FXNaO6Fs/rq/vqoOjO1Q116Z9NPsg= +sigs.k8s.io/controller-runtime v0.4.0/go.mod h1:ApC79lpY3PHW9xj/w9pj+lYkLgwAAUZwfXkME1Lajns= +sigs.k8s.io/controller-tools v0.2.4 h1:la1h46EzElvWefWLqfsXrnsO3lZjpkI0asTpX6h8PLA= +sigs.k8s.io/controller-tools v0.2.4/go.mod h1:m/ztfQNocGYBgTTCmFdnK94uVvgxeZeE3LtJvd/jIzA= sigs.k8s.io/structured-merge-diff v0.0.0-20190302045857-e85c7b244fd2/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA= sigs.k8s.io/testing_frameworks v0.1.1 h1:cP2l8fkA3O9vekpy5Ks8mmA0NW/F7yBdXf8brkWhVrs= sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= +sigs.k8s.io/testing_frameworks v0.1.2 
h1:vK0+tvjF0BZ/RYFeZ1E6BYBwHJJXhjuZ3TdsEKH+UQM= +sigs.k8s.io/testing_frameworks v0.1.2/go.mod h1:ToQrwSC3s8Xf/lADdZp3Mktcql9CG0UAmdJG9th5i0w= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/pkg/utils/miscellaneous.go b/pkg/utils/miscellaneous.go index b7745e80d..64e44622e 100644 --- a/pkg/utils/miscellaneous.go +++ b/pkg/utils/miscellaneous.go @@ -37,6 +37,7 @@ const ( gcs = "GCS" oss = "OSS" swift = "Swift" + local = "Local" ) // ValueExists returns true or false, depending on whether the given string @@ -157,6 +158,10 @@ func StorageProviderFromInfraProvider(infra druidv1alpha1.StorageProvider) strin storage = swift case gcp: storage = gcs + case local: + storage = local + default: + storage = "" } return storage } diff --git a/vendor/cloud.google.com/go/compute/metadata/.repo-metadata.json b/vendor/cloud.google.com/go/compute/metadata/.repo-metadata.json new file mode 100644 index 000000000..ca022ccc4 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/.repo-metadata.json @@ -0,0 +1,12 @@ +{ + "name": "metadata", + "name_pretty": "Google Compute Engine Metadata API", + "product_documentation": "https://cloud.google.com/compute/docs/storing-retrieving-metadata", + "client_documentation": "https://godoc.org/cloud.google.com/go/compute/metadata", + "release_level": "ga", + "language": "go", + "repo": "googleapis/google-cloud-go", + "distribution_name": "cloud.google.com/go/compute/metadata", + "api_id": "compute:metadata", + "requires_billing": false +} diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 125b7033c..0b50c2a7a 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -227,6 +227,9 @@ func InternalIP() (string, error) { return defaultClient.InternalIP() } // ExternalIP returns the instance's primary external (public) IP address. func ExternalIP() (string, error) { return defaultClient.ExternalIP() } +// Email calls Client.Email on the default client. +func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) } + // Hostname returns the instance's hostname. This will be of the form // ".c..internal". func Hostname() (string, error) { return defaultClient.Hostname() } @@ -301,7 +304,10 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) { host = metadataIP } u := "http://" + host + "/computeMetadata/v1/" + suffix - req, _ := http.NewRequest("GET", u, nil) + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return "", "", err + } req.Header.Set("Metadata-Flavor", "Google") req.Header.Set("User-Agent", userAgent) res, err := c.hc.Do(req) @@ -367,6 +373,16 @@ func (c *Client) InternalIP() (string, error) { return c.getTrimmed("instance/network-interfaces/0/ip") } +// Email returns the email address associated with the service account. +// The account may be empty or the string "default" to use the instance's +// main account. +func (c *Client) Email(serviceAccount string) (string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return c.getTrimmed("instance/service-accounts/" + serviceAccount + "/email") +} + // ExternalIP returns the instance's primary external (public) IP address. 
func (c *Client) ExternalIP() (string, error) { return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") @@ -394,11 +410,7 @@ func (c *Client) InstanceTags() ([]string, error) { // InstanceName returns the current VM's instance ID string. func (c *Client) InstanceName() (string, error) { - host, err := c.Hostname() - if err != nil { - return "", err - } - return strings.Split(host, ".")[0], nil + return c.getTrimmed("instance/name") } // Zone returns the current VM's zone, such as "us-central1-b". diff --git a/vendor/github.com/Masterminds/semver/.travis.yml b/vendor/github.com/Masterminds/semver/.travis.yml index 3d9ebadb9..096369d44 100644 --- a/vendor/github.com/Masterminds/semver/.travis.yml +++ b/vendor/github.com/Masterminds/semver/.travis.yml @@ -6,6 +6,8 @@ go: - 1.8.x - 1.9.x - 1.10.x + - 1.11.x + - 1.12.x - tip # Setting sudo access to false will let Travis CI use containers rather than diff --git a/vendor/github.com/Masterminds/semver/CHANGELOG.md b/vendor/github.com/Masterminds/semver/CHANGELOG.md index b888e20ab..e405c9a84 100644 --- a/vendor/github.com/Masterminds/semver/CHANGELOG.md +++ b/vendor/github.com/Masterminds/semver/CHANGELOG.md @@ -1,3 +1,26 @@ +# 1.5.0 (2019-09-11) + +## Added + +- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c) + +## Changed + +- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil) +- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil) +- #72: Adding docs comment pointing to vert for a cli +- #71: Update the docs on pre-release comparator handling +- #89: Test with new go versions (thanks @thedevsaddam) +- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll) + +## Fixed + +- #78: Fix unchecked error in example code (thanks @ravron) +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case +- #97: Fixed copyright file for proper display on GitHub +- #107: Fix handling prerelease when sorting alphanum and num +- #109: Fixed where Validate sometimes returns wrong message on error + # 1.4.2 (2018-04-10) ## Changed diff --git a/vendor/github.com/Masterminds/semver/LICENSE.txt b/vendor/github.com/Masterminds/semver/LICENSE.txt index 0da4aeadb..9ff7da9c4 100644 --- a/vendor/github.com/Masterminds/semver/LICENSE.txt +++ b/vendor/github.com/Masterminds/semver/LICENSE.txt @@ -1,5 +1,4 @@ -The Masterminds -Copyright (C) 2014-2015, Matt Butcher and Matt Farina +Copyright (C) 2014-2019, Matt Butcher and Matt Farina Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/Masterminds/semver/README.md b/vendor/github.com/Masterminds/semver/README.md index 3e934ed71..1b52d2f43 100644 --- a/vendor/github.com/Masterminds/semver/README.md +++ b/vendor/github.com/Masterminds/semver/README.md @@ -11,6 +11,9 @@ The `semver` package provides the ability to work with [Semantic Versions](http: Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html) [![Build Status](https://travis-ci.org/Masterminds/semver.svg)](https://travis-ci.org/Masterminds/semver) [![Build status](https://ci.appveyor.com/api/projects/status/jfk66lib7hb985k8/branch/master?svg=true&passingText=windows%20build%20passing&failingText=windows%20build%20failing)](https://ci.appveyor.com/project/mattfarina/semver/branch/master) 
[![GoDoc](https://godoc.org/github.com/Masterminds/semver?status.svg)](https://godoc.org/github.com/Masterminds/semver) [![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver) +If you are looking for a command line tool for version comparisons please see +[vert](https://github.com/Masterminds/vert) which uses this library. + ## Parsing Semantic Versions To parse a semantic version use the `NewVersion` function. For example, @@ -80,14 +83,32 @@ The basic comparisons are: * `>=`: greater than or equal to * `<=`: less than or equal to -_Note, according to the Semantic Version specification pre-releases may not be -API compliant with their release counterpart. It says,_ +## Working With Pre-release Versions + +Pre-releases, for those not familiar with them, are used for software releases +prior to stable or generally available releases. Examples of pre-releases include +development, alpha, beta, and release candidate releases. A pre-release may be +a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the +order of precidence, pre-releases come before their associated releases. In this +example `1.2.3-beta.1 < 1.2.3`. + +According to the Semantic Version specification pre-releases may not be +API compliant with their release counterpart. It says, + +> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version. -> _A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version._ +SemVer comparisons without a pre-release comparator will skip pre-release versions. +For example, `>=1.2.3` will skip pre-releases when looking at a list of releases +while `>=1.2.3-0` will evaluate and find pre-releases. -_SemVer comparisons without a pre-release value will skip pre-release versions. -For example, `>1.2.3` will skip pre-releases when looking at a list of values -while `>1.2.3-alpha.1` will evaluate pre-releases._ +The reason for the `0` as a pre-release version in the example comparison is +because pre-releases can only contain ASCII alphanumerics and hyphens (along with +`.` separators), per the spec. Sorting happens in ASCII sort order, again per the spec. The lowest character is a `0` in ASCII sort order (see an [ASCII Table](http://www.asciitable.com/)) + +Understanding ASCII sort ordering is important because A-Z comes before a-z. That +means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case +sensitivity doesn't apply here. This is due to ASCII sort ordering which is what +the spec specifies. ## Hyphen Range Comparisons @@ -105,7 +126,7 @@ back to the pack level comparison (see tilde below). For example, * `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` * `>= 1.2.x` is equivalent to `>= 1.2.0` -* `<= 2.x` is equivalent to `<= 3` +* `<= 2.x` is equivalent to `< 3` * `*` is equivalent to `>= 0.0.0` ## Tilde Range Comparisons (Patch) @@ -126,6 +147,7 @@ The caret (`^`) comparison operator is for major level changes. This is useful when comparisons of API versions as a major change is API breaking. 
For example, * `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` +* `^0.0.1` is equivalent to `>= 0.0.1, < 1.0.0` * `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` * `^2.3` is equivalent to `>= 2.3, < 3` * `^2.x` is equivalent to `>= 2.0.0, < 3` @@ -159,6 +181,13 @@ version didn't meet the constraint is returned. For example, } ``` +# Fuzzing + + [dvyukov/go-fuzz](https://github.com/dvyukov/go-fuzz) is used for fuzzing. + +1. `go-fuzz-build` +2. `go-fuzz -workdir=fuzz` + # Contribute If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues) diff --git a/vendor/github.com/Masterminds/semver/constraints.go b/vendor/github.com/Masterminds/semver/constraints.go index a41a6a7a4..b94b93413 100644 --- a/vendor/github.com/Masterminds/semver/constraints.go +++ b/vendor/github.com/Masterminds/semver/constraints.go @@ -65,13 +65,30 @@ func (cs Constraints) Check(v *Version) bool { func (cs Constraints) Validate(v *Version) (bool, []error) { // loop over the ORs and check the inner ANDs var e []error + + // Capture the prerelease message only once. When it happens the first time + // this var is marked + var prerelesase bool for _, o := range cs.constraints { joy := true for _, c := range o { - if !c.check(v) { - em := fmt.Errorf(c.msg, v, c.orig) - e = append(e, em) + // Before running the check handle the case there the version is + // a prerelease and the check is not searching for prereleases. + if c.con.pre == "" && v.pre != "" { + if !prerelesase { + em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + e = append(e, em) + prerelesase = true + } joy = false + + } else { + + if !c.check(v) { + em := fmt.Errorf(c.msg, v, c.orig) + e = append(e, em) + joy = false + } } } @@ -233,12 +250,6 @@ func constraintNotEqual(v *Version, c *constraint) bool { func constraintGreaterThan(v *Version, c *constraint) bool { - // An edge case the constraint is 0.0.0 and the version is 0.0.0-someprerelease - // exists. This that case. - if !isNonZero(c.con) && isNonZero(v) { - return true - } - // If there is a pre-release on the version but the constraint isn't looking // for them assume that pre-releases are not compatible. See issue 21 for // more details. @@ -271,11 +282,6 @@ func constraintLessThan(v *Version, c *constraint) bool { } func constraintGreaterThanEqual(v *Version, c *constraint) bool { - // An edge case the constraint is 0.0.0 and the version is 0.0.0-someprerelease - // exists. This that case. - if !isNonZero(c.con) && isNonZero(v) { - return true - } // If there is a pre-release on the version but the constraint isn't looking // for them assume that pre-releases are not compatible. See issue 21 for @@ -415,12 +421,3 @@ func rewriteRange(i string) string { return o } - -// Detect if a version is not zero (0.0.0) -func isNonZero(v *Version) bool { - if v.Major() != 0 || v.Minor() != 0 || v.Patch() != 0 || v.Prerelease() != "" { - return true - } - - return false -} diff --git a/vendor/github.com/Masterminds/semver/doc.go b/vendor/github.com/Masterminds/semver/doc.go index e00f65eb7..6a6c24c6d 100644 --- a/vendor/github.com/Masterminds/semver/doc.go +++ b/vendor/github.com/Masterminds/semver/doc.go @@ -47,7 +47,7 @@ parts of the package. // Handle constraint not being parseable. } - v, _ := semver.NewVersion("1.3") + v, err := semver.NewVersion("1.3") if err != nil { // Handle version not being parseable. 
} diff --git a/vendor/github.com/Masterminds/semver/version.go b/vendor/github.com/Masterminds/semver/version.go index 9d22ea630..400d4f934 100644 --- a/vendor/github.com/Masterminds/semver/version.go +++ b/vendor/github.com/Masterminds/semver/version.go @@ -34,7 +34,7 @@ const SemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + // ValidPrerelease is the regular expression which validates // both prerelease and metadata values. -const ValidPrerelease string = `^([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*)` +const ValidPrerelease string = `^([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*)$` // Version represents a single semantic version. type Version struct { @@ -106,7 +106,7 @@ func MustParse(v string) *Version { // Note, if the original version contained a leading v this version will not. // See the Original() method to retrieve the original value. Semantic Versions // don't contain a leading v per the spec. Instead it's optional on -// impelementation. +// implementation. func (v *Version) String() string { var buf bytes.Buffer @@ -394,10 +394,14 @@ func comparePrePart(s, o string) int { } // When comparing strings "99" is greater than "103". To handle - // cases like this we need to detect numbers and compare them. - - oi, n1 := strconv.ParseInt(o, 10, 64) - si, n2 := strconv.ParseInt(s, 10, 64) + // cases like this we need to detect numbers and compare them. According + // to the semver spec, numbers are always positive. If there is a - at the + // start like -99 this is to be evaluated as an alphanum. numbers always + // have precedence over alphanum. Parsing as Uints because negative numbers + // are ignored. + + oi, n1 := strconv.ParseUint(o, 10, 64) + si, n2 := strconv.ParseUint(s, 10, 64) // The case where both are strings compare the strings if n1 != nil && n2 != nil { diff --git a/vendor/github.com/Masterminds/semver/version_fuzz.go b/vendor/github.com/Masterminds/semver/version_fuzz.go new file mode 100644 index 000000000..b42bcd62b --- /dev/null +++ b/vendor/github.com/Masterminds/semver/version_fuzz.go @@ -0,0 +1,10 @@ +// +build gofuzz + +package semver + +func Fuzz(data []byte) int { + if _, err := NewVersion(string(data)); err != nil { + return 0 + } + return 1 +} diff --git a/vendor/github.com/Masterminds/sprig/.travis.yml b/vendor/github.com/Masterminds/sprig/.travis.yml index 482aa3cd0..b9da8b825 100644 --- a/vendor/github.com/Masterminds/sprig/.travis.yml +++ b/vendor/github.com/Masterminds/sprig/.travis.yml @@ -4,6 +4,8 @@ go: - 1.9.x - 1.10.x - 1.11.x + - 1.12.x + - 1.13.x - tip # Setting sudo access to false will let Travis CI use containers rather than diff --git a/vendor/github.com/Masterminds/sprig/CHANGELOG.md b/vendor/github.com/Masterminds/sprig/CHANGELOG.md index 0a8069c0a..6a79fbde4 100644 --- a/vendor/github.com/Masterminds/sprig/CHANGELOG.md +++ b/vendor/github.com/Masterminds/sprig/CHANGELOG.md @@ -1,5 +1,36 @@ # Changelog +## Release 2.22.0 (2019-10-02) + +### Added + +- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos) +- #195: Added deepCopy function for use with dicts + +### Changed + +- Updated merge and mergeOverwrite documentation to explain copying and how to + use deepCopy with it + +## Release 2.21.0 (2019-09-18) + +### Added + +- #122: Added encryptAES/decryptAES functions (thanks @n0madic) +- #128: Added toDecimal support (thanks @Dean-Coakley) +- #169: Added list contcat (thanks @astorath) +- #174: Added deepEqual function (thanks @bonifaido) +- #170: Added url parse and join functions (thanks @astorath) + +### Changed + +- 
#171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify + +### Fixed + +- #172: Fix semver wildcard example (thanks @piepmatz) +- #175: Fix dateInZone doc example (thanks @s3than) + ## Release 2.20.0 (2019-06-18) ### Added diff --git a/vendor/github.com/Masterminds/sprig/crypto.go b/vendor/github.com/Masterminds/sprig/crypto.go index 7427deb83..7a418ba88 100644 --- a/vendor/github.com/Masterminds/sprig/crypto.go +++ b/vendor/github.com/Masterminds/sprig/crypto.go @@ -2,6 +2,8 @@ package sprig import ( "bytes" + "crypto/aes" + "crypto/cipher" "crypto/dsa" "crypto/ecdsa" "crypto/elliptic" @@ -19,6 +21,7 @@ import ( "encoding/pem" "errors" "fmt" + "io" "hash/adler32" "math/big" "net" @@ -439,3 +442,61 @@ func getAlternateDNSStrs(alternateDNS []interface{}) ([]string, error) { } return alternateDNSStrs, nil } + +func encryptAES(password string, plaintext string) (string, error) { + if plaintext == "" { + return "", nil + } + + key := make([]byte, 32) + copy(key, []byte(password)) + block, err := aes.NewCipher(key) + if err != nil { + return "", err + } + + content := []byte(plaintext) + blockSize := block.BlockSize() + padding := blockSize - len(content)%blockSize + padtext := bytes.Repeat([]byte{byte(padding)}, padding) + content = append(content, padtext...) + + ciphertext := make([]byte, aes.BlockSize+len(content)) + + iv := ciphertext[:aes.BlockSize] + if _, err := io.ReadFull(rand.Reader, iv); err != nil { + return "", err + } + + mode := cipher.NewCBCEncrypter(block, iv) + mode.CryptBlocks(ciphertext[aes.BlockSize:], content) + + return base64.StdEncoding.EncodeToString(ciphertext), nil +} + +func decryptAES(password string, crypt64 string) (string, error) { + if crypt64 == "" { + return "", nil + } + + key := make([]byte, 32) + copy(key, []byte(password)) + + crypt, err := base64.StdEncoding.DecodeString(crypt64) + if err != nil { + return "", err + } + + block, err := aes.NewCipher(key) + if err != nil { + return "", err + } + + iv := crypt[:aes.BlockSize] + crypt = crypt[aes.BlockSize:] + decrypted := make([]byte, len(crypt)) + mode := cipher.NewCBCDecrypter(block, iv) + mode.CryptBlocks(decrypted, crypt) + + return string(decrypted[:len(decrypted)-int(decrypted[len(decrypted)-1])]), nil +} diff --git a/vendor/github.com/Masterminds/sprig/dict.go b/vendor/github.com/Masterminds/sprig/dict.go index 026eccb73..738405b43 100644 --- a/vendor/github.com/Masterminds/sprig/dict.go +++ b/vendor/github.com/Masterminds/sprig/dict.go @@ -1,6 +1,9 @@ package sprig -import "github.com/imdario/mergo" +import ( + "github.com/imdario/mergo" + "github.com/mitchellh/copystructure" +) func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { d[key] = value @@ -88,13 +91,13 @@ func merge(dst map[string]interface{}, srcs ...map[string]interface{}) interface } func mergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { - for _, src := range srcs { + for _, src := range srcs { if err := mergo.MergeWithOverwrite(&dst, src); err != nil { // Swallow errors inside of a template. 
return "" } - } - return dst + } + return dst } func values(dict map[string]interface{}) []interface{} { @@ -105,3 +108,12 @@ func values(dict map[string]interface{}) []interface{} { return values } + +func deepCopy(i interface{}) interface{} { + c, err := copystructure.Copy(i) + if err != nil { + panic("deepCopy error: " + err.Error()) + } + + return c +} diff --git a/vendor/github.com/Masterminds/sprig/functions.go b/vendor/github.com/Masterminds/sprig/functions.go index 6bf15cf18..7b5b0af86 100644 --- a/vendor/github.com/Masterminds/sprig/functions.go +++ b/vendor/github.com/Masterminds/sprig/functions.go @@ -5,6 +5,7 @@ import ( "html/template" "os" "path" + "reflect" "strconv" "strings" ttemplate "text/template" @@ -84,6 +85,9 @@ var nonhermeticFunctions = []string{ // OS "env", "expandenv", + + // Network + "getHostByName", } var genericMap = map[string]interface{}{ @@ -134,26 +138,27 @@ var genericMap = map[string]interface{}{ "wrap": func(l int, s string) string { return util.Wrap(s, l) }, "wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) }, // Switch order so that "foobar" | contains "foo" - "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, - "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, - "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, - "quote": quote, - "squote": squote, - "cat": cat, - "indent": indent, - "nindent": nindent, - "replace": replace, - "plural": plural, - "sha1sum": sha1sum, - "sha256sum": sha256sum, + "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, + "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, + "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, + "quote": quote, + "squote": squote, + "cat": cat, + "indent": indent, + "nindent": nindent, + "replace": replace, + "plural": plural, + "sha1sum": sha1sum, + "sha256sum": sha256sum, "adler32sum": adler32sum, - "toString": strval, + "toString": strval, // Wrap Atoi to stop errors. - "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, - "int64": toInt64, - "int": toInt, - "float64": toFloat64, + "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, + "int64": toInt64, + "int": toInt, + "float64": toFloat64, + "toDecimal": toDecimal, //"gt": func(a, b int) bool {return a > b}, //"gte": func(a, b int) bool {return a >= b}, @@ -206,6 +211,7 @@ var genericMap = map[string]interface{}{ "empty": empty, "coalesce": coalesce, "compact": compact, + "deepCopy": deepCopy, "toJson": toJson, "toPrettyJson": toPrettyJson, "ternary": ternary, @@ -216,11 +222,15 @@ var genericMap = map[string]interface{}{ "typeIsLike": typeIsLike, "kindOf": kindOf, "kindIs": kindIs, + "deepEqual": reflect.DeepEqual, // OS: "env": func(s string) string { return os.Getenv(s) }, "expandenv": func(s string) string { return os.ExpandEnv(s) }, + // Network: + "getHostByName": getHostByName, + // File Paths: "base": path.Base, "dir": path.Dir, @@ -235,19 +245,19 @@ var genericMap = map[string]interface{}{ "b32dec": base32decode, // Data Structures: - "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. 
- "list": list, - "dict": dict, - "set": set, - "unset": unset, - "hasKey": hasKey, - "pluck": pluck, - "keys": keys, - "pick": pick, - "omit": omit, - "merge": merge, + "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. + "list": list, + "dict": dict, + "set": set, + "unset": unset, + "hasKey": hasKey, + "pluck": pluck, + "keys": keys, + "pick": pick, + "omit": omit, + "merge": merge, "mergeOverwrite": mergeOverwrite, - "values": values, + "values": values, "append": push, "push": push, "prepend": prepend, @@ -260,6 +270,7 @@ var genericMap = map[string]interface{}{ "without": without, "has": has, "slice": slice, + "concat": concat, // Crypto: "genPrivateKey": generatePrivateKey, @@ -268,6 +279,8 @@ var genericMap = map[string]interface{}{ "genCA": generateCertificateAuthority, "genSelfSignedCert": generateSelfSignedCertificate, "genSignedCert": generateSignedCertificate, + "encryptAES": encryptAES, + "decryptAES": decryptAES, // UUIDs: "uuidv4": uuidv4, @@ -286,4 +299,8 @@ var genericMap = map[string]interface{}{ "regexReplaceAll": regexReplaceAll, "regexReplaceAllLiteral": regexReplaceAllLiteral, "regexSplit": regexSplit, + + // URLs: + "urlParse": urlParse, + "urlJoin": urlJoin, } diff --git a/vendor/github.com/Masterminds/sprig/glide.lock b/vendor/github.com/Masterminds/sprig/glide.lock deleted file mode 100644 index 32908e4ed..000000000 --- a/vendor/github.com/Masterminds/sprig/glide.lock +++ /dev/null @@ -1,31 +0,0 @@ -hash: 6a3f4f83c443958625ff1bafadd95c96d20d729f34e8e8c2fa72782194fc4807 -updated: 2019-01-30T20:16:27.780177826+01:00 -imports: -- name: github.com/google/uuid - version: 064e2069ce9c359c118179501254f67d7d37ba24 -- name: github.com/huandu/xstrings - version: f02667b379e2fb5916c3cda2cf31e0eb885d79f8 -- name: github.com/imdario/mergo - version: 7c29201646fa3de8506f701213473dd407f19646 -- name: github.com/Masterminds/goutils - version: 41ac8693c5c10a92ea1ff5ac3a7f95646f6123b0 -- name: github.com/Masterminds/semver - version: 59c29afe1a994eacb71c833025ca7acf874bb1da -- name: github.com/stretchr/testify - version: c679ae2cc0cb27ec3293fea7e254e47386f05d69 - subpackages: - - assert -- name: golang.org/x/crypto - version: de0752318171da717af4ce24d0a2e8626afaeb11 - subpackages: - - pbkdf2 - - scrypt -testImports: -- name: github.com/davecgh/go-spew - version: 782f4967f2dc4564575ca782fe2d04090b5faca8 - subpackages: - - spew -- name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d - subpackages: - - difflib diff --git a/vendor/github.com/Masterminds/sprig/glide.yaml b/vendor/github.com/Masterminds/sprig/glide.yaml index 712b2ba88..f317d2b2b 100644 --- a/vendor/github.com/Masterminds/sprig/glide.yaml +++ b/vendor/github.com/Masterminds/sprig/glide.yaml @@ -3,14 +3,17 @@ import: - package: github.com/Masterminds/goutils version: ^1.0.0 - package: github.com/google/uuid - version: ^0.2 + version: ^1.0.0 - package: golang.org/x/crypto subpackages: - scrypt - package: github.com/Masterminds/semver - version: v1.2.2 + version: ^v1.2.2 - package: github.com/stretchr/testify + version: ^v1.2.2 - package: github.com/imdario/mergo version: ~0.3.7 - package: github.com/huandu/xstrings version: ^1.2 +- package: github.com/mitchellh/copystructure + version: ^1.0.0 diff --git a/vendor/github.com/Masterminds/sprig/list.go b/vendor/github.com/Masterminds/sprig/list.go index 41e136625..c0381bbb6 100644 --- a/vendor/github.com/Masterminds/sprig/list.go +++ b/vendor/github.com/Masterminds/sprig/list.go @@ -292,3 +292,20 @@ 
func slice(list interface{}, indices ...interface{}) interface{} { panic(fmt.Sprintf("list should be type of slice or array but %s", tp)) } } + +func concat(lists ...interface{}) interface{} { + var res []interface{} + for _, list := range lists { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + for i := 0; i < l2.Len(); i++ { + res = append(res, l2.Index(i).Interface()) + } + default: + panic(fmt.Sprintf("Cannot concat type %s as list", tp)) + } + } + return res +} diff --git a/vendor/github.com/Masterminds/sprig/network.go b/vendor/github.com/Masterminds/sprig/network.go new file mode 100644 index 000000000..d786cc736 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/network.go @@ -0,0 +1,12 @@ +package sprig + +import ( + "math/rand" + "net" +) + +func getHostByName(name string) string { + addrs, _ := net.LookupHost(name) + //TODO: add error handing when release v3 cames out + return addrs[rand.Intn(len(addrs))] +} diff --git a/vendor/github.com/Masterminds/sprig/numeric.go b/vendor/github.com/Masterminds/sprig/numeric.go index 4bd89bf7f..f4af4af2a 100644 --- a/vendor/github.com/Masterminds/sprig/numeric.go +++ b/vendor/github.com/Masterminds/sprig/numeric.go @@ -1,6 +1,7 @@ package sprig import ( + "fmt" "math" "reflect" "strconv" @@ -157,3 +158,12 @@ func round(a interface{}, p int, r_opt ...float64) float64 { } return round / pow } + +// converts unix octal to decimal +func toDecimal(v interface{}) int64 { + result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64) + if err != nil { + return 0 + } + return result +} diff --git a/vendor/github.com/Masterminds/sprig/url.go b/vendor/github.com/Masterminds/sprig/url.go new file mode 100644 index 000000000..5f22d801f --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/url.go @@ -0,0 +1,66 @@ +package sprig + +import ( + "fmt" + "net/url" + "reflect" +) + +func dictGetOrEmpty(dict map[string]interface{}, key string) string { + value, ok := dict[key]; if !ok { + return "" + } + tp := reflect.TypeOf(value).Kind() + if tp != reflect.String { + panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String())) + } + return reflect.ValueOf(value).String() +} + +// parses given URL to return dict object +func urlParse(v string) map[string]interface{} { + dict := map[string]interface{}{} + parsedUrl, err := url.Parse(v) + if err != nil { + panic(fmt.Sprintf("unable to parse url: %s", err)) + } + dict["scheme"] = parsedUrl.Scheme + dict["host"] = parsedUrl.Host + dict["hostname"] = parsedUrl.Hostname() + dict["path"] = parsedUrl.Path + dict["query"] = parsedUrl.RawQuery + dict["opaque"] = parsedUrl.Opaque + dict["fragment"] = parsedUrl.Fragment + if parsedUrl.User != nil { + dict["userinfo"] = parsedUrl.User.String() + } else { + dict["userinfo"] = "" + } + + return dict +} + +// join given dict to URL string +func urlJoin(d map[string]interface{}) string { + resUrl := url.URL{ + Scheme: dictGetOrEmpty(d, "scheme"), + Host: dictGetOrEmpty(d, "host"), + Path: dictGetOrEmpty(d, "path"), + RawQuery: dictGetOrEmpty(d, "query"), + Opaque: dictGetOrEmpty(d, "opaque"), + Fragment: dictGetOrEmpty(d, "fragment"), + + } + userinfo := dictGetOrEmpty(d, "userinfo") + var user *url.Userinfo = nil + if userinfo != "" { + tempUrl, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo)) + if err != nil { + panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err)) + } + user = tempUrl.User + } + + resUrl.User = user + return resUrl.String() +} 
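The sprig update above vendors several new template helpers (`urlParse`/`urlJoin`, `concat`, `toDecimal`, `deepCopy`, `encryptAES`/`decryptAES`, `getHostByName`). As a rough illustration of how a consumer of this vendored copy might exercise a few of them — this is a standalone sketch, not part of the diff, and the template contents are arbitrary; `sprig.TxtFuncMap()` is the usual text/template entry point:

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig"
)

func main() {
	// Exercise some helpers added in sprig 2.21/2.22:
	// urlParse/urlJoin round-trip a URL through a dict,
	// concat appends lists, toDecimal converts an octal string.
	const tpl = `host:   {{ (urlParse "https://example.com/search?q=etcd").host }}
joined: {{ urlJoin (dict "scheme" "https" "host" "example.com" "path" "/druid") }}
concat: {{ concat (list 1 2) (list 3) }}
mode:   {{ toDecimal "0640" }}`

	t := template.Must(template.New("demo").Funcs(sprig.TxtFuncMap()).Parse(tpl))
	if err := t.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```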
diff --git a/vendor/github.com/appscode/jsonpatch/.gitignore b/vendor/github.com/appscode/jsonpatch/.gitignore deleted file mode 100644 index 0e9448e05..000000000 --- a/vendor/github.com/appscode/jsonpatch/.gitignore +++ /dev/null @@ -1,26 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -.idea/ diff --git a/vendor/github.com/appscode/jsonpatch/.travis.yml b/vendor/github.com/appscode/jsonpatch/.travis.yml deleted file mode 100644 index 92f2439d7..000000000 --- a/vendor/github.com/appscode/jsonpatch/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go: - - 1.x - - tip - -env: - - GO111MODULE=on - -script: - - go test -v diff --git a/vendor/github.com/appscode/jsonpatch/README.md b/vendor/github.com/appscode/jsonpatch/README.md deleted file mode 100644 index bbbf72921..000000000 --- a/vendor/github.com/appscode/jsonpatch/README.md +++ /dev/null @@ -1,53 +0,0 @@ -# jsonpatch - -[![Build Status](https://travis-ci.org/appscode/jsonpatch.svg?branch=master)](https://travis-ci.org/appscode/jsonpatch) -[![Go Report Card](https://goreportcard.com/badge/appscode/jsonpatch "Go Report Card")](https://goreportcard.com/report/appscode/jsonpatch) -[![GoDoc](https://godoc.org/github.com/appscode/jsonpatch?status.svg "GoDoc")](https://godoc.org/github.com/appscode/jsonpatch) - -As per http://jsonpatch.com JSON Patch is specified in RFC 6902 from the IETF. - -JSON Patch allows you to generate JSON that describes changes you want to make to a document, so you don't have to send the whole doc. JSON Patch format is supported by HTTP PATCH method, allowing for standards based partial updates via REST APIs. - -```console -go get github.com/appscode/jsonpatch -``` - -I tried some of the other "jsonpatch" go implementations, but none of them could diff two json documents and -generate format like jsonpatch.com specifies. Here's an example of the patch format: - -```json -[ - { "op": "replace", "path": "/baz", "value": "boo" }, - { "op": "add", "path": "/hello", "value": ["world"] }, - { "op": "remove", "path": "/foo"} -] - -``` -The API is super simple - -## example - -```go -package main - -import ( - "fmt" - "github.com/appscode/jsonpatch" -) - -var simpleA = `{"a":100, "b":200, "c":"hello"}` -var simpleB = `{"a":100, "b":200, "c":"goodbye"}` - -func main() { - patch, e := jsonpatch.CreatePatch([]byte(simpleA), []byte(simpleA)) - if e != nil { - fmt.Printf("Error creating JSON patch:%v", e) - return - } - for _, operation := range patch { - fmt.Printf("%s\n", operation.Json()) - } -} -``` - -This code needs more tests, as it's a highly recursive, type-fiddly monster. It's not a lot of code, but it has to deal with a lot of complexity. 
diff --git a/vendor/github.com/appscode/jsonpatch/go.mod b/vendor/github.com/appscode/jsonpatch/go.mod deleted file mode 100644 index 458d129ec..000000000 --- a/vendor/github.com/appscode/jsonpatch/go.mod +++ /dev/null @@ -1,8 +0,0 @@ -module github.com/appscode/jsonpatch - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/evanphx/json-patch v4.0.0+incompatible - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/testify v1.2.2 -) diff --git a/vendor/github.com/appscode/jsonpatch/go.sum b/vendor/github.com/appscode/jsonpatch/go.sum deleted file mode 100644 index 0972e0e1a..000000000 --- a/vendor/github.com/appscode/jsonpatch/go.sum +++ /dev/null @@ -1,8 +0,0 @@ -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/evanphx/json-patch v4.0.0+incompatible h1:xregGRMLBeuRcwiOTHRCsPPuzCQlqhxUPbqdw+zNkLc= -github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= diff --git a/vendor/github.com/cespare/xxhash/v2/.travis.yml b/vendor/github.com/cespare/xxhash/v2/.travis.yml new file mode 100644 index 000000000..c516ea88d --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/.travis.yml @@ -0,0 +1,8 @@ +language: go +go: + - "1.x" + - master +env: + - TAGS="" + - TAGS="-tags purego" +script: go test $TAGS -v ./... diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt new file mode 100644 index 000000000..24b53065f --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md new file mode 100644 index 000000000..2fd8693c2 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -0,0 +1,67 @@ +# xxhash + +[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) +[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) + +xxhash is a Go implementation of the 64-bit +[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +This implementation provides a fast pure-Go implementation and an even faster +assembly implementation for amd64. + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. +You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. + +| input size | purego | asm | +| --- | --- | --- | +| 5 B | 979.66 MB/s | 1291.17 MB/s | +| 100 B | 7475.26 MB/s | 7973.40 MB/s | +| 4 KB | 17573.46 MB/s | 17602.65 MB/s | +| 10 MB | 17131.46 MB/s | 17142.16 MB/s | + +These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using +the following commands under Go 1.11.2: + +``` +$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' +$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [FreeCache](https://github.com/coocood/freecache) diff --git a/vendor/github.com/cespare/xxhash/v2/go.mod b/vendor/github.com/cespare/xxhash/v2/go.mod new file mode 100644 index 000000000..49f67608b --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/go.mod @@ -0,0 +1,3 @@ +module github.com/cespare/xxhash/v2 + +go 1.11 diff --git a/vendor/github.com/cespare/xxhash/v2/go.sum b/vendor/github.com/cespare/xxhash/v2/go.sum new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go new file mode 100644 index 000000000..db0b35fbe --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -0,0 +1,236 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// NOTE(caleb): I'm using both consts and vars of the primes. 
Using consts where +// possible in the Go code is worth a small (but measurable) performance boost +// by avoiding some MOVQs. Vars are needed for the asm and also are useful for +// convenience in the Go code in a few places where we need to intentionally +// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the +// result overflows a uint64). +var ( + prime1v = prime1 + prime2v = prime2 + prime3v = prime3 + prime4v = prime4 + prime5v = prime5 +) + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = prime1v + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -prime1v + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(d.mem[d.n:], b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. + copy(d.mem[d.n:], b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[32-d.n:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + i, end := 0, d.n + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(d.mem[i:i+8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(d.mem[i:i+4])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for i < end { + h ^= uint64(d.mem[i]) * prime5 + h = rol11(h) * prime1 + i++ + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. 
+func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + b = b[len(d.mem):] + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) +} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go new file mode 100644 index 000000000..ad14b807f --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go @@ -0,0 +1,13 @@ +// +build !appengine +// +build gc +// +build !purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s new file mode 100644 index 000000000..d580e32ae --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -0,0 +1,215 @@ +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Register allocation: +// AX h +// CX pointer to advance through b +// DX n +// BX loop end +// R8 v1, k1 +// R9 v2 +// R10 v3 +// R11 v4 +// R12 tmp +// R13 prime1v +// R14 prime2v +// R15 prime4v + +// round reads from and advances the buffer pointer in CX. +// It assumes that R13 has prime1v and R14 has prime2v. +#define round(r) \ + MOVQ (CX), R12 \ + ADDQ $8, CX \ + IMULQ R14, R12 \ + ADDQ R12, r \ + ROLQ $31, r \ + IMULQ R13, r + +// mergeRound applies a merge round on the two registers acc and val. +// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. +#define mergeRound(acc, val) \ + IMULQ R14, val \ + ROLQ $31, val \ + IMULQ R13, val \ + XORQ val, acc \ + IMULQ R13, acc \ + ADDQ R15, acc + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT, $0-32 + // Load fixed primes. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + MOVQ ·prime4v(SB), R15 + + // Load slice. + MOVQ b_base+0(FP), CX + MOVQ b_len+8(FP), DX + LEAQ (CX)(DX*1), BX + + // The first loop limit will be len(b)-32. 
+ SUBQ $32, BX + + // Check whether we have at least one block. + CMPQ DX, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). + MOVQ R13, R8 + ADDQ R14, R8 + MOVQ R14, R9 + XORQ R10, R10 + XORQ R11, R11 + SUBQ R13, R11 + + // Loop until CX > BX. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + MOVQ R8, AX + ROLQ $1, AX + MOVQ R9, R12 + ROLQ $7, R12 + ADDQ R12, AX + MOVQ R10, R12 + ROLQ $12, R12 + ADDQ R12, AX + MOVQ R11, R12 + ROLQ $18, R12 + ADDQ R12, AX + + mergeRound(AX, R8) + mergeRound(AX, R9) + mergeRound(AX, R10) + mergeRound(AX, R11) + + JMP afterBlocks + +noBlocks: + MOVQ ·prime5v(SB), AX + +afterBlocks: + ADDQ DX, AX + + // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. + ADDQ $24, BX + + CMPQ CX, BX + JG fourByte + +wordLoop: + // Calculate k1. + MOVQ (CX), R8 + ADDQ $8, CX + IMULQ R14, R8 + ROLQ $31, R8 + IMULQ R13, R8 + + XORQ R8, AX + ROLQ $27, AX + IMULQ R13, AX + ADDQ R15, AX + + CMPQ CX, BX + JLE wordLoop + +fourByte: + ADDQ $4, BX + CMPQ CX, BX + JG singles + + MOVL (CX), R8 + ADDQ $4, CX + IMULQ R13, R8 + XORQ R8, AX + + ROLQ $23, AX + IMULQ R14, AX + ADDQ ·prime3v(SB), AX + +singles: + ADDQ $4, BX + CMPQ CX, BX + JGE finalize + +singlesLoop: + MOVBQZX (CX), R12 + ADDQ $1, CX + IMULQ ·prime5v(SB), R12 + XORQ R12, AX + + ROLQ $11, AX + IMULQ R13, AX + + CMPQ CX, BX + JL singlesLoop + +finalize: + MOVQ AX, R12 + SHRQ $33, R12 + XORQ R12, AX + IMULQ R14, AX + MOVQ AX, R12 + SHRQ $29, R12 + XORQ R12, AX + IMULQ ·prime3v(SB), AX + MOVQ AX, R12 + SHRQ $32, R12 + XORQ R12, AX + + MOVQ AX, ret+24(FP) + RET + +// writeBlocks uses the same registers as above except that it uses AX to store +// the d pointer. + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT, $0-40 + // Load fixed primes needed for round. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + + // Load slice. + MOVQ b_base+8(FP), CX + MOVQ b_len+16(FP), DX + LEAQ (CX)(DX*1), BX + SUBQ $32, BX + + // Load vN from d. + MOVQ d+0(FP), AX + MOVQ 0(AX), R8 // v1 + MOVQ 8(AX), R9 // v2 + MOVQ 16(AX), R10 // v3 + MOVQ 24(AX), R11 // v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + // Copy vN back to d. + MOVQ R8, 0(AX) + MOVQ R9, 8(AX) + MOVQ R10, 16(AX) + MOVQ R11, 24(AX) + + // The number of bytes written is CX minus the old base pointer. + SUBQ b_base+8(FP), CX + MOVQ CX, ret+32(FP) + + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go new file mode 100644 index 000000000..4a5a82160 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -0,0 +1,76 @@ +// +build !amd64 appengine !gc purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. 
+ + n := len(b) + var h uint64 + + if n >= 32 { + v1 := prime1v + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -prime1v + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + i, end := 0, len(b) + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(b[i:i+8:len(b)])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for ; i < end; i++ { + h ^= uint64(b[i]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go new file mode 100644 index 000000000..fc9bea7a3 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -0,0 +1,15 @@ +// +build appengine + +// This file contains the safe implementations of otherwise unsafe-using code. + +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go new file mode 100644 index 000000000..53bf76efb --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -0,0 +1,46 @@ +// +build !appengine + +// This file encapsulates usage of unsafe. +// xxhash_safe.go contains the safe implementations. + +package xxhash + +import ( + "reflect" + "unsafe" +) + +// Notes: +// +// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ +// for some discussion about these unsafe conversions. +// +// In the future it's possible that compiler optimizations will make these +// unsafe operations unnecessary: https://golang.org/issue/2205. +// +// Both of these wrapper functions still incur function call overhead since they +// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write +// for strings to squeeze out a bit more speed. Mid-stack inlining should +// eventually fix this. + +// Sum64String computes the 64-bit xxHash digest of s. +// It may be faster than Sum64([]byte(s)) by avoiding a copy. +func Sum64String(s string) uint64 { + var b []byte + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data + bh.Len = len(s) + bh.Cap = len(s) + return Sum64(b) +} + +// WriteString adds more data to d. It always returns len(s), nil. +// It may be faster than Write([]byte(s)) by avoiding a copy. 
+func (d *Digest) WriteString(s string) (n int, err error) { + var b []byte + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data + bh.Len = len(s) + bh.Cap = len(s) + return d.Write(b) +} diff --git a/vendor/github.com/gardener/gardener/LICENSE.md b/vendor/github.com/gardener/gardener/LICENSE.md index d6df5c437..1a9f9ce9f 100644 --- a/vendor/github.com/gardener/gardener/LICENSE.md +++ b/vendor/github.com/gardener/gardener/LICENSE.md @@ -252,11 +252,6 @@ https://github.com/kubernetes-sigs/controller-runtime Copyright 2018 The Kubernetes Authors. Apache 2 license (https://github.com/kubernetes-sigs/controller-runtime/blob/master/LICENSE). -AWS SDK Go. -https://github.com/aws/aws-sdk-go. -Copyright 2017 Amazon.com, Inc. or its affiliates. -Apache 2 license (https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt) - Cobra https://github.com/spf13/cobra. Copyright 2015 Steve Francia. @@ -302,10 +297,6 @@ https://github.com/jmespath/go-jmespath Copyright 2015 James Saryerwinnie Apache License, Version 2 (https://github.com/jmespath/go-jmespath/blob/master/LICENSE) -alibaba-cloud-sdk-go -https://github.com/aliyun/alibaba-cloud-sdk-go -Apache License 2.0 (https://github.com/aliyun/alibaba-cloud-sdk-go/blob/master/LICENSE) - ------ ## MIT License diff --git a/vendor/github.com/gardener/gardener/NOTICE.md b/vendor/github.com/gardener/gardener/NOTICE.md index 85252d71c..e1fa594eb 100644 --- a/vendor/github.com/gardener/gardener/NOTICE.md +++ b/vendor/github.com/gardener/gardener/NOTICE.md @@ -21,9 +21,7 @@ Apache 2 license (https://git.k8s.io/kubernetes/LICENSE). The following charts of the [`charts`](charts) directory were seeded based on a copy of the following origins: -* [git.k8s.io/charts/stable/kube-lego](https://git.k8s.io/charts/stable/kube-lego) * [git.k8s.io/charts/stable/kube-state-metrics](https://git.k8s.io/charts/stable/kube-state-metrics) -* [git.k8s.io/charts/stable/kube2iam](https://git.k8s.io/charts/stable/kube2iam) * [git.k8s.io/charts/stable/nginx-ingress](https://git.k8s.io/charts/stable/nginx-ingress) * [git.k8s.io/charts/stable/prometheus](https://git.k8s.io/charts/stable/prometheus) * [git.k8s.io/charts/stable/kubernetes-dashboard](https://git.k8s.io/charts/stable/kubernetes-dashboard) diff --git a/vendor/github.com/gardener/gardener/pkg/utils/miscellaneous.go b/vendor/github.com/gardener/gardener/pkg/utils/miscellaneous.go index 02581c3e6..8edaf5e70 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/miscellaneous.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/miscellaneous.go @@ -127,3 +127,10 @@ func TestEmail(email string) bool { match, _ := regexp.MatchString(`^[^@]+@(?:[a-zA-Z-0-9]+\.)+[a-zA-Z]{2,}$`, email) return match } + +// NetworksIntersect returns true if the given network CIDRs intersect. +func NetworksIntersect(cidr1, cidr2 string) bool { + _, net1, err1 := net.ParseCIDR(cidr1) + _, net2, err2 := net.ParseCIDR(cidr2) + return err1 != nil || err2 != nil || net2.Contains(net1.IP) || net1.Contains(net2.IP) +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go index 3abfed2cf..9581ccd30 100644 --- a/vendor/github.com/gogo/protobuf/proto/encode.go +++ b/vendor/github.com/gogo/protobuf/proto/encode.go @@ -189,6 +189,8 @@ type Marshaler interface { // prefixed by a varint-encoded length. 
func (p *Buffer) EncodeMessage(pb Message) error { siz := Size(pb) + sizVar := SizeVarint(uint64(siz)) + p.grow(siz + sizVar) p.EncodeVarint(uint64(siz)) return p.Marshal(pb) } diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go index 686bd2a09..341c6f57f 100644 --- a/vendor/github.com/gogo/protobuf/proto/extensions.go +++ b/vendor/github.com/gogo/protobuf/proto/extensions.go @@ -527,6 +527,7 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { // SetExtension sets the specified extension of pb to the specified value. func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { if epb, ok := pb.(extensionsBytes); ok { + ClearExtension(pb, extension) newb, err := encodeExtension(extension, value) if err != nil { return err diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go index 53ebd8cca..6f1ae120e 100644 --- a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go @@ -154,6 +154,10 @@ func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) return EncodeExtensionMap(m.extensionsWrite(), data) } +func EncodeInternalExtensionBackwards(m extendableProto, data []byte) (n int, err error) { + return EncodeExtensionMapBackwards(m.extensionsWrite(), data) +} + func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { o := 0 for _, e := range m { @@ -169,6 +173,23 @@ func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { return o, nil } +func EncodeExtensionMapBackwards(m map[int32]Extension, data []byte) (n int, err error) { + o := 0 + end := len(data) + for _, e := range m { + if err := e.Encode(); err != nil { + return 0, err + } + n := copy(data[end-len(e.enc):], e.enc) + if n != len(e.enc) { + return 0, io.ErrShortBuffer + } + end -= n + o += n + } + return o, nil +} + func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { e := m[id] if err := e.Encode(); err != nil { diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go index d17f80209..80db1c155 100644 --- a/vendor/github.com/gogo/protobuf/proto/lib.go +++ b/vendor/github.com/gogo/protobuf/proto/lib.go @@ -948,13 +948,19 @@ func isProto3Zero(v reflect.Value) bool { return false } -// ProtoPackageIsVersion2 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const GoGoProtoPackageIsVersion2 = true - -// ProtoPackageIsVersion1 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const GoGoProtoPackageIsVersion1 = true +const ( + // ProtoPackageIsVersion3 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + GoGoProtoPackageIsVersion3 = true + + // ProtoPackageIsVersion2 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + GoGoProtoPackageIsVersion2 = true + + // ProtoPackageIsVersion1 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + GoGoProtoPackageIsVersion1 = true +) // InternalMessageInfo is a type used internally by generated .pb.go files. 
// This type is not intended to be used by non-generated code. diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go index c9e5fa020..28da1475f 100644 --- a/vendor/github.com/gogo/protobuf/proto/properties.go +++ b/vendor/github.com/gogo/protobuf/proto/properties.go @@ -43,7 +43,6 @@ package proto import ( "fmt" "log" - "os" "reflect" "sort" "strconv" @@ -205,7 +204,7 @@ func (p *Properties) Parse(s string) { // "bytes,49,opt,name=foo,def=hello!" fields := strings.Split(s, ",") // breaks def=, but handled below. if len(fields) < 2 { - fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + log.Printf("proto: tag has too few fields: %q", s) return } @@ -225,7 +224,7 @@ func (p *Properties) Parse(s string) { p.WireType = WireBytes // no numeric converter for non-numeric types default: - fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + log.Printf("proto: tag has unknown wire type: %q", s) return } @@ -400,6 +399,15 @@ func GetProperties(t reflect.Type) *StructProperties { return sprop } +type ( + oneofFuncsIface interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + oneofWrappersIface interface { + XXX_OneofWrappers() []interface{} + } +) + // getPropertiesLocked requires that propertiesMu is held. func getPropertiesLocked(t reflect.Type) *StructProperties { if prop, ok := propertiesMap[t]; ok { @@ -441,37 +449,40 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { // Re-order prop.order. sort.Sort(prop) - type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok { + if isOneofMessage { var oots []interface{} - _, _, _, oots = om.XXX_OneofFuncs() - - // Interpret oneof metadata. - prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T - Prop: new(Properties), - } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oots = m.XXX_OneofFuncs() + case oneofWrappersIface: + oots = m.XXX_OneofWrappers() + } + if len(oots) > 0 { + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), } - if !oop.Type.AssignableTo(f.Type) { - continue + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. 
+ for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break } - oop.Field = i - break + prop.OneofTypes[oop.Prop.OrigName] = oop } - prop.OneofTypes[oop.Prop.OrigName] = oop } } diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go index 9b1538d05..f8babdefa 100644 --- a/vendor/github.com/gogo/protobuf/proto/table_marshal.go +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal.go @@ -389,8 +389,13 @@ func (u *marshalInfo) computeMarshalInfo() { // get oneof implementers var oneofImplementers []interface{} // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler - if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok && isOneofMessage { - _, _, _, oneofImplementers = m.XXX_OneofFuncs() + if isOneofMessage { + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() + } } // normal fields @@ -519,10 +524,6 @@ func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofI } } -type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) -} - // wiretype returns the wire encoding of the type. func wiretype(encoding string) uint64 { switch encoding { @@ -2968,7 +2969,9 @@ func (p *Buffer) Marshal(pb Message) error { if m, ok := pb.(newMarshaler); ok { siz := m.XXX_Size() p.grow(siz) // make sure buf has enough capacity - p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) + pp := p.buf[len(p.buf) : len(p.buf) : len(p.buf)+siz] + pp, err = m.XXX_Marshal(pp, p.deterministic) + p.buf = append(p.buf, pp...) return err } if m, ok := pb.(Marshaler); ok { diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go index f520106e0..60dcf70d1 100644 --- a/vendor/github.com/gogo/protobuf/proto/table_merge.go +++ b/vendor/github.com/gogo/protobuf/proto/table_merge.go @@ -530,6 +530,25 @@ func (mi *mergeInfo) computeMergeInfo() { } case reflect.Struct: switch { + case isSlice && !isPointer: // E.g. []pb.T + mergeInfo := getMergeInfo(tf) + zero := reflect.Zero(tf) + mfi.merge = func(dst, src pointer) { + // TODO: Make this faster? + dstsp := dst.asPointerTo(f.Type) + dsts := dstsp.Elem() + srcs := src.asPointerTo(f.Type).Elem() + for i := 0; i < srcs.Len(); i++ { + dsts = reflect.Append(dsts, zero) + srcElement := srcs.Index(i).Addr() + dstElement := dsts.Index(dsts.Len() - 1).Addr() + mergeInfo.merge(valToPointer(dstElement), valToPointer(srcElement)) + } + if dsts.IsNil() { + dsts = reflect.MakeSlice(f.Type, 0, 0) + } + dstsp.Elem().Set(dsts) + } case !isPointer: mergeInfo := getMergeInfo(tf) mfi.merge = func(dst, src pointer) { diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go index bb2622f28..937229386 100644 --- a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go @@ -371,15 +371,18 @@ func (u *unmarshalInfo) computeUnmarshalInfo() { } // Find any types associated with oneof fields. - // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? 
- fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") // gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler - if fn.IsValid() && len(oneofFields) > 0 { - res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} - for i := res.Len() - 1; i >= 0; i-- { - v := res.Index(i) // interface{} - tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X - typ := tptr.Elem() // Msg_X + if len(oneofFields) > 0 { + var oneofImplementers []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() + } + for _, v := range oneofImplementers { + tptr := reflect.TypeOf(v) // *Msg_X + typ := tptr.Elem() // Msg_X f := typ.Field(0) // oneof implementers have one field baseUnmarshal := fieldUnmarshaler(&f) @@ -407,11 +410,12 @@ func (u *unmarshalInfo) computeUnmarshalInfo() { u.setTag(fieldNum, of.field, unmarshal, 0, name) } } + } } // Get extension ranges, if any. - fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") + fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") if fn.IsValid() { if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() { panic("a message with extensions, but no extensions field in " + t.Name()) diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go index 0407ba85d..87416afe9 100644 --- a/vendor/github.com/gogo/protobuf/proto/text.go +++ b/vendor/github.com/gogo/protobuf/proto/text.go @@ -476,6 +476,8 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return nil } +var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + // writeAny writes an arbitrary field. func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) @@ -589,8 +591,8 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert // mutating this value. v = v.Addr() } - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() + if v.Type().Implements(textMarshalerType) { + text, err := v.Interface().(encoding.TextMarshaler).MarshalText() if err != nil { return err } diff --git a/vendor/github.com/pborman/uuid/LICENSE b/vendor/github.com/google/go-cmp/LICENSE similarity index 96% rename from vendor/github.com/pborman/uuid/LICENSE rename to vendor/github.com/google/go-cmp/LICENSE index 5dc68268d..32017f8fa 100644 --- a/vendor/github.com/pborman/uuid/LICENSE +++ b/vendor/github.com/google/go-cmp/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009,2014 Google Inc. All rights reserved. +Copyright (c) 2017 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go new file mode 100644 index 000000000..2133562b0 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -0,0 +1,616 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package cmp determines equality of values. 
+// +// This package is intended to be a more powerful and safer alternative to +// reflect.DeepEqual for comparing whether two values are semantically equal. +// +// The primary features of cmp are: +// +// • When the default behavior of equality does not suit the needs of the test, +// custom equality functions can override the equality operation. +// For example, an equality function may report floats as equal so long as they +// are within some tolerance of each other. +// +// • Types that have an Equal method may use that method to determine equality. +// This allows package authors to determine the equality operation for the types +// that they define. +// +// • If no custom equality functions are used and no Equal method is defined, +// equality is determined by recursively comparing the primitive kinds on both +// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported +// fields are not compared by default; they result in panics unless suppressed +// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly compared +// using the AllowUnexported option. +package cmp + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp/internal/diff" + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/function" + "github.com/google/go-cmp/cmp/internal/value" +) + +// Equal reports whether x and y are equal by recursively applying the +// following rules in the given order to x and y and all of their sub-values: +// +// • Let S be the set of all Ignore, Transformer, and Comparer options that +// remain after applying all path filters, value filters, and type filters. +// If at least one Ignore exists in S, then the comparison is ignored. +// If the number of Transformer and Comparer options in S is greater than one, +// then Equal panics because it is ambiguous which option to use. +// If S contains a single Transformer, then use that to transform the current +// values and recursively call Equal on the output values. +// If S contains a single Comparer, then use that to compare the current values. +// Otherwise, evaluation proceeds to the next rule. +// +// • If the values have an Equal method of the form "(T) Equal(T) bool" or +// "(T) Equal(I) bool" where T is assignable to I, then use the result of +// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and +// evaluation proceeds to the next rule. +// +// • Lastly, try to compare x and y based on their basic kinds. +// Simple kinds like booleans, integers, floats, complex numbers, strings, and +// channels are compared using the equivalent of the == operator in Go. +// Functions are only equal if they are both nil, otherwise they are unequal. +// +// Structs are equal if recursively calling Equal on all fields report equal. +// If a struct contains unexported fields, Equal panics unless an Ignore option +// (e.g., cmpopts.IgnoreUnexported) ignores that field or the AllowUnexported +// option explicitly permits comparing the unexported field. +// +// Slices are equal if they are both nil or both non-nil, where recursively +// calling Equal on all non-ignored slice or array elements report equal. +// Empty non-nil slices and nil slices are not equal; to equate empty slices, +// consider using cmpopts.EquateEmpty. +// +// Maps are equal if they are both nil or both non-nil, where recursively +// calling Equal on all non-ignored map entries report equal. +// Map keys are equal according to the == operator. 
+// To use custom comparisons for map keys, consider using cmpopts.SortMaps. +// Empty non-nil maps and nil maps are not equal; to equate empty maps, +// consider using cmpopts.EquateEmpty. +// +// Pointers and interfaces are equal if they are both nil or both non-nil, +// where they have the same underlying concrete type and recursively +// calling Equal on the underlying values reports equal. +func Equal(x, y interface{}, opts ...Option) bool { + vx := reflect.ValueOf(x) + vy := reflect.ValueOf(y) + + // If the inputs are different types, auto-wrap them in an empty interface + // so that they have the same parent type. + var t reflect.Type + if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() { + t = reflect.TypeOf((*interface{})(nil)).Elem() + if vx.IsValid() { + vvx := reflect.New(t).Elem() + vvx.Set(vx) + vx = vvx + } + if vy.IsValid() { + vvy := reflect.New(t).Elem() + vvy.Set(vy) + vy = vvy + } + } else { + t = vx.Type() + } + + s := newState(opts) + s.compareAny(&pathStep{t, vx, vy}) + return s.result.Equal() +} + +// Diff returns a human-readable report of the differences between two values. +// It returns an empty string if and only if Equal returns true for the same +// input values and options. +// +// The output is displayed as a literal in pseudo-Go syntax. +// At the start of each line, a "-" prefix indicates an element removed from x, +// a "+" prefix to indicates an element added to y, and the lack of a prefix +// indicates an element common to both x and y. If possible, the output +// uses fmt.Stringer.String or error.Error methods to produce more humanly +// readable outputs. In such cases, the string is prefixed with either an +// 's' or 'e' character, respectively, to indicate that the method was called. +// +// Do not depend on this output being stable. If you need the ability to +// programmatically interpret the difference, consider using a custom Reporter. +func Diff(x, y interface{}, opts ...Option) string { + r := new(defaultReporter) + eq := Equal(x, y, Options(opts), Reporter(r)) + d := r.String() + if (d == "") != eq { + panic("inconsistent difference and equality results") + } + return d +} + +type state struct { + // These fields represent the "comparison state". + // Calling statelessCompare must not result in observable changes to these. + result diff.Result // The current result of comparison + curPath Path // The current path in the value tree + reporters []reporter // Optional reporters + + // recChecker checks for infinite cycles applying the same set of + // transformers upon the output of itself. + recChecker recChecker + + // dynChecker triggers pseudo-random checks for option correctness. + // It is safe for statelessCompare to mutate this value. + dynChecker dynChecker + + // These fields, once set by processOption, will not change. + exporters map[reflect.Type]bool // Set of structs with unexported field visibility + opts Options // List of all fundamental and filter options +} + +func newState(opts []Option) *state { + // Always ensure a validator option exists to validate the inputs. 
+ s := &state{opts: Options{validator{}}} + s.processOption(Options(opts)) + return s +} + +func (s *state) processOption(opt Option) { + switch opt := opt.(type) { + case nil: + case Options: + for _, o := range opt { + s.processOption(o) + } + case coreOption: + type filtered interface { + isFiltered() bool + } + if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() { + panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt)) + } + s.opts = append(s.opts, opt) + case visibleStructs: + if s.exporters == nil { + s.exporters = make(map[reflect.Type]bool) + } + for t := range opt { + s.exporters[t] = true + } + case reporter: + s.reporters = append(s.reporters, opt) + default: + panic(fmt.Sprintf("unknown option %T", opt)) + } +} + +// statelessCompare compares two values and returns the result. +// This function is stateless in that it does not alter the current result, +// or output to any registered reporters. +func (s *state) statelessCompare(step PathStep) diff.Result { + // We do not save and restore the curPath because all of the compareX + // methods should properly push and pop from the path. + // It is an implementation bug if the contents of curPath differs from + // when calling this function to when returning from it. + + oldResult, oldReporters := s.result, s.reporters + s.result = diff.Result{} // Reset result + s.reporters = nil // Remove reporters to avoid spurious printouts + s.compareAny(step) + res := s.result + s.result, s.reporters = oldResult, oldReporters + return res +} + +func (s *state) compareAny(step PathStep) { + // Update the path stack. + s.curPath.push(step) + defer s.curPath.pop() + for _, r := range s.reporters { + r.PushStep(step) + defer r.PopStep() + } + s.recChecker.Check(s.curPath) + + // Obtain the current type and values. + t := step.Type() + vx, vy := step.Values() + + // Rule 1: Check whether an option applies on this node in the value tree. + if s.tryOptions(t, vx, vy) { + return + } + + // Rule 2: Check whether the type has a valid Equal method. + if s.tryMethod(t, vx, vy) { + return + } + + // Rule 3: Compare based on the underlying kind. + switch t.Kind() { + case reflect.Bool: + s.report(vx.Bool() == vy.Bool(), 0) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s.report(vx.Int() == vy.Int(), 0) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + s.report(vx.Uint() == vy.Uint(), 0) + case reflect.Float32, reflect.Float64: + s.report(vx.Float() == vy.Float(), 0) + case reflect.Complex64, reflect.Complex128: + s.report(vx.Complex() == vy.Complex(), 0) + case reflect.String: + s.report(vx.String() == vy.String(), 0) + case reflect.Chan, reflect.UnsafePointer: + s.report(vx.Pointer() == vy.Pointer(), 0) + case reflect.Func: + s.report(vx.IsNil() && vy.IsNil(), 0) + case reflect.Struct: + s.compareStruct(t, vx, vy) + case reflect.Slice, reflect.Array: + s.compareSlice(t, vx, vy) + case reflect.Map: + s.compareMap(t, vx, vy) + case reflect.Ptr: + s.comparePtr(t, vx, vy) + case reflect.Interface: + s.compareInterface(t, vx, vy) + default: + panic(fmt.Sprintf("%v kind not handled", t.Kind())) + } +} + +func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool { + // Evaluate all filters and apply the remaining options. 
+ if opt := s.opts.filter(s, t, vx, vy); opt != nil { + opt.apply(s, vx, vy) + return true + } + return false +} + +func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool { + // Check if this type even has an Equal method. + m, ok := t.MethodByName("Equal") + if !ok || !function.IsType(m.Type, function.EqualAssignable) { + return false + } + + eq := s.callTTBFunc(m.Func, vx, vy) + s.report(eq, reportByMethod) + return true +} + +func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { + v = sanitizeValue(v, f.Type().In(0)) + if !s.dynChecker.Next() { + return f.Call([]reflect.Value{v})[0] + } + + // Run the function twice and ensure that we get the same results back. + // We run in goroutines so that the race detector (if enabled) can detect + // unsafe mutations to the input. + c := make(chan reflect.Value) + go detectRaces(c, f, v) + got := <-c + want := f.Call([]reflect.Value{v})[0] + if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() { + // To avoid false-positives with non-reflexive equality operations, + // we sanity check whether a value is equal to itself. + if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() { + return want + } + panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f))) + } + return want +} + +func (s *state) callTTBFunc(f, x, y reflect.Value) bool { + x = sanitizeValue(x, f.Type().In(0)) + y = sanitizeValue(y, f.Type().In(1)) + if !s.dynChecker.Next() { + return f.Call([]reflect.Value{x, y})[0].Bool() + } + + // Swapping the input arguments is sufficient to check that + // f is symmetric and deterministic. + // We run in goroutines so that the race detector (if enabled) can detect + // unsafe mutations to the input. + c := make(chan reflect.Value) + go detectRaces(c, f, y, x) + got := <-c + want := f.Call([]reflect.Value{x, y})[0].Bool() + if !got.IsValid() || got.Bool() != want { + panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f))) + } + return want +} + +func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { + var ret reflect.Value + defer func() { + recover() // Ignore panics, let the other call to f panic instead + c <- ret + }() + ret = f.Call(vs)[0] +} + +// sanitizeValue converts nil interfaces of type T to those of type R, +// assuming that T is assignable to R. +// Otherwise, it returns the input value as is. +func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { + // TODO(dsnet): Workaround for reflect bug (https://golang.org/issue/22143). + if !flags.AtLeastGo110 { + if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { + return reflect.New(t).Elem() + } + } + return v +} + +func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { + var vax, vay reflect.Value // Addressable versions of vx and vy + + step := StructField{&structField{}} + for i := 0; i < t.NumField(); i++ { + step.typ = t.Field(i).Type + step.vx = vx.Field(i) + step.vy = vy.Field(i) + step.name = t.Field(i).Name + step.idx = i + step.unexported = !isExported(step.name) + if step.unexported { + if step.name == "_" { + continue + } + // Defer checking of unexported fields until later to give an + // Ignore a chance to ignore the field. + if !vax.IsValid() || !vay.IsValid() { + // For retrieveUnexportedField to work, the parent struct must + // be addressable. Create a new copy of the values if + // necessary to make them addressable. 
+ vax = makeAddressable(vx) + vay = makeAddressable(vy) + } + step.mayForce = s.exporters[t] + step.pvx = vax + step.pvy = vay + step.field = t.Field(i) + } + s.compareAny(step) + } +} + +func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) { + isSlice := t.Kind() == reflect.Slice + if isSlice && (vx.IsNil() || vy.IsNil()) { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // TODO: Support cyclic data structures. + + step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}}} + withIndexes := func(ix, iy int) SliceIndex { + if ix >= 0 { + step.vx, step.xkey = vx.Index(ix), ix + } else { + step.vx, step.xkey = reflect.Value{}, -1 + } + if iy >= 0 { + step.vy, step.ykey = vy.Index(iy), iy + } else { + step.vy, step.ykey = reflect.Value{}, -1 + } + return step + } + + // Ignore options are able to ignore missing elements in a slice. + // However, detecting these reliably requires an optimal differencing + // algorithm, for which diff.Difference is not. + // + // Instead, we first iterate through both slices to detect which elements + // would be ignored if standing alone. The index of non-discarded elements + // are stored in a separate slice, which diffing is then performed on. + var indexesX, indexesY []int + var ignoredX, ignoredY []bool + for ix := 0; ix < vx.Len(); ix++ { + ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0 + if !ignored { + indexesX = append(indexesX, ix) + } + ignoredX = append(ignoredX, ignored) + } + for iy := 0; iy < vy.Len(); iy++ { + ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0 + if !ignored { + indexesY = append(indexesY, iy) + } + ignoredY = append(ignoredY, ignored) + } + + // Compute an edit-script for slices vx and vy (excluding ignored elements). + edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result { + return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy])) + }) + + // Replay the ignore-scripts and the edit-script. + var ix, iy int + for ix < vx.Len() || iy < vy.Len() { + var e diff.EditType + switch { + case ix < len(ignoredX) && ignoredX[ix]: + e = diff.UniqueX + case iy < len(ignoredY) && ignoredY[iy]: + e = diff.UniqueY + default: + e, edits = edits[0], edits[1:] + } + switch e { + case diff.UniqueX: + s.compareAny(withIndexes(ix, -1)) + ix++ + case diff.UniqueY: + s.compareAny(withIndexes(-1, iy)) + iy++ + default: + s.compareAny(withIndexes(ix, iy)) + ix++ + iy++ + } + } +} + +func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // TODO: Support cyclic data structures. + + // We combine and sort the two map keys so that we can perform the + // comparisons in a deterministic order. + step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}} + for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) { + step.vx = vx.MapIndex(k) + step.vy = vy.MapIndex(k) + step.key = k + if !step.vx.IsValid() && !step.vy.IsValid() { + // It is possible for both vx and vy to be invalid if the + // key contained a NaN value in it. + // + // Even with the ability to retrieve NaN keys in Go 1.12, + // there still isn't a sensible way to compare the values since + // a NaN key may map to multiple unordered values. + // The most reasonable way to compare NaNs would be to compare the + // set of values. However, this is impossible to do efficiently + // since set equality is provably an O(n^2) operation given only + // an Equal function. 
If we had a Less function or Hash function, + // this could be done in O(n*log(n)) or O(n), respectively. + // + // Rather than adding complex logic to deal with NaNs, make it + // the user's responsibility to compare such obscure maps. + const help = "consider providing a Comparer to compare the map" + panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help)) + } + s.compareAny(step) + } +} + +func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // TODO: Support cyclic data structures. + + vx, vy = vx.Elem(), vy.Elem() + s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}}) +} + +func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + vx, vy = vx.Elem(), vy.Elem() + if vx.Type() != vy.Type() { + s.report(false, 0) + return + } + s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}}) +} + +func (s *state) report(eq bool, rf resultFlags) { + if rf&reportByIgnore == 0 { + if eq { + s.result.NumSame++ + rf |= reportEqual + } else { + s.result.NumDiff++ + rf |= reportUnequal + } + } + for _, r := range s.reporters { + r.Report(Result{flags: rf}) + } +} + +// recChecker tracks the state needed to periodically perform checks that +// user provided transformers are not stuck in an infinitely recursive cycle. +type recChecker struct{ next int } + +// Check scans the Path for any recursive transformers and panics when any +// recursive transformers are detected. Note that the presence of a +// recursive Transformer does not necessarily imply an infinite cycle. +// As such, this check only activates after some minimal number of path steps. +func (rc *recChecker) Check(p Path) { + const minLen = 1 << 16 + if rc.next == 0 { + rc.next = minLen + } + if len(p) < rc.next { + return + } + rc.next <<= 1 + + // Check whether the same transformer has appeared at least twice. + var ss []string + m := map[Option]int{} + for _, ps := range p { + if t, ok := ps.(Transform); ok { + t := t.Option() + if m[t] == 1 { // Transformer was used exactly once before + tf := t.(*transformer).fnc.Type() + ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0))) + } + m[t]++ + } + } + if len(ss) > 0 { + const warning = "recursive set of Transformers detected" + const help = "consider using cmpopts.AcyclicTransformer" + set := strings.Join(ss, "\n\t") + panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help)) + } +} + +// dynChecker tracks the state needed to periodically perform checks that +// user provided functions are symmetric and deterministic. +// The zero value is safe for immediate use. +type dynChecker struct{ curr, next int } + +// Next increments the state and reports whether a check should be performed. +// +// Checks occur every Nth function call, where N is a triangular number: +// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ... +// See https://en.wikipedia.org/wiki/Triangular_number +// +// This sequence ensures that the cost of checks drops significantly as +// the number of functions calls grows larger. +func (dc *dynChecker) Next() bool { + ok := dc.curr == dc.next + if ok { + dc.curr = 0 + dc.next++ + } + dc.curr++ + return ok +} + +// makeAddressable returns a value that is always addressable. +// It returns the input verbatim if it is already addressable, +// otherwise it creates a new value and returns an addressable copy. 
+func makeAddressable(v reflect.Value) reflect.Value { + if v.CanAddr() { + return v + } + vc := reflect.New(v.Type()).Elem() + vc.Set(v) + return vc +} diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go new file mode 100644 index 000000000..abc3a1c3e --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/export_panic.go @@ -0,0 +1,15 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build purego + +package cmp + +import "reflect" + +const supportAllowUnexported = false + +func retrieveUnexportedField(reflect.Value, reflect.StructField) reflect.Value { + panic("retrieveUnexportedField is not implemented") +} diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go new file mode 100644 index 000000000..59d4ee91b --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go @@ -0,0 +1,23 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !purego + +package cmp + +import ( + "reflect" + "unsafe" +) + +const supportAllowUnexported = true + +// retrieveUnexportedField uses unsafe to forcibly retrieve any field from +// a struct such that the value has read-write permissions. +// +// The parent struct, v, must be addressable, while f must be a StructField +// describing the field to retrieve. +func retrieveUnexportedField(v reflect.Value, f reflect.StructField) reflect.Value { + return reflect.NewAt(f.Type, unsafe.Pointer(v.UnsafeAddr()+f.Offset)).Elem() +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go new file mode 100644 index 000000000..fe98dcc67 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go @@ -0,0 +1,17 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !cmp_debug + +package diff + +var debug debugger + +type debugger struct{} + +func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc { + return f +} +func (debugger) Update() {} +func (debugger) Finish() {} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go new file mode 100644 index 000000000..597b6ae56 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go @@ -0,0 +1,122 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
+ +// +build cmp_debug + +package diff + +import ( + "fmt" + "strings" + "sync" + "time" +) + +// The algorithm can be seen running in real-time by enabling debugging: +// go test -tags=cmp_debug -v +// +// Example output: +// === RUN TestDifference/#34 +// ┌───────────────────────────────┐ +// │ \ · · · · · · · · · · · · · · │ +// │ · # · · · · · · · · · · · · · │ +// │ · \ · · · · · · · · · · · · · │ +// │ · · \ · · · · · · · · · · · · │ +// │ · · · X # · · · · · · · · · · │ +// │ · · · # \ · · · · · · · · · · │ +// │ · · · · · # # · · · · · · · · │ +// │ · · · · · # \ · · · · · · · · │ +// │ · · · · · · · \ · · · · · · · │ +// │ · · · · · · · · \ · · · · · · │ +// │ · · · · · · · · · \ · · · · · │ +// │ · · · · · · · · · · \ · · # · │ +// │ · · · · · · · · · · · \ # # · │ +// │ · · · · · · · · · · · # # # · │ +// │ · · · · · · · · · · # # # # · │ +// │ · · · · · · · · · # # # # # · │ +// │ · · · · · · · · · · · · · · \ │ +// └───────────────────────────────┘ +// [.Y..M.XY......YXYXY.|] +// +// The grid represents the edit-graph where the horizontal axis represents +// list X and the vertical axis represents list Y. The start of the two lists +// is the top-left, while the ends are the bottom-right. The '·' represents +// an unexplored node in the graph. The '\' indicates that the two symbols +// from list X and Y are equal. The 'X' indicates that two symbols are similar +// (but not exactly equal) to each other. The '#' indicates that the two symbols +// are different (and not similar). The algorithm traverses this graph trying to +// make the paths starting in the top-left and the bottom-right connect. +// +// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents +// the currently established path from the forward and reverse searches, +// separated by a '|' character. + +const ( + updateDelay = 100 * time.Millisecond + finishDelay = 500 * time.Millisecond + ansiTerminal = true // ANSI escape codes used to move terminal cursor +) + +var debug debugger + +type debugger struct { + sync.Mutex + p1, p2 EditScript + fwdPath, revPath *EditScript + grid []byte + lines int +} + +func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc { + dbg.Lock() + dbg.fwdPath, dbg.revPath = p1, p2 + top := "┌─" + strings.Repeat("──", nx) + "┐\n" + row := "│ " + strings.Repeat("· ", nx) + "│\n" + btm := "└─" + strings.Repeat("──", nx) + "┘\n" + dbg.grid = []byte(top + strings.Repeat(row, ny) + btm) + dbg.lines = strings.Count(dbg.String(), "\n") + fmt.Print(dbg) + + // Wrap the EqualFunc so that we can intercept each result. 
+ return func(ix, iy int) (r Result) { + cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")] + for i := range cell { + cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot + } + switch r = f(ix, iy); { + case r.Equal(): + cell[0] = '\\' + case r.Similar(): + cell[0] = 'X' + default: + cell[0] = '#' + } + return + } +} + +func (dbg *debugger) Update() { + dbg.print(updateDelay) +} + +func (dbg *debugger) Finish() { + dbg.print(finishDelay) + dbg.Unlock() +} + +func (dbg *debugger) String() string { + dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0] + for i := len(*dbg.revPath) - 1; i >= 0; i-- { + dbg.p2 = append(dbg.p2, (*dbg.revPath)[i]) + } + return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2) +} + +func (dbg *debugger) print(d time.Duration) { + if ansiTerminal { + fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor + } + fmt.Print(dbg) + time.Sleep(d) +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go new file mode 100644 index 000000000..3d2e42662 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -0,0 +1,372 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package diff implements an algorithm for producing edit-scripts. +// The edit-script is a sequence of operations needed to transform one list +// of symbols into another (or vice-versa). The edits allowed are insertions, +// deletions, and modifications. The summation of all edits is called the +// Levenshtein distance as this problem is well-known in computer science. +// +// This package prioritizes performance over accuracy. That is, the run time +// is more important than obtaining a minimal Levenshtein distance. +package diff + +// EditType represents a single operation within an edit-script. +type EditType uint8 + +const ( + // Identity indicates that a symbol pair is identical in both list X and Y. + Identity EditType = iota + // UniqueX indicates that a symbol only exists in X and not Y. + UniqueX + // UniqueY indicates that a symbol only exists in Y and not X. + UniqueY + // Modified indicates that a symbol pair is a modification of each other. + Modified +) + +// EditScript represents the series of differences between two lists. +type EditScript []EditType + +// String returns a human-readable string representing the edit-script where +// Identity, UniqueX, UniqueY, and Modified are represented by the +// '.', 'X', 'Y', and 'M' characters, respectively. +func (es EditScript) String() string { + b := make([]byte, len(es)) + for i, e := range es { + switch e { + case Identity: + b[i] = '.' + case UniqueX: + b[i] = 'X' + case UniqueY: + b[i] = 'Y' + case Modified: + b[i] = 'M' + default: + panic("invalid edit-type") + } + } + return string(b) +} + +// stats returns a histogram of the number of each type of edit operation. +func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) { + for _, e := range es { + switch e { + case Identity: + s.NI++ + case UniqueX: + s.NX++ + case UniqueY: + s.NY++ + case Modified: + s.NM++ + default: + panic("invalid edit-type") + } + } + return +} + +// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if +// lists X and Y are equal. +func (es EditScript) Dist() int { return len(es) - es.stats().NI } + +// LenX is the length of the X list. 
+func (es EditScript) LenX() int { return len(es) - es.stats().NY } + +// LenY is the length of the Y list. +func (es EditScript) LenY() int { return len(es) - es.stats().NX } + +// EqualFunc reports whether the symbols at indexes ix and iy are equal. +// When called by Difference, the index is guaranteed to be within nx and ny. +type EqualFunc func(ix int, iy int) Result + +// Result is the result of comparison. +// NumSame is the number of sub-elements that are equal. +// NumDiff is the number of sub-elements that are not equal. +type Result struct{ NumSame, NumDiff int } + +// BoolResult returns a Result that is either Equal or not Equal. +func BoolResult(b bool) Result { + if b { + return Result{NumSame: 1} // Equal, Similar + } else { + return Result{NumDiff: 2} // Not Equal, not Similar + } +} + +// Equal indicates whether the symbols are equal. Two symbols are equal +// if and only if NumDiff == 0. If Equal, then they are also Similar. +func (r Result) Equal() bool { return r.NumDiff == 0 } + +// Similar indicates whether two symbols are similar and may be represented +// by using the Modified type. As a special case, we consider binary comparisons +// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar. +// +// The exact ratio of NumSame to NumDiff to determine similarity may change. +func (r Result) Similar() bool { + // Use NumSame+1 to offset NumSame so that binary comparisons are similar. + return r.NumSame+1 >= r.NumDiff +} + +// Difference reports whether two lists of lengths nx and ny are equal +// given the definition of equality provided as f. +// +// This function returns an edit-script, which is a sequence of operations +// needed to convert one list into the other. The following invariants for +// the edit-script are maintained: +// • eq == (es.Dist()==0) +// • nx == es.LenX() +// • ny == es.LenY() +// +// This algorithm is not guaranteed to be an optimal solution (i.e., one that +// produces an edit-script with a minimal Levenshtein distance). This algorithm +// favors performance over optimality. The exact output is not guaranteed to +// be stable and may change over time. +func Difference(nx, ny int, f EqualFunc) (es EditScript) { + // This algorithm is based on traversing what is known as an "edit-graph". + // See Figure 1 from "An O(ND) Difference Algorithm and Its Variations" + // by Eugene W. Myers. Since D can be as large as N itself, this is + // effectively O(N^2). Unlike the algorithm from that paper, we are not + // interested in the optimal path, but at least some "decent" path. + // + // For example, let X and Y be lists of symbols: + // X = [A B C A B B A] + // Y = [C B A B A C] + // + // The edit-graph can be drawn as the following: + // A B C A B B A + // ┌─────────────┐ + // C │_|_|\|_|_|_|_│ 0 + // B │_|\|_|_|\|\|_│ 1 + // A │\|_|_|\|_|_|\│ 2 + // B │_|\|_|_|\|\|_│ 3 + // A │\|_|_|\|_|_|\│ 4 + // C │ | |\| | | | │ 5 + // └─────────────┘ 6 + // 0 1 2 3 4 5 6 7 + // + // List X is written along the horizontal axis, while list Y is written + // along the vertical axis. At any point on this grid, if the symbol in + // list X matches the corresponding symbol in list Y, then a '\' is drawn. + // The goal of any minimal edit-script algorithm is to find a path from the + // top-left corner to the bottom-right corner, while traveling through the + // fewest horizontal or vertical edges. + // A horizontal edge is equivalent to inserting a symbol from list X. + // A vertical edge is equivalent to inserting a symbol from list Y. 
+ // A diagonal edge is equivalent to a matching symbol between both X and Y. + + // Invariants: + // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx + // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny + // + // In general: + // • fwdFrontier.X < revFrontier.X + // • fwdFrontier.Y < revFrontier.Y + // Unless, it is time for the algorithm to terminate. + fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)} + revPath := path{-1, point{nx, ny}, make(EditScript, 0)} + fwdFrontier := fwdPath.point // Forward search frontier + revFrontier := revPath.point // Reverse search frontier + + // Search budget bounds the cost of searching for better paths. + // The longest sequence of non-matching symbols that can be tolerated is + // approximately the square-root of the search budget. + searchBudget := 4 * (nx + ny) // O(n) + + // The algorithm below is a greedy, meet-in-the-middle algorithm for + // computing sub-optimal edit-scripts between two lists. + // + // The algorithm is approximately as follows: + // • Searching for differences switches back-and-forth between + // a search that starts at the beginning (the top-left corner), and + // a search that starts at the end (the bottom-right corner). The goal of + // the search is connect with the search from the opposite corner. + // • As we search, we build a path in a greedy manner, where the first + // match seen is added to the path (this is sub-optimal, but provides a + // decent result in practice). When matches are found, we try the next pair + // of symbols in the lists and follow all matches as far as possible. + // • When searching for matches, we search along a diagonal going through + // through the "frontier" point. If no matches are found, we advance the + // frontier towards the opposite corner. + // • This algorithm terminates when either the X coordinates or the + // Y coordinates of the forward and reverse frontier points ever intersect. + // + // This algorithm is correct even if searching only in the forward direction + // or in the reverse direction. We do both because it is commonly observed + // that two lists commonly differ because elements were added to the front + // or end of the other list. + // + // Running the tests with the "cmp_debug" build tag prints a visualization + // of the algorithm running in real-time. This is educational for + // understanding how the algorithm works. See debug_enable.go. + f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es) + for { + // Forward search from the beginning. + if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { + break + } + for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { + // Search in a diagonal pattern for a match. + z := zigzag(i) + p := point{fwdFrontier.X + z, fwdFrontier.Y - z} + switch { + case p.X >= revPath.X || p.Y < fwdPath.Y: + stop1 = true // Hit top-right corner + case p.Y >= revPath.Y || p.X < fwdPath.X: + stop2 = true // Hit bottom-left corner + case f(p.X, p.Y).Equal(): + // Match found, so connect the path to this point. + fwdPath.connect(p, f) + fwdPath.append(Identity) + // Follow sequence of matches as far as possible. + for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { + if !f(fwdPath.X, fwdPath.Y).Equal() { + break + } + fwdPath.append(Identity) + } + fwdFrontier = fwdPath.point + stop1, stop2 = true, true + default: + searchBudget-- // Match not found + } + debug.Update() + } + // Advance the frontier towards reverse point. 
+ if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y { + fwdFrontier.X++ + } else { + fwdFrontier.Y++ + } + + // Reverse search from the end. + if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { + break + } + for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { + // Search in a diagonal pattern for a match. + z := zigzag(i) + p := point{revFrontier.X - z, revFrontier.Y + z} + switch { + case fwdPath.X >= p.X || revPath.Y < p.Y: + stop1 = true // Hit bottom-left corner + case fwdPath.Y >= p.Y || revPath.X < p.X: + stop2 = true // Hit top-right corner + case f(p.X-1, p.Y-1).Equal(): + // Match found, so connect the path to this point. + revPath.connect(p, f) + revPath.append(Identity) + // Follow sequence of matches as far as possible. + for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { + if !f(revPath.X-1, revPath.Y-1).Equal() { + break + } + revPath.append(Identity) + } + revFrontier = revPath.point + stop1, stop2 = true, true + default: + searchBudget-- // Match not found + } + debug.Update() + } + // Advance the frontier towards forward point. + if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y { + revFrontier.X-- + } else { + revFrontier.Y-- + } + } + + // Join the forward and reverse paths and then append the reverse path. + fwdPath.connect(revPath.point, f) + for i := len(revPath.es) - 1; i >= 0; i-- { + t := revPath.es[i] + revPath.es = revPath.es[:i] + fwdPath.append(t) + } + debug.Finish() + return fwdPath.es +} + +type path struct { + dir int // +1 if forward, -1 if reverse + point // Leading point of the EditScript path + es EditScript +} + +// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types +// to the edit-script to connect p.point to dst. +func (p *path) connect(dst point, f EqualFunc) { + if p.dir > 0 { + // Connect in forward direction. + for dst.X > p.X && dst.Y > p.Y { + switch r := f(p.X, p.Y); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case dst.X-p.X >= dst.Y-p.Y: + p.append(UniqueX) + default: + p.append(UniqueY) + } + } + for dst.X > p.X { + p.append(UniqueX) + } + for dst.Y > p.Y { + p.append(UniqueY) + } + } else { + // Connect in reverse direction. + for p.X > dst.X && p.Y > dst.Y { + switch r := f(p.X-1, p.Y-1); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case p.Y-dst.Y >= p.X-dst.X: + p.append(UniqueY) + default: + p.append(UniqueX) + } + } + for p.X > dst.X { + p.append(UniqueX) + } + for p.Y > dst.Y { + p.append(UniqueY) + } + } +} + +func (p *path) append(t EditType) { + p.es = append(p.es, t) + switch t { + case Identity, Modified: + p.add(p.dir, p.dir) + case UniqueX: + p.add(p.dir, 0) + case UniqueY: + p.add(0, p.dir) + } + debug.Update() +} + +type point struct{ X, Y int } + +func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy } + +// zigzag maps a consecutive sequence of integers to a zig-zag sequence. +// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] +func zigzag(x int) int { + if x&1 != 0 { + x = ^x + } + return x >> 1 +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go new file mode 100644 index 000000000..a9e7fc0b5 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go @@ -0,0 +1,9 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
+ +package flags + +// Deterministic controls whether the output of Diff should be deterministic. +// This is only used for testing. +var Deterministic bool diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go new file mode 100644 index 000000000..01aed0a15 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go @@ -0,0 +1,10 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !go1.10 + +package flags + +// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. +const AtLeastGo110 = false diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go new file mode 100644 index 000000000..c0b667f58 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go @@ -0,0 +1,10 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build go1.10 + +package flags + +// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. +const AtLeastGo110 = true diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go new file mode 100644 index 000000000..ace1dbe86 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go @@ -0,0 +1,99 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package function provides functionality for identifying function types. +package function + +import ( + "reflect" + "regexp" + "runtime" + "strings" +) + +type funcType int + +const ( + _ funcType = iota + + tbFunc // func(T) bool + ttbFunc // func(T, T) bool + trbFunc // func(T, R) bool + tibFunc // func(T, I) bool + trFunc // func(T) R + + Equal = ttbFunc // func(T, T) bool + EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool + Transformer = trFunc // func(T) R + ValueFilter = ttbFunc // func(T, T) bool + Less = ttbFunc // func(T, T) bool + ValuePredicate = tbFunc // func(T) bool + KeyValuePredicate = trbFunc // func(T, R) bool +) + +var boolType = reflect.TypeOf(true) + +// IsType reports whether the reflect.Type is of the specified function type. +func IsType(t reflect.Type, ft funcType) bool { + if t == nil || t.Kind() != reflect.Func || t.IsVariadic() { + return false + } + ni, no := t.NumIn(), t.NumOut() + switch ft { + case tbFunc: // func(T) bool + if ni == 1 && no == 1 && t.Out(0) == boolType { + return true + } + case ttbFunc: // func(T, T) bool + if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType { + return true + } + case trbFunc: // func(T, R) bool + if ni == 2 && no == 1 && t.Out(0) == boolType { + return true + } + case tibFunc: // func(T, I) bool + if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType { + return true + } + case trFunc: // func(T) R + if ni == 1 && no == 1 { + return true + } + } + return false +} + +var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`) + +// NameOf returns the name of the function value. 
+func NameOf(v reflect.Value) string { + fnc := runtime.FuncForPC(v.Pointer()) + if fnc == nil { + return "" + } + fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm" + + // Method closures have a "-fm" suffix. + fullName = strings.TrimSuffix(fullName, "-fm") + + var name string + for len(fullName) > 0 { + inParen := strings.HasSuffix(fullName, ")") + fullName = strings.TrimSuffix(fullName, ")") + + s := lastIdentRx.FindString(fullName) + if s == "" { + break + } + name = s + "." + name + fullName = strings.TrimSuffix(fullName, s) + + if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 { + fullName = fullName[:i] + } + fullName = strings.TrimSuffix(fullName, ".") + } + return strings.TrimSuffix(name, ".") +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go new file mode 100644 index 000000000..0a01c4796 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go @@ -0,0 +1,23 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build purego + +package value + +import "reflect" + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. +type Pointer struct { + p uintptr + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. +func PointerOf(v reflect.Value) Pointer { + // NOTE: Storing a pointer as an uintptr is technically incorrect as it + // assumes that the GC implementation does not use a moving collector. + return Pointer{v.Pointer(), v.Type()} +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go new file mode 100644 index 000000000..da134ae2a --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go @@ -0,0 +1,26 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !purego + +package value + +import ( + "reflect" + "unsafe" +) + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. +type Pointer struct { + p unsafe.Pointer + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. +func PointerOf(v reflect.Value) Pointer { + // The proper representation of a pointer is unsafe.Pointer, + // which is necessary if the GC ever uses a moving collector. + return Pointer{unsafe.Pointer(v.Pointer()), v.Type()} +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go new file mode 100644 index 000000000..24fbae6e3 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go @@ -0,0 +1,106 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package value + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// SortKeys sorts a list of map keys, deduplicating keys if necessary. +// The type of each value must be comparable. +func SortKeys(vs []reflect.Value) []reflect.Value { + if len(vs) == 0 { + return vs + } + + // Sort the map keys. 
+ sort.SliceStable(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) }) + + // Deduplicate keys (fails for NaNs). + vs2 := vs[:1] + for _, v := range vs[1:] { + if isLess(vs2[len(vs2)-1], v) { + vs2 = append(vs2, v) + } + } + return vs2 +} + +// isLess is a generic function for sorting arbitrary map keys. +// The inputs must be of the same type and must be comparable. +func isLess(x, y reflect.Value) bool { + switch x.Type().Kind() { + case reflect.Bool: + return !x.Bool() && y.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return x.Int() < y.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return x.Uint() < y.Uint() + case reflect.Float32, reflect.Float64: + // NOTE: This does not sort -0 as less than +0 + // since Go maps treat -0 and +0 as equal keys. + fx, fy := x.Float(), y.Float() + return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy) + case reflect.Complex64, reflect.Complex128: + cx, cy := x.Complex(), y.Complex() + rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy) + if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) { + return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy) + } + return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry) + case reflect.Ptr, reflect.UnsafePointer, reflect.Chan: + return x.Pointer() < y.Pointer() + case reflect.String: + return x.String() < y.String() + case reflect.Array: + for i := 0; i < x.Len(); i++ { + if isLess(x.Index(i), y.Index(i)) { + return true + } + if isLess(y.Index(i), x.Index(i)) { + return false + } + } + return false + case reflect.Struct: + for i := 0; i < x.NumField(); i++ { + if isLess(x.Field(i), y.Field(i)) { + return true + } + if isLess(y.Field(i), x.Field(i)) { + return false + } + } + return false + case reflect.Interface: + vx, vy := x.Elem(), y.Elem() + if !vx.IsValid() || !vy.IsValid() { + return !vx.IsValid() && vy.IsValid() + } + tx, ty := vx.Type(), vy.Type() + if tx == ty { + return isLess(x.Elem(), y.Elem()) + } + if tx.Kind() != ty.Kind() { + return vx.Kind() < vy.Kind() + } + if tx.String() != ty.String() { + return tx.String() < ty.String() + } + if tx.PkgPath() != ty.PkgPath() { + return tx.PkgPath() < ty.PkgPath() + } + // This can happen in rare situations, so we fallback to just comparing + // the unique pointer for a reflect.Type. This guarantees deterministic + // ordering within a program, but it is obviously not stable. + return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer() + default: + // Must be Func, Map, or Slice; which are not comparable. + panic(fmt.Sprintf("%T is not comparable", x.Type())) + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go new file mode 100644 index 000000000..06a8ffd03 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go @@ -0,0 +1,48 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package value + +import ( + "math" + "reflect" +) + +// IsZero reports whether v is the zero value. +// This does not rely on Interface and so can be used on unexported fields. 
+func IsZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return v.Bool() == false + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return math.Float64bits(v.Float()) == 0 + case reflect.Complex64, reflect.Complex128: + return math.Float64bits(real(v.Complex())) == 0 && math.Float64bits(imag(v.Complex())) == 0 + case reflect.String: + return v.String() == "" + case reflect.UnsafePointer: + return v.Pointer() == 0 + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + return v.IsNil() + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if !IsZero(v.Index(i)) { + return false + } + } + return true + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if !IsZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go new file mode 100644 index 000000000..793448160 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -0,0 +1,524 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "regexp" + "strings" + + "github.com/google/go-cmp/cmp/internal/function" +) + +// Option configures for specific behavior of Equal and Diff. In particular, +// the fundamental Option functions (Ignore, Transformer, and Comparer), +// configure how equality is determined. +// +// The fundamental options may be composed with filters (FilterPath and +// FilterValues) to control the scope over which they are applied. +// +// The cmp/cmpopts package provides helper functions for creating options that +// may be used with Equal and Diff. +type Option interface { + // filter applies all filters and returns the option that remains. + // Each option may only read s.curPath and call s.callTTBFunc. + // + // An Options is returned only if multiple comparers or transformers + // can apply simultaneously and will only contain values of those types + // or sub-Options containing values of those types. + filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption +} + +// applicableOption represents the following types: +// Fundamental: ignore | validator | *comparer | *transformer +// Grouping: Options +type applicableOption interface { + Option + + // apply executes the option, which may mutate s or panic. + apply(s *state, vx, vy reflect.Value) +} + +// coreOption represents the following types: +// Fundamental: ignore | validator | *comparer | *transformer +// Filters: *pathFilter | *valuesFilter +type coreOption interface { + Option + isCore() +} + +type core struct{} + +func (core) isCore() {} + +// Options is a list of Option values that also satisfies the Option interface. +// Helper comparison packages may return an Options value when packing multiple +// Option values into a single Option. When this package processes an Options, +// it will be implicitly expanded into a flat list. +// +// Applying a filter on an Options is equivalent to applying that same filter +// on all individual options held within. 
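(Editorial aside, not part of the vendored diff.) The Options documentation above explains that a group of options is flattened and behaves as a single option. A minimal sketch of how a caller of the public go-cmp API groups options this way; the Config type and its field names are hypothetical:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

// Config is a hypothetical type used only for this illustration.
type Config struct {
	Name    string
	Retries int
}

func main() {
	// Several options grouped into one value; cmp flattens the group internally.
	opts := cmp.Options{
		cmpopts.IgnoreFields(Config{}, "Retries"), // ignore the Retries field
		cmpopts.EquateEmpty(),                     // nil and empty slices/maps compare equal
	}
	x := Config{Name: "a", Retries: 1}
	y := Config{Name: "a", Retries: 2}
	fmt.Println(cmp.Equal(x, y, opts)) // true: only the ignored field differs
}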
+type Options []Option + +func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) { + for _, opt := range opts { + switch opt := opt.filter(s, t, vx, vy); opt.(type) { + case ignore: + return ignore{} // Only ignore can short-circuit evaluation + case validator: + out = validator{} // Takes precedence over comparer or transformer + case *comparer, *transformer, Options: + switch out.(type) { + case nil: + out = opt + case validator: + // Keep validator + case *comparer, *transformer, Options: + out = Options{out, opt} // Conflicting comparers or transformers + } + } + } + return out +} + +func (opts Options) apply(s *state, _, _ reflect.Value) { + const warning = "ambiguous set of applicable options" + const help = "consider using filters to ensure at most one Comparer or Transformer may apply" + var ss []string + for _, opt := range flattenOptions(nil, opts) { + ss = append(ss, fmt.Sprint(opt)) + } + set := strings.Join(ss, "\n\t") + panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help)) +} + +func (opts Options) String() string { + var ss []string + for _, opt := range opts { + ss = append(ss, fmt.Sprint(opt)) + } + return fmt.Sprintf("Options{%s}", strings.Join(ss, ", ")) +} + +// FilterPath returns a new Option where opt is only evaluated if filter f +// returns true for the current Path in the value tree. +// +// This filter is called even if a slice element or map entry is missing and +// provides an opportunity to ignore such cases. The filter function must be +// symmetric such that the filter result is identical regardless of whether the +// missing value is from x or y. +// +// The option passed in may be an Ignore, Transformer, Comparer, Options, or +// a previously filtered Option. +func FilterPath(f func(Path) bool, opt Option) Option { + if f == nil { + panic("invalid path filter function") + } + if opt := normalizeOption(opt); opt != nil { + return &pathFilter{fnc: f, opt: opt} + } + return nil +} + +type pathFilter struct { + core + fnc func(Path) bool + opt Option +} + +func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { + if f.fnc(s.curPath) { + return f.opt.filter(s, t, vx, vy) + } + return nil +} + +func (f pathFilter) String() string { + return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt) +} + +// FilterValues returns a new Option where opt is only evaluated if filter f, +// which is a function of the form "func(T, T) bool", returns true for the +// current pair of values being compared. If either value is invalid or +// the type of the values is not assignable to T, then this filter implicitly +// returns false. +// +// The filter function must be +// symmetric (i.e., agnostic to the order of the inputs) and +// deterministic (i.e., produces the same result when given the same inputs). +// If T is an interface, it is possible that f is called with two values with +// different concrete types that both implement T. +// +// The option passed in may be an Ignore, Transformer, Comparer, Options, or +// a previously filtered Option. 
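(Editorial aside, not part of the vendored diff.) FilterValues, documented above, gates another option on a symmetric predicate over the pair of values being compared. A small self-contained sketch; the 1e-9 tolerance is an arbitrary choice for illustration:

package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Apply an approximate comparison only when both floats are non-zero,
	// so exact zeros still compare with the default == behavior.
	approx := cmp.FilterValues(
		func(x, y float64) bool { return x != 0 && y != 0 },
		cmp.Comparer(func(x, y float64) bool {
			return math.Abs(x-y) <= 1e-9*math.Max(math.Abs(x), math.Abs(y))
		}),
	)
	fmt.Println(cmp.Equal([]float64{1, 0}, []float64{1 + 1e-12, 0}, approx)) // true
}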
+func FilterValues(f interface{}, opt Option) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() { + panic(fmt.Sprintf("invalid values filter function: %T", f)) + } + if opt := normalizeOption(opt); opt != nil { + vf := &valuesFilter{fnc: v, opt: opt} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + vf.typ = ti + } + return vf + } + return nil +} + +type valuesFilter struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool + opt Option +} + +func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() { + return nil + } + if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) { + return f.opt.filter(s, t, vx, vy) + } + return nil +} + +func (f valuesFilter) String() string { + return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt) +} + +// Ignore is an Option that causes all comparisons to be ignored. +// This value is intended to be combined with FilterPath or FilterValues. +// It is an error to pass an unfiltered Ignore option to Equal. +func Ignore() Option { return ignore{} } + +type ignore struct{ core } + +func (ignore) isFiltered() bool { return false } +func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} } +func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) } +func (ignore) String() string { return "Ignore()" } + +// validator is a sentinel Option type to indicate that some options could not +// be evaluated due to unexported fields, missing slice elements, or +// missing map entries. Both values are validator only for unexported fields. +type validator struct{ core } + +func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vy.IsValid() { + return validator{} + } + if !vx.CanInterface() || !vy.CanInterface() { + return validator{} + } + return nil +} +func (validator) apply(s *state, vx, vy reflect.Value) { + // Implies missing slice element or map entry. + if !vx.IsValid() || !vy.IsValid() { + s.report(vx.IsValid() == vy.IsValid(), 0) + return + } + + // Unable to Interface implies unexported field without visibility access. + if !vx.CanInterface() || !vy.CanInterface() { + const help = "consider using a custom Comparer; if you control the implementation of type, you can also consider AllowUnexported or cmpopts.IgnoreUnexported" + panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help)) + } + + panic("not reachable") +} + +// identRx represents a valid identifier according to the Go specification. +const identRx = `[_\p{L}][_\p{L}\p{N}]*` + +var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) + +// Transformer returns an Option that applies a transformation function that +// converts values of a certain type into that of another. +// +// The transformer f must be a function "func(T) R" that converts values of +// type T to those of type R and is implicitly filtered to input values +// assignable to T. The transformer must not mutate T in any way. 
+// +// To help prevent some cases of infinite recursive cycles applying the +// same transform to the output of itself (e.g., in the case where the +// input and output types are the same), an implicit filter is added such that +// a transformer is applicable only if that exact transformer is not already +// in the tail of the Path since the last non-Transform step. +// For situations where the implicit filter is still insufficient, +// consider using cmpopts.AcyclicTransformer, which adds a filter +// to prevent the transformer from being recursively applied upon itself. +// +// The name is a user provided label that is used as the Transform.Name in the +// transformation PathStep (and eventually shown in the Diff output). +// The name must be a valid identifier or qualified identifier in Go syntax. +// If empty, an arbitrary name is used. +func Transformer(name string, f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Transformer) || v.IsNil() { + panic(fmt.Sprintf("invalid transformer function: %T", f)) + } + if name == "" { + name = function.NameOf(v) + if !identsRx.MatchString(name) { + name = "λ" // Lambda-symbol as placeholder name + } + } else if !identsRx.MatchString(name) { + panic(fmt.Sprintf("invalid name: %q", name)) + } + tr := &transformer{name: name, fnc: reflect.ValueOf(f)} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + tr.typ = ti + } + return tr +} + +type transformer struct { + core + name string + typ reflect.Type // T + fnc reflect.Value // func(T) R +} + +func (tr *transformer) isFiltered() bool { return tr.typ != nil } + +func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption { + for i := len(s.curPath) - 1; i >= 0; i-- { + if t, ok := s.curPath[i].(Transform); !ok { + break // Hit most recent non-Transform step + } else if tr == t.trans { + return nil // Cannot directly use same Transform + } + } + if tr.typ == nil || t.AssignableTo(tr.typ) { + return tr + } + return nil +} + +func (tr *transformer) apply(s *state, vx, vy reflect.Value) { + step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}} + vvx := s.callTRFunc(tr.fnc, vx, step) + vvy := s.callTRFunc(tr.fnc, vy, step) + step.vx, step.vy = vvx, vvy + s.compareAny(step) +} + +func (tr transformer) String() string { + return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc)) +} + +// Comparer returns an Option that determines whether two values are equal +// to each other. +// +// The comparer f must be a function "func(T, T) bool" and is implicitly +// filtered to input values assignable to T. If T is an interface, it is +// possible that f is called with two values of different concrete types that +// both implement T. 
+// +// The equality function must be: +// • Symmetric: equal(x, y) == equal(y, x) +// • Deterministic: equal(x, y) == equal(x, y) +// • Pure: equal(x, y) does not modify x or y +func Comparer(f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Equal) || v.IsNil() { + panic(fmt.Sprintf("invalid comparer function: %T", f)) + } + cm := &comparer{fnc: v} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + cm.typ = ti + } + return cm +} + +type comparer struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool +} + +func (cm *comparer) isFiltered() bool { return cm.typ != nil } + +func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption { + if cm.typ == nil || t.AssignableTo(cm.typ) { + return cm + } + return nil +} + +func (cm *comparer) apply(s *state, vx, vy reflect.Value) { + eq := s.callTTBFunc(cm.fnc, vx, vy) + s.report(eq, reportByFunc) +} + +func (cm comparer) String() string { + return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc)) +} + +// AllowUnexported returns an Option that forcibly allows operations on +// unexported fields in certain structs, which are specified by passing in a +// value of each struct type. +// +// Users of this option must understand that comparing on unexported fields +// from external packages is not safe since changes in the internal +// implementation of some external package may cause the result of Equal +// to unexpectedly change. However, it may be valid to use this option on types +// defined in an internal package where the semantic meaning of an unexported +// field is in the control of the user. +// +// In many cases, a custom Comparer should be used instead that defines +// equality as a function of the public API of a type rather than the underlying +// unexported implementation. +// +// For example, the reflect.Type documentation defines equality to be determined +// by the == operator on the interface (essentially performing a shallow pointer +// comparison) and most attempts to compare *regexp.Regexp types are interested +// in only checking that the regular expression strings are equal. +// Both of these are accomplished using Comparers: +// +// Comparer(func(x, y reflect.Type) bool { return x == y }) +// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() }) +// +// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore +// all unexported fields on specified struct types. +func AllowUnexported(types ...interface{}) Option { + if !supportAllowUnexported { + panic("AllowUnexported is not supported on purego builds, Google App Engine Standard, or GopherJS") + } + m := make(map[reflect.Type]bool) + for _, typ := range types { + t := reflect.TypeOf(typ) + if t.Kind() != reflect.Struct { + panic(fmt.Sprintf("invalid struct type: %T", typ)) + } + m[t] = true + } + return visibleStructs(m) +} + +type visibleStructs map[reflect.Type]bool + +func (visibleStructs) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { + panic("not implemented") +} + +// Result represents the comparison result for a single node and +// is provided by cmp when calling Result (see Reporter). +type Result struct { + _ [0]func() // Make Result incomparable + flags resultFlags +} + +// Equal reports whether the node was determined to be equal or not. +// As a special case, ignored nodes are considered equal. 
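(Editorial aside, not part of the vendored diff.) The Comparer and AllowUnexported documentation above recommends defining equality through the public API of a type rather than forcing access to its unexported fields. A brief sketch of both approaches on a hypothetical counter type:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// counter is a hypothetical type whose only state is unexported.
type counter struct{ n int }

func main() {
	x, y := counter{n: 3}, counter{n: 3}

	// Approach 1: explicitly allow cmp to descend into counter's unexported fields.
	fmt.Println(cmp.Equal(x, y, cmp.AllowUnexported(counter{}))) // true

	// Approach 2 (preferred by the doc above): express equality as a Comparer.
	eq := cmp.Comparer(func(a, b counter) bool { return a.n == b.n })
	fmt.Println(cmp.Equal(x, y, eq)) // true
}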
+func (r Result) Equal() bool { + return r.flags&(reportEqual|reportByIgnore) != 0 +} + +// ByIgnore reports whether the node is equal because it was ignored. +// This never reports true if Equal reports false. +func (r Result) ByIgnore() bool { + return r.flags&reportByIgnore != 0 +} + +// ByMethod reports whether the Equal method determined equality. +func (r Result) ByMethod() bool { + return r.flags&reportByMethod != 0 +} + +// ByFunc reports whether a Comparer function determined equality. +func (r Result) ByFunc() bool { + return r.flags&reportByFunc != 0 +} + +type resultFlags uint + +const ( + _ resultFlags = (1 << iota) / 2 + + reportEqual + reportUnequal + reportByIgnore + reportByMethod + reportByFunc +) + +// Reporter is an Option that can be passed to Equal. When Equal traverses +// the value trees, it calls PushStep as it descends into each node in the +// tree and PopStep as it ascend out of the node. The leaves of the tree are +// either compared (determined to be equal or not equal) or ignored and reported +// as such by calling the Report method. +func Reporter(r interface { + // PushStep is called when a tree-traversal operation is performed. + // The PathStep itself is only valid until the step is popped. + // The PathStep.Values are valid for the duration of the entire traversal + // and must not be mutated. + // + // Equal always calls PushStep at the start to provide an operation-less + // PathStep used to report the root values. + // + // Within a slice, the exact set of inserted, removed, or modified elements + // is unspecified and may change in future implementations. + // The entries of a map are iterated through in an unspecified order. + PushStep(PathStep) + + // Report is called exactly once on leaf nodes to report whether the + // comparison identified the node as equal, unequal, or ignored. + // A leaf node is one that is immediately preceded by and followed by + // a pair of PushStep and PopStep calls. + Report(Result) + + // PopStep ascends back up the value tree. + // There is always a matching pop call for every push call. + PopStep() +}) Option { + return reporter{r} +} + +type reporter struct{ reporterIface } +type reporterIface interface { + PushStep(PathStep) + Report(Result) + PopStep() +} + +func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { + panic("not implemented") +} + +// normalizeOption normalizes the input options such that all Options groups +// are flattened and groups with a single element are reduced to that element. +// Only coreOptions and Options containing coreOptions are allowed. +func normalizeOption(src Option) Option { + switch opts := flattenOptions(nil, Options{src}); len(opts) { + case 0: + return nil + case 1: + return opts[0] + default: + return opts + } +} + +// flattenOptions copies all options in src to dst as a flat list. +// Only coreOptions and Options containing coreOptions are allowed. +func flattenOptions(dst, src Options) Options { + for _, opt := range src { + switch opt := opt.(type) { + case nil: + continue + case Options: + dst = flattenOptions(dst, opt) + case coreOption: + dst = append(dst, opt) + default: + panic(fmt.Sprintf("invalid option type: %T", opt)) + } + } + return dst +} diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go new file mode 100644 index 000000000..96fffd291 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -0,0 +1,308 @@ +// Copyright 2017, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +// Path is a list of PathSteps describing the sequence of operations to get +// from some root type to the current position in the value tree. +// The first Path element is always an operation-less PathStep that exists +// simply to identify the initial type. +// +// When traversing structs with embedded structs, the embedded struct will +// always be accessed as a field before traversing the fields of the +// embedded struct themselves. That is, an exported field from the +// embedded struct will never be accessed directly from the parent struct. +type Path []PathStep + +// PathStep is a union-type for specific operations to traverse +// a value's tree structure. Users of this package never need to implement +// these types as values of this type will be returned by this package. +// +// Implementations of this interface are +// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform. +type PathStep interface { + String() string + + // Type is the resulting type after performing the path step. + Type() reflect.Type + + // Values is the resulting values after performing the path step. + // The type of each valid value is guaranteed to be identical to Type. + // + // In some cases, one or both may be invalid or have restrictions: + // • For StructField, both are not interface-able if the current field + // is unexported and the struct type is not explicitly permitted by + // AllowUnexported to traverse unexported fields. + // • For SliceIndex, one may be invalid if an element is missing from + // either the x or y slice. + // • For MapIndex, one may be invalid if an entry is missing from + // either the x or y map. + // + // The provided values must not be mutated. + Values() (vx, vy reflect.Value) +} + +var ( + _ PathStep = StructField{} + _ PathStep = SliceIndex{} + _ PathStep = MapIndex{} + _ PathStep = Indirect{} + _ PathStep = TypeAssertion{} + _ PathStep = Transform{} +) + +func (pa *Path) push(s PathStep) { + *pa = append(*pa, s) +} + +func (pa *Path) pop() { + *pa = (*pa)[:len(*pa)-1] +} + +// Last returns the last PathStep in the Path. +// If the path is empty, this returns a non-nil PathStep that reports a nil Type. +func (pa Path) Last() PathStep { + return pa.Index(-1) +} + +// Index returns the ith step in the Path and supports negative indexing. +// A negative index starts counting from the tail of the Path such that -1 +// refers to the last step, -2 refers to the second-to-last step, and so on. +// If index is invalid, this returns a non-nil PathStep that reports a nil Type. +func (pa Path) Index(i int) PathStep { + if i < 0 { + i = len(pa) + i + } + if i < 0 || i >= len(pa) { + return pathStep{} + } + return pa[i] +} + +// String returns the simplified path to a node. +// The simplified path only contains struct field accesses. +// +// For example: +// MyMap.MySlices.MyField +func (pa Path) String() string { + var ss []string + for _, s := range pa { + if _, ok := s.(StructField); ok { + ss = append(ss, s.String()) + } + } + return strings.TrimPrefix(strings.Join(ss, ""), ".") +} + +// GoString returns the path to a specific node using Go syntax. 
+// +// For example: +// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField +func (pa Path) GoString() string { + var ssPre, ssPost []string + var numIndirect int + for i, s := range pa { + var nextStep PathStep + if i+1 < len(pa) { + nextStep = pa[i+1] + } + switch s := s.(type) { + case Indirect: + numIndirect++ + pPre, pPost := "(", ")" + switch nextStep.(type) { + case Indirect: + continue // Next step is indirection, so let them batch up + case StructField: + numIndirect-- // Automatic indirection on struct fields + case nil: + pPre, pPost = "", "" // Last step; no need for parenthesis + } + if numIndirect > 0 { + ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect)) + ssPost = append(ssPost, pPost) + } + numIndirect = 0 + continue + case Transform: + ssPre = append(ssPre, s.trans.name+"(") + ssPost = append(ssPost, ")") + continue + } + ssPost = append(ssPost, s.String()) + } + for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 { + ssPre[i], ssPre[j] = ssPre[j], ssPre[i] + } + return strings.Join(ssPre, "") + strings.Join(ssPost, "") +} + +type pathStep struct { + typ reflect.Type + vx, vy reflect.Value +} + +func (ps pathStep) Type() reflect.Type { return ps.typ } +func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy } +func (ps pathStep) String() string { + if ps.typ == nil { + return "" + } + s := ps.typ.String() + if s == "" || strings.ContainsAny(s, "{}\n") { + return "root" // Type too simple or complex to print + } + return fmt.Sprintf("{%s}", s) +} + +// StructField represents a struct field access on a field called Name. +type StructField struct{ *structField } +type structField struct { + pathStep + name string + idx int + + // These fields are used for forcibly accessing an unexported field. + // pvx, pvy, and field are only valid if unexported is true. + unexported bool + mayForce bool // Forcibly allow visibility + pvx, pvy reflect.Value // Parent values + field reflect.StructField // Field information +} + +func (sf StructField) Type() reflect.Type { return sf.typ } +func (sf StructField) Values() (vx, vy reflect.Value) { + if !sf.unexported { + return sf.vx, sf.vy // CanInterface reports true + } + + // Forcibly obtain read-write access to an unexported struct field. + if sf.mayForce { + vx = retrieveUnexportedField(sf.pvx, sf.field) + vy = retrieveUnexportedField(sf.pvy, sf.field) + return vx, vy // CanInterface reports true + } + return sf.vx, sf.vy // CanInterface reports false +} +func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) } + +// Name is the field name. +func (sf StructField) Name() string { return sf.name } + +// Index is the index of the field in the parent struct type. +// See reflect.Type.Field. +func (sf StructField) Index() int { return sf.idx } + +// SliceIndex is an index operation on a slice or array at some index Key. +type SliceIndex struct{ *sliceIndex } +type sliceIndex struct { + pathStep + xkey, ykey int +} + +func (si SliceIndex) Type() reflect.Type { return si.typ } +func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy } +func (si SliceIndex) String() string { + switch { + case si.xkey == si.ykey: + return fmt.Sprintf("[%d]", si.xkey) + case si.ykey == -1: + // [5->?] 
means "I don't know where X[5] went" + return fmt.Sprintf("[%d->?]", si.xkey) + case si.xkey == -1: + // [?->3] means "I don't know where Y[3] came from" + return fmt.Sprintf("[?->%d]", si.ykey) + default: + // [5->3] means "X[5] moved to Y[3]" + return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey) + } +} + +// Key is the index key; it may return -1 if in a split state +func (si SliceIndex) Key() int { + if si.xkey != si.ykey { + return -1 + } + return si.xkey +} + +// SplitKeys are the indexes for indexing into slices in the +// x and y values, respectively. These indexes may differ due to the +// insertion or removal of an element in one of the slices, causing +// all of the indexes to be shifted. If an index is -1, then that +// indicates that the element does not exist in the associated slice. +// +// Key is guaranteed to return -1 if and only if the indexes returned +// by SplitKeys are not the same. SplitKeys will never return -1 for +// both indexes. +func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey } + +// MapIndex is an index operation on a map at some index Key. +type MapIndex struct{ *mapIndex } +type mapIndex struct { + pathStep + key reflect.Value +} + +func (mi MapIndex) Type() reflect.Type { return mi.typ } +func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy } +func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) } + +// Key is the value of the map key. +func (mi MapIndex) Key() reflect.Value { return mi.key } + +// Indirect represents pointer indirection on the parent type. +type Indirect struct{ *indirect } +type indirect struct { + pathStep +} + +func (in Indirect) Type() reflect.Type { return in.typ } +func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy } +func (in Indirect) String() string { return "*" } + +// TypeAssertion represents a type assertion on an interface. +type TypeAssertion struct{ *typeAssertion } +type typeAssertion struct { + pathStep +} + +func (ta TypeAssertion) Type() reflect.Type { return ta.typ } +func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } +func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) } + +// Transform is a transformation from the parent type to the current type. +type Transform struct{ *transform } +type transform struct { + pathStep + trans *transformer +} + +func (tf Transform) Type() reflect.Type { return tf.typ } +func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy } +func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } + +// Name is the name of the Transformer. +func (tf Transform) Name() string { return tf.trans.name } + +// Func is the function pointer to the transformer function. +func (tf Transform) Func() reflect.Value { return tf.trans.fnc } + +// Option returns the originally constructed Transformer option. +// The == operator can be used to detect the exact option used. +func (tf Transform) Option() Option { return tf.trans } + +// isExported reports whether the identifier is exported. +func isExported(id string) bool { + r, _ := utf8.DecodeRuneInString(id) + return unicode.IsUpper(r) +} diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go new file mode 100644 index 000000000..6ddf29993 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report.go @@ -0,0 +1,51 @@ +// Copyright 2017, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +// defaultReporter implements the reporter interface. +// +// As Equal serially calls the PushStep, Report, and PopStep methods, the +// defaultReporter constructs a tree-based representation of the compared value +// and the result of each comparison (see valueNode). +// +// When the String method is called, the FormatDiff method transforms the +// valueNode tree into a textNode tree, which is a tree-based representation +// of the textual output (see textNode). +// +// Lastly, the textNode.String method produces the final report as a string. +type defaultReporter struct { + root *valueNode + curr *valueNode +} + +func (r *defaultReporter) PushStep(ps PathStep) { + r.curr = r.curr.PushStep(ps) + if r.root == nil { + r.root = r.curr + } +} +func (r *defaultReporter) Report(rs Result) { + r.curr.Report(rs) +} +func (r *defaultReporter) PopStep() { + r.curr = r.curr.PopStep() +} + +// String provides a full report of the differences detected as a structured +// literal in pseudo-Go syntax. String may only be called after the entire tree +// has been traversed. +func (r *defaultReporter) String() string { + assert(r.root != nil && r.curr == nil) + if r.root.NumDiff == 0 { + return "" + } + return formatOptions{}.FormatDiff(r.root).String() +} + +func assert(ok bool) { + if !ok { + panic("assertion failure") + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go new file mode 100644 index 000000000..17a05eede --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -0,0 +1,296 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + + "github.com/google/go-cmp/cmp/internal/value" +) + +// TODO: Enforce limits? +// * Enforce maximum number of records to print per node? +// * Enforce maximum size in bytes allowed? +// * As a heuristic, use less verbosity for equal nodes than unequal nodes. +// TODO: Enforce unique outputs? +// * Avoid Stringer methods if it results in same output? +// * Print pointer address if outputs still equal? + +// numContextRecords is the number of surrounding equal records to print. +const numContextRecords = 2 + +type diffMode byte + +const ( + diffUnknown diffMode = 0 + diffIdentical diffMode = ' ' + diffRemoved diffMode = '-' + diffInserted diffMode = '+' +) + +type typeMode int + +const ( + // emitType always prints the type. + emitType typeMode = iota + // elideType never prints the type. + elideType + // autoType prints the type only for composite kinds + // (i.e., structs, slices, arrays, and maps). + autoType +) + +type formatOptions struct { + // DiffMode controls the output mode of FormatDiff. + // + // If diffUnknown, then produce a diff of the x and y values. + // If diffIdentical, then emit values as if they were equal. + // If diffRemoved, then only emit x values (ignoring y values). + // If diffInserted, then only emit y values (ignoring x values). + DiffMode diffMode + + // TypeMode controls whether to print the type for the current node. + // + // As a general rule of thumb, we always print the type of the next node + // after an interface, and always elide the type of the next node after + // a slice or map node. 
+ TypeMode typeMode + + // formatValueOptions are options specific to printing reflect.Values. + formatValueOptions +} + +func (opts formatOptions) WithDiffMode(d diffMode) formatOptions { + opts.DiffMode = d + return opts +} +func (opts formatOptions) WithTypeMode(t typeMode) formatOptions { + opts.TypeMode = t + return opts +} + +// FormatDiff converts a valueNode tree into a textNode tree, where the later +// is a textual representation of the differences detected in the former. +func (opts formatOptions) FormatDiff(v *valueNode) textNode { + // Check whether we have specialized formatting for this node. + // This is not necessary, but helpful for producing more readable outputs. + if opts.CanFormatDiffSlice(v) { + return opts.FormatDiffSlice(v) + } + + // For leaf nodes, format the value based on the reflect.Values alone. + if v.MaxDepth == 0 { + switch opts.DiffMode { + case diffUnknown, diffIdentical: + // Format Equal. + if v.NumDiff == 0 { + outx := opts.FormatValue(v.ValueX, visitedPointers{}) + outy := opts.FormatValue(v.ValueY, visitedPointers{}) + if v.NumIgnored > 0 && v.NumSame == 0 { + return textEllipsis + } else if outx.Len() < outy.Len() { + return outx + } else { + return outy + } + } + + // Format unequal. + assert(opts.DiffMode == diffUnknown) + var list textList + outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, visitedPointers{}) + outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, visitedPointers{}) + if outx != nil { + list = append(list, textRecord{Diff: '-', Value: outx}) + } + if outy != nil { + list = append(list, textRecord{Diff: '+', Value: outy}) + } + return opts.WithTypeMode(emitType).FormatType(v.Type, list) + case diffRemoved: + return opts.FormatValue(v.ValueX, visitedPointers{}) + case diffInserted: + return opts.FormatValue(v.ValueY, visitedPointers{}) + default: + panic("invalid diff mode") + } + } + + // Descend into the child value node. + if v.TransformerName != "" { + out := opts.WithTypeMode(emitType).FormatDiff(v.Value) + out = textWrap{"Inverse(" + v.TransformerName + ", ", out, ")"} + return opts.FormatType(v.Type, out) + } else { + switch k := v.Type.Kind(); k { + case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map: + return opts.FormatType(v.Type, opts.formatDiffList(v.Records, k)) + case reflect.Ptr: + return textWrap{"&", opts.FormatDiff(v.Value), ""} + case reflect.Interface: + return opts.WithTypeMode(emitType).FormatDiff(v.Value) + default: + panic(fmt.Sprintf("%v cannot have children", k)) + } + } +} + +func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) textNode { + // Derive record name based on the data structure kind. + var name string + var formatKey func(reflect.Value) string + switch k { + case reflect.Struct: + name = "field" + opts = opts.WithTypeMode(autoType) + formatKey = func(v reflect.Value) string { return v.String() } + case reflect.Slice, reflect.Array: + name = "element" + opts = opts.WithTypeMode(elideType) + formatKey = func(reflect.Value) string { return "" } + case reflect.Map: + name = "entry" + opts = opts.WithTypeMode(elideType) + formatKey = formatMapKey + } + + // Handle unification. + switch opts.DiffMode { + case diffIdentical, diffRemoved, diffInserted: + var list textList + var deferredEllipsis bool // Add final "..." to indicate records were dropped + for _, r := range recs { + // Elide struct fields that are zero value. 
+ if k == reflect.Struct { + var isZero bool + switch opts.DiffMode { + case diffIdentical: + isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY) + case diffRemoved: + isZero = value.IsZero(r.Value.ValueX) + case diffInserted: + isZero = value.IsZero(r.Value.ValueY) + } + if isZero { + continue + } + } + // Elide ignored nodes. + if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 { + deferredEllipsis = !(k == reflect.Slice || k == reflect.Array) + if !deferredEllipsis { + list.AppendEllipsis(diffStats{}) + } + continue + } + if out := opts.FormatDiff(r.Value); out != nil { + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + } + if deferredEllipsis { + list.AppendEllipsis(diffStats{}) + } + return textWrap{"{", list, "}"} + case diffUnknown: + default: + panic("invalid diff mode") + } + + // Handle differencing. + var list textList + groups := coalesceAdjacentRecords(name, recs) + for i, ds := range groups { + // Handle equal records. + if ds.NumDiff() == 0 { + // Compute the number of leading and trailing records to print. + var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 { + if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { + break + } + numLo++ + } + for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { + break + } + numHi++ + } + if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 { + numHi++ // Avoid pointless coalescing of a single equal record + } + + // Format the equal values. + for _, r := range recs[:numLo] { + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + } + for _, r := range recs[numEqual-numHi : numEqual] { + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + recs = recs[numEqual:] + continue + } + + // Handle unequal records. + for _, r := range recs[:ds.NumDiff()] { + switch { + case opts.CanFormatDiffSlice(r.Value): + out := opts.FormatDiffSlice(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + case r.Value.NumChildren == r.Value.MaxDepth: + outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value) + outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value) + if outx != nil { + list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx}) + } + if outy != nil { + list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy}) + } + default: + out := opts.FormatDiff(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + } + recs = recs[ds.NumDiff():] + } + assert(len(recs) == 0) + return textWrap{"{", list, "}"} +} + +// coalesceAdjacentRecords coalesces the list of records into groups of +// adjacent equal, or unequal counts. 
+func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) { + var prevCase int // Arbitrary index into which case last occurred + lastStats := func(i int) *diffStats { + if prevCase != i { + groups = append(groups, diffStats{Name: name}) + prevCase = i + } + return &groups[len(groups)-1] + } + for _, r := range recs { + switch rv := r.Value; { + case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0: + lastStats(1).NumIgnored++ + case rv.NumDiff == 0: + lastStats(1).NumIdentical++ + case rv.NumDiff > 0 && !rv.ValueY.IsValid(): + lastStats(2).NumRemoved++ + case rv.NumDiff > 0 && !rv.ValueX.IsValid(): + lastStats(2).NumInserted++ + default: + lastStats(2).NumModified++ + } + } + return groups +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go new file mode 100644 index 000000000..2761b6289 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -0,0 +1,278 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "unicode" + + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/value" +) + +type formatValueOptions struct { + // AvoidStringer controls whether to avoid calling custom stringer + // methods like error.Error or fmt.Stringer.String. + AvoidStringer bool + + // ShallowPointers controls whether to avoid descending into pointers. + // Useful when printing map keys, where pointer comparison is performed + // on the pointer address rather than the pointed-at value. + ShallowPointers bool + + // PrintAddresses controls whether to print the address of all pointers, + // slice elements, and maps. + PrintAddresses bool +} + +// FormatType prints the type as if it were wrapping s. +// This may return s as-is depending on the current type and TypeMode mode. +func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { + // Check whether to emit the type or not. + switch opts.TypeMode { + case autoType: + switch t.Kind() { + case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map: + if s.Equal(textNil) { + return s + } + default: + return s + } + case elideType: + return s + } + + // Determine the type label, applying special handling for unnamed types. + typeName := t.String() + if t.Name() == "" { + // According to Go grammar, certain type literals contain symbols that + // do not strongly bind to the next lexicographical token (e.g., *T). + switch t.Kind() { + case reflect.Chan, reflect.Func, reflect.Ptr: + typeName = "(" + typeName + ")" + } + typeName = strings.Replace(typeName, "struct {", "struct{", -1) + typeName = strings.Replace(typeName, "interface {", "interface{", -1) + } + + // Avoid wrap the value in parenthesis if unnecessary. + if s, ok := s.(textWrap); ok { + hasParens := strings.HasPrefix(s.Prefix, "(") && strings.HasSuffix(s.Suffix, ")") + hasBraces := strings.HasPrefix(s.Prefix, "{") && strings.HasSuffix(s.Suffix, "}") + if hasParens || hasBraces { + return textWrap{typeName, s, ""} + } + } + return textWrap{typeName + "(", s, ")"} +} + +// FormatValue prints the reflect.Value, taking extra care to avoid descending +// into pointers already in m. As pointers are visited, m is also updated. 
+func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out textNode) { + if !v.IsValid() { + return nil + } + t := v.Type() + + // Check whether there is an Error or String method to call. + if !opts.AvoidStringer && v.CanInterface() { + // Avoid calling Error or String methods on nil receivers since many + // implementations crash when doing so. + if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() { + switch v := v.Interface().(type) { + case error: + return textLine("e" + formatString(v.Error())) + case fmt.Stringer: + return textLine("s" + formatString(v.String())) + } + } + } + + // Check whether to explicitly wrap the result with the type. + var skipType bool + defer func() { + if !skipType { + out = opts.FormatType(t, out) + } + }() + + var ptr string + switch t.Kind() { + case reflect.Bool: + return textLine(fmt.Sprint(v.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return textLine(fmt.Sprint(v.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + // Unnamed uints are usually bytes or words, so use hexadecimal. + if t.PkgPath() == "" || t.Kind() == reflect.Uintptr { + return textLine(formatHex(v.Uint())) + } + return textLine(fmt.Sprint(v.Uint())) + case reflect.Float32, reflect.Float64: + return textLine(fmt.Sprint(v.Float())) + case reflect.Complex64, reflect.Complex128: + return textLine(fmt.Sprint(v.Complex())) + case reflect.String: + return textLine(formatString(v.String())) + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + return textLine(formatPointer(v)) + case reflect.Struct: + var list textList + for i := 0; i < v.NumField(); i++ { + vv := v.Field(i) + if value.IsZero(vv) { + continue // Elide fields with zero values + } + s := opts.WithTypeMode(autoType).FormatValue(vv, m) + list = append(list, textRecord{Key: t.Field(i).Name, Value: s}) + } + return textWrap{"{", list, "}"} + case reflect.Slice: + if v.IsNil() { + return textNil + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + fallthrough + case reflect.Array: + var list textList + for i := 0; i < v.Len(); i++ { + vi := v.Index(i) + if vi.CanAddr() { // Check for cyclic elements + p := vi.Addr() + if m.Visit(p) { + var out textNode + out = textLine(formatPointer(p)) + out = opts.WithTypeMode(emitType).FormatType(p.Type(), out) + out = textWrap{"*", out, ""} + list = append(list, textRecord{Value: out}) + continue + } + } + s := opts.WithTypeMode(elideType).FormatValue(vi, m) + list = append(list, textRecord{Value: s}) + } + return textWrap{ptr + "{", list, "}"} + case reflect.Map: + if v.IsNil() { + return textNil + } + if m.Visit(v) { + return textLine(formatPointer(v)) + } + + var list textList + for _, k := range value.SortKeys(v.MapKeys()) { + sk := formatMapKey(k) + sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), m) + list = append(list, textRecord{Key: sk, Value: sv}) + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + return textWrap{ptr + "{", list, "}"} + case reflect.Ptr: + if v.IsNil() { + return textNil + } + if m.Visit(v) || opts.ShallowPointers { + return textLine(formatPointer(v)) + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + skipType = true // Let the underlying value print the type instead + return textWrap{"&" + ptr, opts.FormatValue(v.Elem(), m), ""} + case reflect.Interface: + if v.IsNil() { + return textNil + } + // Interfaces accept different concrete types, + // so configure the underlying 
value to explicitly print the type. + skipType = true // Print the concrete type instead + return opts.WithTypeMode(emitType).FormatValue(v.Elem(), m) + default: + panic(fmt.Sprintf("%v kind not handled", v.Kind())) + } +} + +// formatMapKey formats v as if it were a map key. +// The result is guaranteed to be a single line. +func formatMapKey(v reflect.Value) string { + var opts formatOptions + opts.TypeMode = elideType + opts.ShallowPointers = true + s := opts.FormatValue(v, visitedPointers{}).String() + return strings.TrimSpace(s) +} + +// formatString prints s as a double-quoted or backtick-quoted string. +func formatString(s string) string { + // Use quoted string if it the same length as a raw string literal. + // Otherwise, attempt to use the raw string form. + qs := strconv.Quote(s) + if len(qs) == 1+len(s)+1 { + return qs + } + + // Disallow newlines to ensure output is a single line. + // Only allow printable runes for readability purposes. + rawInvalid := func(r rune) bool { + return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t') + } + if strings.IndexFunc(s, rawInvalid) < 0 { + return "`" + s + "`" + } + return qs +} + +// formatHex prints u as a hexadecimal integer in Go notation. +func formatHex(u uint64) string { + var f string + switch { + case u <= 0xff: + f = "0x%02x" + case u <= 0xffff: + f = "0x%04x" + case u <= 0xffffff: + f = "0x%06x" + case u <= 0xffffffff: + f = "0x%08x" + case u <= 0xffffffffff: + f = "0x%010x" + case u <= 0xffffffffffff: + f = "0x%012x" + case u <= 0xffffffffffffff: + f = "0x%014x" + case u <= 0xffffffffffffffff: + f = "0x%016x" + } + return fmt.Sprintf(f, u) +} + +// formatPointer prints the address of the pointer. +func formatPointer(v reflect.Value) string { + p := v.Pointer() + if flags.Deterministic { + p = 0xdeadf00f // Only used for stable testing purposes + } + return fmt.Sprintf("⟪0x%x⟫", p) +} + +type visitedPointers map[value.Pointer]struct{} + +// Visit inserts pointer v into the visited map and reports whether it had +// already been visited before. +func (m visitedPointers) Visit(v reflect.Value) bool { + p := value.PointerOf(v) + _, visited := m[p] + m[p] = struct{}{} + return visited +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go new file mode 100644 index 000000000..eafcf2e4c --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -0,0 +1,333 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "bytes" + "fmt" + "reflect" + "strings" + "unicode" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/diff" +) + +// CanFormatDiffSlice reports whether we support custom formatting for nodes +// that are slices of primitive kinds or strings. +func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { + switch { + case opts.DiffMode != diffUnknown: + return false // Must be formatting in diff mode + case v.NumDiff == 0: + return false // No differences detected + case v.NumIgnored+v.NumCompared+v.NumTransformed > 0: + // TODO: Handle the case where someone uses bytes.Equal on a large slice. 
+ return false // Some custom option was used to determined equality + case !v.ValueX.IsValid() || !v.ValueY.IsValid(): + return false // Both values must be valid + } + + switch t := v.Type; t.Kind() { + case reflect.String: + case reflect.Array, reflect.Slice: + // Only slices of primitive types have specialized handling. + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + default: + return false + } + + // If a sufficient number of elements already differ, + // use specialized formatting even if length requirement is not met. + if v.NumDiff > v.NumSame { + return true + } + default: + return false + } + + // Use specialized string diffing for longer slices or strings. + const minLength = 64 + return v.ValueX.Len() >= minLength && v.ValueY.Len() >= minLength +} + +// FormatDiffSlice prints a diff for the slices (or strings) represented by v. +// This provides custom-tailored logic to make printing of differences in +// textual strings and slices of primitive kinds more readable. +func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { + assert(opts.DiffMode == diffUnknown) + t, vx, vy := v.Type, v.ValueX, v.ValueY + + // Auto-detect the type of the data. + var isLinedText, isText, isBinary bool + var sx, sy string + switch { + case t.Kind() == reflect.String: + sx, sy = vx.String(), vy.String() + isText = true // Initial estimate, verify later + case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)): + sx, sy = string(vx.Bytes()), string(vy.Bytes()) + isBinary = true // Initial estimate, verify later + case t.Kind() == reflect.Array: + // Arrays need to be addressable for slice operations to work. + vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem() + vx2.Set(vx) + vy2.Set(vy) + vx, vy = vx2, vy2 + } + if isText || isBinary { + var numLines, lastLineIdx, maxLineLen int + isBinary = false + for i, r := range sx + sy { + if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError { + isBinary = true + break + } + if r == '\n' { + if maxLineLen < i-lastLineIdx { + maxLineLen = i - lastLineIdx + } + lastLineIdx = i + 1 + numLines++ + } + } + isText = !isBinary + isLinedText = isText && numLines >= 4 && maxLineLen <= 256 + } + + // Format the string into printable records. + var list textList + var delim string + switch { + // If the text appears to be multi-lined text, + // then perform differencing across individual lines. + case isLinedText: + ssx := strings.Split(sx, "\n") + ssy := strings.Split(sy, "\n") + list = opts.formatDiffSlice( + reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line", + func(v reflect.Value, d diffMode) textRecord { + s := formatString(v.Index(0).String()) + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + delim = "\n" + // If the text appears to be single-lined text, + // then perform differencing in approximately fixed-sized chunks. + // The output is printed as quoted strings. + case isText: + list = opts.formatDiffSlice( + reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte", + func(v reflect.Value, d diffMode) textRecord { + s := formatString(v.String()) + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + delim = "" + // If the text appears to be binary data, + // then perform differencing in approximately fixed-sized chunks. + // The output is inspired by hexdump. 
+ case isBinary: + list = opts.formatDiffSlice( + reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte", + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + ss = append(ss, formatHex(v.Index(i).Uint())) + } + s := strings.Join(ss, ", ") + comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String()))) + return textRecord{Diff: d, Value: textLine(s), Comment: comment} + }, + ) + // For all other slices of primitive types, + // then perform differencing in approximately fixed-sized chunks. + // The size of each chunk depends on the width of the element kind. + default: + var chunkSize int + if t.Elem().Kind() == reflect.Bool { + chunkSize = 16 + } else { + switch t.Elem().Bits() { + case 8: + chunkSize = 16 + case 16: + chunkSize = 12 + case 32: + chunkSize = 8 + default: + chunkSize = 8 + } + } + list = opts.formatDiffSlice( + vx, vy, chunkSize, t.Elem().Kind().String(), + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + ss = append(ss, fmt.Sprint(v.Index(i).Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + ss = append(ss, formatHex(v.Index(i).Uint())) + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + ss = append(ss, fmt.Sprint(v.Index(i).Interface())) + } + } + s := strings.Join(ss, ", ") + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + } + + // Wrap the output with appropriate type information. + var out textNode = textWrap{"{", list, "}"} + if !isText { + // The "{...}" byte-sequence literal is not valid Go syntax for strings. + // Emit the type for extra clarity (e.g. "string{...}"). + if t.Kind() == reflect.String { + opts = opts.WithTypeMode(emitType) + } + return opts.FormatType(t, out) + } + switch t.Kind() { + case reflect.String: + out = textWrap{"strings.Join(", out, fmt.Sprintf(", %q)", delim)} + if t != reflect.TypeOf(string("")) { + out = opts.FormatType(t, out) + } + case reflect.Slice: + out = textWrap{"bytes.Join(", out, fmt.Sprintf(", %q)", delim)} + if t != reflect.TypeOf([]byte(nil)) { + out = opts.FormatType(t, out) + } + } + return out +} + +// formatASCII formats s as an ASCII string. +// This is useful for printing binary strings in a semi-legible way. +func formatASCII(s string) string { + b := bytes.Repeat([]byte{'.'}, len(s)) + for i := 0; i < len(s); i++ { + if ' ' <= s[i] && s[i] <= '~' { + b[i] = s[i] + } + } + return string(b) +} + +func (opts formatOptions) formatDiffSlice( + vx, vy reflect.Value, chunkSize int, name string, + makeRec func(reflect.Value, diffMode) textRecord, +) (list textList) { + es := diff.Difference(vx.Len(), vy.Len(), func(ix int, iy int) diff.Result { + return diff.BoolResult(vx.Index(ix).Interface() == vy.Index(iy).Interface()) + }) + + appendChunks := func(v reflect.Value, d diffMode) int { + n0 := v.Len() + for v.Len() > 0 { + n := chunkSize + if n > v.Len() { + n = v.Len() + } + list = append(list, makeRec(v.Slice(0, n), d)) + v = v.Slice(n, v.Len()) + } + return n0 - v.Len() + } + + groups := coalesceAdjacentEdits(name, es) + groups = coalesceInterveningIdentical(groups, chunkSize/4) + for i, ds := range groups { + // Print equal. + if ds.NumDiff() == 0 { + // Compute the number of leading and trailing equal bytes to print. 
+ var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 { + numLo++ + } + for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + numHi++ + } + if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 { + numHi = numEqual - numLo // Avoid pointless coalescing of single equal row + } + + // Print the equal bytes. + appendChunks(vx.Slice(0, numLo), diffIdentical) + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + } + appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical) + vx = vx.Slice(numEqual, vx.Len()) + vy = vy.Slice(numEqual, vy.Len()) + continue + } + + // Print unequal. + nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved) + vx = vx.Slice(nx, vx.Len()) + ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted) + vy = vy.Slice(ny, vy.Len()) + } + assert(vx.Len() == 0 && vy.Len() == 0) + return list +} + +// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent +// equal or unequal counts. +func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { + var prevCase int // Arbitrary index into which case last occurred + lastStats := func(i int) *diffStats { + if prevCase != i { + groups = append(groups, diffStats{Name: name}) + prevCase = i + } + return &groups[len(groups)-1] + } + for _, e := range es { + switch e { + case diff.Identity: + lastStats(1).NumIdentical++ + case diff.UniqueX: + lastStats(2).NumRemoved++ + case diff.UniqueY: + lastStats(2).NumInserted++ + case diff.Modified: + lastStats(2).NumModified++ + } + } + return groups +} + +// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize) +// equal groups into adjacent unequal groups that currently result in a +// dual inserted/removed printout. This acts as a high-pass filter to smooth +// out high-frequency changes within the windowSize. +func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { + groups, groupsOrig := groups[:0], groups + for i, ds := range groupsOrig { + if len(groups) >= 2 && ds.NumDiff() > 0 { + prev := &groups[len(groups)-2] // Unequal group + curr := &groups[len(groups)-1] // Equal group + next := &groupsOrig[i] // Unequal group + hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0 + hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0 + if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize { + *prev = prev.Append(*curr).Append(*next) + groups = groups[:len(groups)-1] // Truncate off equal group + continue + } + } + groups = append(groups, ds) + } + return groups +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go new file mode 100644 index 000000000..8b8fcab7b --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -0,0 +1,387 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
+ +package cmp + +import ( + "bytes" + "fmt" + "math/rand" + "strings" + "time" + + "github.com/google/go-cmp/cmp/internal/flags" +) + +var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 + +type indentMode int + +func (n indentMode) appendIndent(b []byte, d diffMode) []byte { + // The output of Diff is documented as being unstable to provide future + // flexibility in changing the output for more humanly readable reports. + // This logic intentionally introduces instability to the exact output + // so that users can detect accidental reliance on stability early on, + // rather than much later when an actual change to the format occurs. + if flags.Deterministic || randBool { + // Use regular spaces (U+0020). + switch d { + case diffUnknown, diffIdentical: + b = append(b, " "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) + } + } else { + // Use non-breaking spaces (U+00a0). + switch d { + case diffUnknown, diffIdentical: + b = append(b, "  "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) + } + } + return repeatCount(n).appendChar(b, '\t') +} + +type repeatCount int + +func (n repeatCount) appendChar(b []byte, c byte) []byte { + for ; n > 0; n-- { + b = append(b, c) + } + return b +} + +// textNode is a simplified tree-based representation of structured text. +// Possible node types are textWrap, textList, or textLine. +type textNode interface { + // Len reports the length in bytes of a single-line version of the tree. + // Nested textRecord.Diff and textRecord.Comment fields are ignored. + Len() int + // Equal reports whether the two trees are structurally identical. + // Nested textRecord.Diff and textRecord.Comment fields are compared. + Equal(textNode) bool + // String returns the string representation of the text tree. + // It is not guaranteed that len(x.String()) == x.Len(), + // nor that x.String() == y.String() implies that x.Equal(y). + String() string + + // formatCompactTo formats the contents of the tree as a single-line string + // to the provided buffer. Any nested textRecord.Diff and textRecord.Comment + // fields are ignored. + // + // However, not all nodes in the tree should be collapsed as a single-line. + // If a node can be collapsed as a single-line, it is replaced by a textLine + // node. Since the top-level node cannot replace itself, this also returns + // the current node itself. + // + // This does not mutate the receiver. + formatCompactTo([]byte, diffMode) ([]byte, textNode) + // formatExpandedTo formats the contents of the tree as a multi-line string + // to the provided buffer. In order for column alignment to operate well, + // formatCompactTo must be called before calling formatExpandedTo. + formatExpandedTo([]byte, diffMode, indentMode) []byte +} + +// textWrap is a wrapper that concatenates a prefix and/or a suffix +// to the underlying node. 
+type textWrap struct { + Prefix string // e.g., "bytes.Buffer{" + Value textNode // textWrap | textList | textLine + Suffix string // e.g., "}" +} + +func (s textWrap) Len() int { + return len(s.Prefix) + s.Value.Len() + len(s.Suffix) +} +func (s1 textWrap) Equal(s2 textNode) bool { + if s2, ok := s2.(textWrap); ok { + return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix + } + return false +} +func (s textWrap) String() string { + var d diffMode + var n indentMode + _, s2 := s.formatCompactTo(nil, d) + b := n.appendIndent(nil, d) // Leading indent + b = s2.formatExpandedTo(b, d, n) // Main body + b = append(b, '\n') // Trailing newline + return string(b) +} +func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + n0 := len(b) // Original buffer length + b = append(b, s.Prefix...) + b, s.Value = s.Value.formatCompactTo(b, d) + b = append(b, s.Suffix...) + if _, ok := s.Value.(textLine); ok { + return b, textLine(b[n0:]) + } + return b, s +} +func (s textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + b = append(b, s.Prefix...) + b = s.Value.formatExpandedTo(b, d, n) + b = append(b, s.Suffix...) + return b +} + +// textList is a comma-separated list of textWrap or textLine nodes. +// The list may be formatted as multi-lines or single-line at the discretion +// of the textList.formatCompactTo method. +type textList []textRecord +type textRecord struct { + Diff diffMode // e.g., 0 or '-' or '+' + Key string // e.g., "MyField" + Value textNode // textWrap | textLine + Comment fmt.Stringer // e.g., "6 identical fields" +} + +// AppendEllipsis appends a new ellipsis node to the list if none already +// exists at the end. If cs is non-zero it coalesces the statistics with the +// previous diffStats. +func (s *textList) AppendEllipsis(ds diffStats) { + hasStats := ds != diffStats{} + if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) { + if hasStats { + *s = append(*s, textRecord{Value: textEllipsis, Comment: ds}) + } else { + *s = append(*s, textRecord{Value: textEllipsis}) + } + return + } + if hasStats { + (*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds) + } +} + +func (s textList) Len() (n int) { + for i, r := range s { + n += len(r.Key) + if r.Key != "" { + n += len(": ") + } + n += r.Value.Len() + if i < len(s)-1 { + n += len(", ") + } + } + return n +} + +func (s1 textList) Equal(s2 textNode) bool { + if s2, ok := s2.(textList); ok { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + r1, r2 := s1[i], s2[i] + if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) { + return false + } + } + return true + } + return false +} + +func (s textList) String() string { + return textWrap{"{", s, "}"}.String() +} + +func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + s = append(textList(nil), s...) // Avoid mutating original + + // Determine whether we can collapse this list as a single line. + n0 := len(b) // Original buffer length + var multiLine bool + for i, r := range s { + if r.Diff == diffInserted || r.Diff == diffRemoved { + multiLine = true + } + b = append(b, r.Key...) + if r.Key != "" { + b = append(b, ": "...) + } + b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff) + if _, ok := s[i].Value.(textLine); !ok { + multiLine = true + } + if r.Comment != nil { + multiLine = true + } + if i < len(s)-1 { + b = append(b, ", "...) 
+ } + } + // Force multi-lined output when printing a removed/inserted node that + // is sufficiently long. + if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > 80 { + multiLine = true + } + if !multiLine { + return b, textLine(b[n0:]) + } + return b, s +} + +func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + alignKeyLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return r.Key == "" || !isLine + }, + func(r textRecord) int { return len(r.Key) }, + ) + alignValueLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil + }, + func(r textRecord) int { return len(r.Value.(textLine)) }, + ) + + // Format the list as a multi-lined output. + n++ + for i, r := range s { + b = n.appendIndent(append(b, '\n'), d|r.Diff) + if r.Key != "" { + b = append(b, r.Key+": "...) + } + b = alignKeyLens[i].appendChar(b, ' ') + + b = r.Value.formatExpandedTo(b, d|r.Diff, n) + if !r.Value.Equal(textEllipsis) { + b = append(b, ',') + } + b = alignValueLens[i].appendChar(b, ' ') + + if r.Comment != nil { + b = append(b, " // "+r.Comment.String()...) + } + } + n-- + + return n.appendIndent(append(b, '\n'), d) +} + +func (s textList) alignLens( + skipFunc func(textRecord) bool, + lenFunc func(textRecord) int, +) []repeatCount { + var startIdx, endIdx, maxLen int + lens := make([]repeatCount, len(s)) + for i, r := range s { + if skipFunc(r) { + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + startIdx, endIdx, maxLen = i+1, i+1, 0 + } else { + if maxLen < lenFunc(r) { + maxLen = lenFunc(r) + } + endIdx = i + 1 + } + } + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + return lens +} + +// textLine is a single-line segment of text and is always a leaf node +// in the textNode tree. +type textLine []byte + +var ( + textNil = textLine("nil") + textEllipsis = textLine("...") +) + +func (s textLine) Len() int { + return len(s) +} +func (s1 textLine) Equal(s2 textNode) bool { + if s2, ok := s2.(textLine); ok { + return bytes.Equal([]byte(s1), []byte(s2)) + } + return false +} +func (s textLine) String() string { + return string(s) +} +func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + return append(b, s...), s +} +func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte { + return append(b, s...) +} + +type diffStats struct { + Name string + NumIgnored int + NumIdentical int + NumRemoved int + NumInserted int + NumModified int +} + +func (s diffStats) NumDiff() int { + return s.NumRemoved + s.NumInserted + s.NumModified +} + +func (s diffStats) Append(ds diffStats) diffStats { + assert(s.Name == ds.Name) + s.NumIgnored += ds.NumIgnored + s.NumIdentical += ds.NumIdentical + s.NumRemoved += ds.NumRemoved + s.NumInserted += ds.NumInserted + s.NumModified += ds.NumModified + return s +} + +// String prints a humanly-readable summary of coalesced records. 
+// +// Example: +// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields" +func (s diffStats) String() string { + var ss []string + var sum int + labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"} + counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified} + for i, n := range counts { + if n > 0 { + ss = append(ss, fmt.Sprintf("%d %v", n, labels[i])) + } + sum += n + } + + // Pluralize the name (adjusting for some obscure English grammar rules). + name := s.Name + if sum > 1 { + name += "s" + if strings.HasSuffix(name, "ys") { + name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries" + } + } + + // Format the list according to English grammar (with Oxford comma). + switch n := len(ss); n { + case 0: + return "" + case 1, 2: + return strings.Join(ss, " and ") + " " + name + default: + return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name + } +} + +type commentString string + +func (s commentString) String() string { return string(s) } diff --git a/vendor/github.com/google/go-cmp/cmp/report_value.go b/vendor/github.com/google/go-cmp/cmp/report_value.go new file mode 100644 index 000000000..83031a7f5 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_value.go @@ -0,0 +1,121 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import "reflect" + +// valueNode represents a single node within a report, which is a +// structured representation of the value tree, containing information +// regarding which nodes are equal or not. +type valueNode struct { + parent *valueNode + + Type reflect.Type + ValueX reflect.Value + ValueY reflect.Value + + // NumSame is the number of leaf nodes that are equal. + // All descendants are equal only if NumDiff is 0. + NumSame int + // NumDiff is the number of leaf nodes that are not equal. + NumDiff int + // NumIgnored is the number of leaf nodes that are ignored. + NumIgnored int + // NumCompared is the number of leaf nodes that were compared + // using an Equal method or Comparer function. + NumCompared int + // NumTransformed is the number of non-leaf nodes that were transformed. + NumTransformed int + // NumChildren is the number of transitive descendants of this node. + // This counts from zero; thus, leaf nodes have no descendants. + NumChildren int + // MaxDepth is the maximum depth of the tree. This counts from zero; + // thus, leaf nodes have a depth of zero. + MaxDepth int + + // Records is a list of struct fields, slice elements, or map entries. + Records []reportRecord // If populated, implies Value is not populated + + // Value is the result of a transformation, pointer indirect, of + // type assertion. + Value *valueNode // If populated, implies Records is not populated + + // TransformerName is the name of the transformer. 
+ TransformerName string // If non-empty, implies Value is populated +} +type reportRecord struct { + Key reflect.Value // Invalid for slice element + Value *valueNode +} + +func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) { + vx, vy := ps.Values() + child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy} + switch s := ps.(type) { + case StructField: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child}) + case SliceIndex: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Value: child}) + case MapIndex: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child}) + case Indirect: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + case TypeAssertion: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + case Transform: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + parent.TransformerName = s.Name() + parent.NumTransformed++ + default: + assert(parent == nil) // Must be the root step + } + return child +} + +func (r *valueNode) Report(rs Result) { + assert(r.MaxDepth == 0) // May only be called on leaf nodes + + if rs.ByIgnore() { + r.NumIgnored++ + } else { + if rs.Equal() { + r.NumSame++ + } else { + r.NumDiff++ + } + } + assert(r.NumSame+r.NumDiff+r.NumIgnored == 1) + + if rs.ByMethod() { + r.NumCompared++ + } + if rs.ByFunc() { + r.NumCompared++ + } + assert(r.NumCompared <= 1) +} + +func (child *valueNode) PopStep() (parent *valueNode) { + if child.parent == nil { + return nil + } + parent = child.parent + parent.NumSame += child.NumSame + parent.NumDiff += child.NumDiff + parent.NumIgnored += child.NumIgnored + parent.NumCompared += child.NumCompared + parent.NumTransformed += child.NumTransformed + parent.NumChildren += child.NumChildren + 1 + if parent.MaxDepth < child.MaxDepth+1 { + parent.MaxDepth = child.MaxDepth + 1 + } + return parent +} diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go index a030fa676..6199e7cb3 100644 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go @@ -1,80 +1,14 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: OpenAPIv2/OpenAPIv2.proto -/* -Package openapi_v2 is a generated protocol buffer package. 
- -It is generated from these files: - OpenAPIv2/OpenAPIv2.proto - -It has these top-level messages: - AdditionalPropertiesItem - Any - ApiKeySecurity - BasicAuthenticationSecurity - BodyParameter - Contact - Default - Definitions - Document - Examples - ExternalDocs - FileSchema - FormDataParameterSubSchema - Header - HeaderParameterSubSchema - Headers - Info - ItemsItem - JsonReference - License - NamedAny - NamedHeader - NamedParameter - NamedPathItem - NamedResponse - NamedResponseValue - NamedSchema - NamedSecurityDefinitionsItem - NamedString - NamedStringArray - NonBodyParameter - Oauth2AccessCodeSecurity - Oauth2ApplicationSecurity - Oauth2ImplicitSecurity - Oauth2PasswordSecurity - Oauth2Scopes - Operation - Parameter - ParameterDefinitions - ParametersItem - PathItem - PathParameterSubSchema - Paths - PrimitivesItems - Properties - QueryParameterSubSchema - Response - ResponseDefinitions - ResponseValue - Responses - Schema - SchemaItem - SecurityDefinitions - SecurityDefinitionsItem - SecurityRequirement - StringArray - Tag - TypeItem - VendorExtension - Xml -*/ package openapi_v2 -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import google_protobuf "github.com/golang/protobuf/ptypes/any" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -85,32 +19,57 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type AdditionalPropertiesItem struct { // Types that are valid to be assigned to Oneof: // *AdditionalPropertiesItem_Schema // *AdditionalPropertiesItem_Boolean - Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` + Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *AdditionalPropertiesItem) Reset() { *m = AdditionalPropertiesItem{} } -func (m *AdditionalPropertiesItem) String() string { return proto.CompactTextString(m) } -func (*AdditionalPropertiesItem) ProtoMessage() {} -func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *AdditionalPropertiesItem) Reset() { *m = AdditionalPropertiesItem{} } +func (m *AdditionalPropertiesItem) String() string { return proto.CompactTextString(m) } +func (*AdditionalPropertiesItem) ProtoMessage() {} +func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{0} +} + +func (m *AdditionalPropertiesItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AdditionalPropertiesItem.Unmarshal(m, b) +} +func (m *AdditionalPropertiesItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AdditionalPropertiesItem.Marshal(b, m, deterministic) +} +func (m *AdditionalPropertiesItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdditionalPropertiesItem.Merge(m, src) +} +func (m *AdditionalPropertiesItem) XXX_Size() int { + return xxx_messageInfo_AdditionalPropertiesItem.Size(m) +} +func (m *AdditionalPropertiesItem) XXX_DiscardUnknown() { + 
xxx_messageInfo_AdditionalPropertiesItem.DiscardUnknown(m) +} + +var xxx_messageInfo_AdditionalPropertiesItem proto.InternalMessageInfo type isAdditionalPropertiesItem_Oneof interface { isAdditionalPropertiesItem_Oneof() } type AdditionalPropertiesItem_Schema struct { - Schema *Schema `protobuf:"bytes,1,opt,name=schema,oneof"` + Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"` } + type AdditionalPropertiesItem_Boolean struct { - Boolean bool `protobuf:"varint,2,opt,name=boolean,oneof"` + Boolean bool `protobuf:"varint,2,opt,name=boolean,proto3,oneof"` } -func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {} +func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {} + func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {} func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof { @@ -134,90 +93,48 @@ func (m *AdditionalPropertiesItem) GetBoolean() bool { return false } -// XXX_OneofFuncs is for the internal use of the proto package. -func (*AdditionalPropertiesItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _AdditionalPropertiesItem_OneofMarshaler, _AdditionalPropertiesItem_OneofUnmarshaler, _AdditionalPropertiesItem_OneofSizer, []interface{}{ +// XXX_OneofWrappers is for the internal use of the proto package. +func (*AdditionalPropertiesItem) XXX_OneofWrappers() []interface{} { + return []interface{}{ (*AdditionalPropertiesItem_Schema)(nil), (*AdditionalPropertiesItem_Boolean)(nil), } } -func _AdditionalPropertiesItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*AdditionalPropertiesItem) - // oneof - switch x := m.Oneof.(type) { - case *AdditionalPropertiesItem_Schema: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Schema); err != nil { - return err - } - case *AdditionalPropertiesItem_Boolean: - t := uint64(0) - if x.Boolean { - t = 1 - } - b.EncodeVarint(2<<3 | proto.WireVarint) - b.EncodeVarint(t) - case nil: - default: - return fmt.Errorf("AdditionalPropertiesItem.Oneof has unexpected type %T", x) - } - return nil -} - -func _AdditionalPropertiesItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*AdditionalPropertiesItem) - switch tag { - case 1: // oneof.schema - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Schema) - err := b.DecodeMessage(msg) - m.Oneof = &AdditionalPropertiesItem_Schema{msg} - return true, err - case 2: // oneof.boolean - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Oneof = &AdditionalPropertiesItem_Boolean{x != 0} - return true, err - default: - return false, nil - } -} - -func _AdditionalPropertiesItem_OneofSizer(msg proto.Message) (n int) { - m := msg.(*AdditionalPropertiesItem) - // oneof - switch x := m.Oneof.(type) { - case *AdditionalPropertiesItem_Schema: - s := proto.Size(x.Schema) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *AdditionalPropertiesItem_Boolean: - n += proto.SizeVarint(2<<3 | proto.WireVarint) - n += 1 - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n +type Any struct { + Value *any.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + Yaml string 
`protobuf:"bytes,2,opt,name=yaml,proto3" json:"yaml,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -type Any struct { - Value *google_protobuf.Any `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` - Yaml string `protobuf:"bytes,2,opt,name=yaml" json:"yaml,omitempty"` +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{1} } -func (m *Any) Reset() { *m = Any{} } -func (m *Any) String() string { return proto.CompactTextString(m) } -func (*Any) ProtoMessage() {} -func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (m *Any) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Any.Unmarshal(m, b) +} +func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Any.Marshal(b, m, deterministic) +} +func (m *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(m, src) +} +func (m *Any) XXX_Size() int { + return xxx_messageInfo_Any.Size(m) +} +func (m *Any) XXX_DiscardUnknown() { + xxx_messageInfo_Any.DiscardUnknown(m) +} -func (m *Any) GetValue() *google_protobuf.Any { +var xxx_messageInfo_Any proto.InternalMessageInfo + +func (m *Any) GetValue() *any.Any { if m != nil { return m.Value } @@ -232,17 +149,40 @@ func (m *Any) GetYaml() string { } type ApiKeySecurity struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` - In string `protobuf:"bytes,3,opt,name=in" json:"in,omitempty"` - Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApiKeySecurity) Reset() { *m = ApiKeySecurity{} } +func (m *ApiKeySecurity) String() string { return proto.CompactTextString(m) } +func (*ApiKeySecurity) ProtoMessage() {} +func (*ApiKeySecurity) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{2} +} + +func (m *ApiKeySecurity) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApiKeySecurity.Unmarshal(m, b) +} +func (m *ApiKeySecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApiKeySecurity.Marshal(b, m, deterministic) +} +func (m *ApiKeySecurity) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApiKeySecurity.Merge(m, src) +} +func (m *ApiKeySecurity) XXX_Size() int { + return xxx_messageInfo_ApiKeySecurity.Size(m) +} +func (m *ApiKeySecurity) XXX_DiscardUnknown() { + xxx_messageInfo_ApiKeySecurity.DiscardUnknown(m) } -func (m *ApiKeySecurity) Reset() { *m = ApiKeySecurity{} } -func (m *ApiKeySecurity) String() string { return 
proto.CompactTextString(m) } -func (*ApiKeySecurity) ProtoMessage() {} -func (*ApiKeySecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +var xxx_messageInfo_ApiKeySecurity proto.InternalMessageInfo func (m *ApiKeySecurity) GetType() string { if m != nil { @@ -280,15 +220,38 @@ func (m *ApiKeySecurity) GetVendorExtension() []*NamedAny { } type BasicAuthenticationSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BasicAuthenticationSecurity) Reset() { *m = BasicAuthenticationSecurity{} } +func (m *BasicAuthenticationSecurity) String() string { return proto.CompactTextString(m) } +func (*BasicAuthenticationSecurity) ProtoMessage() {} +func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{3} +} + +func (m *BasicAuthenticationSecurity) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BasicAuthenticationSecurity.Unmarshal(m, b) +} +func (m *BasicAuthenticationSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BasicAuthenticationSecurity.Marshal(b, m, deterministic) +} +func (m *BasicAuthenticationSecurity) XXX_Merge(src proto.Message) { + xxx_messageInfo_BasicAuthenticationSecurity.Merge(m, src) +} +func (m *BasicAuthenticationSecurity) XXX_Size() int { + return xxx_messageInfo_BasicAuthenticationSecurity.Size(m) +} +func (m *BasicAuthenticationSecurity) XXX_DiscardUnknown() { + xxx_messageInfo_BasicAuthenticationSecurity.DiscardUnknown(m) } -func (m *BasicAuthenticationSecurity) Reset() { *m = BasicAuthenticationSecurity{} } -func (m *BasicAuthenticationSecurity) String() string { return proto.CompactTextString(m) } -func (*BasicAuthenticationSecurity) ProtoMessage() {} -func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +var xxx_messageInfo_BasicAuthenticationSecurity proto.InternalMessageInfo func (m *BasicAuthenticationSecurity) GetType() string { if m != nil { @@ -313,21 +276,44 @@ func (m *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny { type BodyParameter struct { // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` // The name of the parameter. - Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` // Determines the location of the parameter. - In string `protobuf:"bytes,3,opt,name=in" json:"in,omitempty"` + In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"` // Determines whether or not this parameter is required or optional. 
- Required bool `protobuf:"varint,4,opt,name=required" json:"required,omitempty"` - Schema *Schema `protobuf:"bytes,5,opt,name=schema" json:"schema,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` + Schema *Schema `protobuf:"bytes,5,opt,name=schema,proto3" json:"schema,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *BodyParameter) Reset() { *m = BodyParameter{} } -func (m *BodyParameter) String() string { return proto.CompactTextString(m) } -func (*BodyParameter) ProtoMessage() {} -func (*BodyParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (m *BodyParameter) Reset() { *m = BodyParameter{} } +func (m *BodyParameter) String() string { return proto.CompactTextString(m) } +func (*BodyParameter) ProtoMessage() {} +func (*BodyParameter) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{4} +} + +func (m *BodyParameter) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BodyParameter.Unmarshal(m, b) +} +func (m *BodyParameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BodyParameter.Marshal(b, m, deterministic) +} +func (m *BodyParameter) XXX_Merge(src proto.Message) { + xxx_messageInfo_BodyParameter.Merge(m, src) +} +func (m *BodyParameter) XXX_Size() int { + return xxx_messageInfo_BodyParameter.Size(m) +} +func (m *BodyParameter) XXX_DiscardUnknown() { + xxx_messageInfo_BodyParameter.DiscardUnknown(m) +} + +var xxx_messageInfo_BodyParameter proto.InternalMessageInfo func (m *BodyParameter) GetDescription() string { if m != nil { @@ -374,18 +360,41 @@ func (m *BodyParameter) GetVendorExtension() []*NamedAny { // Contact information for the owners of the API. type Contact struct { // The identifying name of the contact person/organization. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // The URL pointing to the contact information. - Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` // The email address of the contact person/organization. 
- Email string `protobuf:"bytes,3,opt,name=email" json:"email,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Contact) Reset() { *m = Contact{} } -func (m *Contact) String() string { return proto.CompactTextString(m) } -func (*Contact) ProtoMessage() {} -func (*Contact) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (m *Contact) Reset() { *m = Contact{} } +func (m *Contact) String() string { return proto.CompactTextString(m) } +func (*Contact) ProtoMessage() {} +func (*Contact) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{5} +} + +func (m *Contact) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Contact.Unmarshal(m, b) +} +func (m *Contact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Contact.Marshal(b, m, deterministic) +} +func (m *Contact) XXX_Merge(src proto.Message) { + xxx_messageInfo_Contact.Merge(m, src) +} +func (m *Contact) XXX_Size() int { + return xxx_messageInfo_Contact.Size(m) +} +func (m *Contact) XXX_DiscardUnknown() { + xxx_messageInfo_Contact.DiscardUnknown(m) +} + +var xxx_messageInfo_Contact proto.InternalMessageInfo func (m *Contact) GetName() string { if m != nil { @@ -416,13 +425,36 @@ func (m *Contact) GetVendorExtension() []*NamedAny { } type Default struct { - AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Default) Reset() { *m = Default{} } +func (m *Default) String() string { return proto.CompactTextString(m) } +func (*Default) ProtoMessage() {} +func (*Default) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{6} +} + +func (m *Default) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Default.Unmarshal(m, b) +} +func (m *Default) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Default.Marshal(b, m, deterministic) +} +func (m *Default) XXX_Merge(src proto.Message) { + xxx_messageInfo_Default.Merge(m, src) +} +func (m *Default) XXX_Size() int { + return xxx_messageInfo_Default.Size(m) +} +func (m *Default) XXX_DiscardUnknown() { + xxx_messageInfo_Default.DiscardUnknown(m) } -func (m *Default) Reset() { *m = Default{} } -func (m *Default) String() string { return proto.CompactTextString(m) } -func (*Default) ProtoMessage() {} -func (*Default) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +var xxx_messageInfo_Default proto.InternalMessageInfo func (m *Default) GetAdditionalProperties() []*NamedAny { if m != nil { @@ -433,13 +465,36 @@ func (m *Default) GetAdditionalProperties() []*NamedAny { // One or more JSON objects describing the schemas being consumed and produced by the API. 
type Definitions struct { - AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` + AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Definitions) Reset() { *m = Definitions{} } -func (m *Definitions) String() string { return proto.CompactTextString(m) } -func (*Definitions) ProtoMessage() {} -func (*Definitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (m *Definitions) Reset() { *m = Definitions{} } +func (m *Definitions) String() string { return proto.CompactTextString(m) } +func (*Definitions) ProtoMessage() {} +func (*Definitions) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{7} +} + +func (m *Definitions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Definitions.Unmarshal(m, b) +} +func (m *Definitions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Definitions.Marshal(b, m, deterministic) +} +func (m *Definitions) XXX_Merge(src proto.Message) { + xxx_messageInfo_Definitions.Merge(m, src) +} +func (m *Definitions) XXX_Size() int { + return xxx_messageInfo_Definitions.Size(m) +} +func (m *Definitions) XXX_DiscardUnknown() { + xxx_messageInfo_Definitions.DiscardUnknown(m) +} + +var xxx_messageInfo_Definitions proto.InternalMessageInfo func (m *Definitions) GetAdditionalProperties() []*NamedSchema { if m != nil { @@ -450,33 +505,56 @@ func (m *Definitions) GetAdditionalProperties() []*NamedSchema { type Document struct { // The Swagger version of this document. - Swagger string `protobuf:"bytes,1,opt,name=swagger" json:"swagger,omitempty"` - Info *Info `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"` + Swagger string `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"` + Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` // The host (name or ip) of the API. Example: 'swagger.io' - Host string `protobuf:"bytes,3,opt,name=host" json:"host,omitempty"` + Host string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"` // The base path to the API. Example: '/api'. - BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath" json:"base_path,omitempty"` + BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"` // The transfer protocol of the API. - Schemes []string `protobuf:"bytes,5,rep,name=schemes" json:"schemes,omitempty"` + Schemes []string `protobuf:"bytes,5,rep,name=schemes,proto3" json:"schemes,omitempty"` // A list of MIME types accepted by the API. - Consumes []string `protobuf:"bytes,6,rep,name=consumes" json:"consumes,omitempty"` + Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"` // A list of MIME types the API can produce. 
- Produces []string `protobuf:"bytes,7,rep,name=produces" json:"produces,omitempty"` - Paths *Paths `protobuf:"bytes,8,opt,name=paths" json:"paths,omitempty"` - Definitions *Definitions `protobuf:"bytes,9,opt,name=definitions" json:"definitions,omitempty"` - Parameters *ParameterDefinitions `protobuf:"bytes,10,opt,name=parameters" json:"parameters,omitempty"` - Responses *ResponseDefinitions `protobuf:"bytes,11,opt,name=responses" json:"responses,omitempty"` - Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security" json:"security,omitempty"` - SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions" json:"security_definitions,omitempty"` - Tags []*Tag `protobuf:"bytes,14,rep,name=tags" json:"tags,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Document) Reset() { *m = Document{} } -func (m *Document) String() string { return proto.CompactTextString(m) } -func (*Document) ProtoMessage() {} -func (*Document) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"` + Paths *Paths `protobuf:"bytes,8,opt,name=paths,proto3" json:"paths,omitempty"` + Definitions *Definitions `protobuf:"bytes,9,opt,name=definitions,proto3" json:"definitions,omitempty"` + Parameters *ParameterDefinitions `protobuf:"bytes,10,opt,name=parameters,proto3" json:"parameters,omitempty"` + Responses *ResponseDefinitions `protobuf:"bytes,11,opt,name=responses,proto3" json:"responses,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` + SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"` + Tags []*Tag `protobuf:"bytes,14,rep,name=tags,proto3" json:"tags,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Document) Reset() { *m = Document{} } +func (m *Document) String() string { return proto.CompactTextString(m) } +func (*Document) ProtoMessage() {} +func (*Document) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{8} +} + +func (m *Document) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Document.Unmarshal(m, b) +} +func (m *Document) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Document.Marshal(b, m, deterministic) +} +func (m *Document) XXX_Merge(src proto.Message) { + xxx_messageInfo_Document.Merge(m, src) +} +func (m *Document) XXX_Size() int { + return xxx_messageInfo_Document.Size(m) +} +func (m *Document) XXX_DiscardUnknown() { + xxx_messageInfo_Document.DiscardUnknown(m) +} + +var xxx_messageInfo_Document proto.InternalMessageInfo func (m *Document) GetSwagger() string { if m != nil { @@ -591,13 +669,36 @@ func (m *Document) GetVendorExtension() []*NamedAny { } type Examples struct { - AdditionalProperties 
[]*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Examples) Reset() { *m = Examples{} } +func (m *Examples) String() string { return proto.CompactTextString(m) } +func (*Examples) ProtoMessage() {} +func (*Examples) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{9} +} + +func (m *Examples) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Examples.Unmarshal(m, b) +} +func (m *Examples) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Examples.Marshal(b, m, deterministic) +} +func (m *Examples) XXX_Merge(src proto.Message) { + xxx_messageInfo_Examples.Merge(m, src) +} +func (m *Examples) XXX_Size() int { + return xxx_messageInfo_Examples.Size(m) +} +func (m *Examples) XXX_DiscardUnknown() { + xxx_messageInfo_Examples.DiscardUnknown(m) } -func (m *Examples) Reset() { *m = Examples{} } -func (m *Examples) String() string { return proto.CompactTextString(m) } -func (*Examples) ProtoMessage() {} -func (*Examples) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +var xxx_messageInfo_Examples proto.InternalMessageInfo func (m *Examples) GetAdditionalProperties() []*NamedAny { if m != nil { @@ -608,15 +709,38 @@ func (m *Examples) GetAdditionalProperties() []*NamedAny { // information about external documentation type ExternalDocs struct { - Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` - Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ExternalDocs) Reset() { *m = ExternalDocs{} } -func (m *ExternalDocs) String() string { return proto.CompactTextString(m) } -func (*ExternalDocs) ProtoMessage() {} -func (*ExternalDocs) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +func (m *ExternalDocs) Reset() { *m = ExternalDocs{} } +func (m *ExternalDocs) String() string { return proto.CompactTextString(m) } +func (*ExternalDocs) ProtoMessage() {} +func (*ExternalDocs) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{10} +} + +func (m *ExternalDocs) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExternalDocs.Unmarshal(m, b) +} +func (m *ExternalDocs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExternalDocs.Marshal(b, m, deterministic) +} +func (m *ExternalDocs) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalDocs.Merge(m, src) +} +func (m *ExternalDocs) XXX_Size() int { + return xxx_messageInfo_ExternalDocs.Size(m) +} +func (m *ExternalDocs) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalDocs.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ExternalDocs proto.InternalMessageInfo func (m *ExternalDocs) GetDescription() string { if m != nil { @@ -641,22 +765,45 @@ func (m *ExternalDocs) GetVendorExtension() []*NamedAny { // A deterministic version of a JSON Schema object. type FileSchema struct { - Format string `protobuf:"bytes,1,opt,name=format" json:"format,omitempty"` - Title string `protobuf:"bytes,2,opt,name=title" json:"title,omitempty"` - Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` - Default *Any `protobuf:"bytes,4,opt,name=default" json:"default,omitempty"` - Required []string `protobuf:"bytes,5,rep,name=required" json:"required,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` - ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` - Example *Any `protobuf:"bytes,9,opt,name=example" json:"example,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *FileSchema) Reset() { *m = FileSchema{} } -func (m *FileSchema) String() string { return proto.CompactTextString(m) } -func (*FileSchema) ProtoMessage() {} -func (*FileSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + Format string `protobuf:"bytes,1,opt,name=format,proto3" json:"format,omitempty"` + Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + Default *Any `protobuf:"bytes,4,opt,name=default,proto3" json:"default,omitempty"` + Required []string `protobuf:"bytes,5,rep,name=required,proto3" json:"required,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` + ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + Example *Any `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileSchema) Reset() { *m = FileSchema{} } +func (m *FileSchema) String() string { return proto.CompactTextString(m) } +func (*FileSchema) ProtoMessage() {} +func (*FileSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{11} +} + +func (m *FileSchema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileSchema.Unmarshal(m, b) +} +func (m *FileSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileSchema.Marshal(b, m, deterministic) +} +func (m *FileSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileSchema.Merge(m, src) +} +func (m *FileSchema) XXX_Size() int { + return xxx_messageInfo_FileSchema.Size(m) +} +func (m *FileSchema) XXX_DiscardUnknown() { + xxx_messageInfo_FileSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_FileSchema proto.InternalMessageInfo func (m *FileSchema) GetFormat() string { if m != nil { @@ -730,39 +877,62 @@ func (m *FileSchema) GetVendorExtension() []*NamedAny { type 
FormDataParameterSubSchema struct { // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` // The name of the parameter. - Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` // allows sending a parameter by name only or with an empty value. - AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue" json:"allow_empty_value,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` - Format string `protobuf:"bytes,7,opt,name=format" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,10,opt,name=default" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,21,rep,name=enum" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *FormDataParameterSubSchema) Reset() { *m = FormDataParameterSubSchema{} } -func (m *FormDataParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*FormDataParameterSubSchema) ProtoMessage() {} -func (*FormDataParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,7,opt,name=format,proto3" 
json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FormDataParameterSubSchema) Reset() { *m = FormDataParameterSubSchema{} } +func (m *FormDataParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*FormDataParameterSubSchema) ProtoMessage() {} +func (*FormDataParameterSubSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{12} +} + +func (m *FormDataParameterSubSchema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FormDataParameterSubSchema.Unmarshal(m, b) +} +func (m *FormDataParameterSubSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FormDataParameterSubSchema.Marshal(b, m, deterministic) +} +func (m *FormDataParameterSubSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_FormDataParameterSubSchema.Merge(m, src) +} +func (m *FormDataParameterSubSchema) XXX_Size() int { + return xxx_messageInfo_FormDataParameterSubSchema.Size(m) +} +func (m *FormDataParameterSubSchema) XXX_DiscardUnknown() { + xxx_messageInfo_FormDataParameterSubSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_FormDataParameterSubSchema proto.InternalMessageInfo func (m *FormDataParameterSubSchema) GetRequired() bool { if m != nil { @@ -926,31 +1096,54 @@ func (m *FormDataParameterSubSchema) GetVendorExtension() []*NamedAny { } type Header struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat" 
json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,6,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,8,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,12,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,16,rep,name=enum" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - Description string `protobuf:"bytes,18,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Header) Reset() { *m = Header{} } -func (m *Header) String() string { return proto.CompactTextString(m) } -func (*Header) ProtoMessage() {} -func (*Header) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" 
json:"multiple_of,omitempty"` + Description string `protobuf:"bytes,18,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Header) Reset() { *m = Header{} } +func (m *Header) String() string { return proto.CompactTextString(m) } +func (*Header) ProtoMessage() {} +func (*Header) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{13} +} + +func (m *Header) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Header.Unmarshal(m, b) +} +func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Header.Marshal(b, m, deterministic) +} +func (m *Header) XXX_Merge(src proto.Message) { + xxx_messageInfo_Header.Merge(m, src) +} +func (m *Header) XXX_Size() int { + return xxx_messageInfo_Header.Size(m) +} +func (m *Header) XXX_DiscardUnknown() { + xxx_messageInfo_Header.DiscardUnknown(m) +} + +var xxx_messageInfo_Header proto.InternalMessageInfo func (m *Header) GetType() string { if m != nil { @@ -1087,37 +1280,60 @@ func (m *Header) GetVendorExtension() []*NamedAny { type HeaderParameterSubSchema struct { // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` // The name of the parameter. 
- Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` - Type string `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"` - Format string `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,9,opt,name=default" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,10,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,12,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,16,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *HeaderParameterSubSchema) Reset() { *m = HeaderParameterSubSchema{} } -func (m *HeaderParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*HeaderParameterSubSchema) ProtoMessage() {} -func (*HeaderParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 
`protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HeaderParameterSubSchema) Reset() { *m = HeaderParameterSubSchema{} } +func (m *HeaderParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*HeaderParameterSubSchema) ProtoMessage() {} +func (*HeaderParameterSubSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{14} +} + +func (m *HeaderParameterSubSchema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HeaderParameterSubSchema.Unmarshal(m, b) +} +func (m *HeaderParameterSubSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HeaderParameterSubSchema.Marshal(b, m, deterministic) +} +func (m *HeaderParameterSubSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_HeaderParameterSubSchema.Merge(m, src) +} +func (m *HeaderParameterSubSchema) XXX_Size() int { + return xxx_messageInfo_HeaderParameterSubSchema.Size(m) +} +func (m *HeaderParameterSubSchema) XXX_DiscardUnknown() { + xxx_messageInfo_HeaderParameterSubSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_HeaderParameterSubSchema proto.InternalMessageInfo func (m *HeaderParameterSubSchema) GetRequired() bool { if m != nil { @@ -1274,13 +1490,36 @@ func (m *HeaderParameterSubSchema) GetVendorExtension() []*NamedAny { } type Headers struct { - AdditionalProperties []*NamedHeader `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` + AdditionalProperties []*NamedHeader `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Headers) Reset() { *m = Headers{} } +func (m *Headers) String() string { return proto.CompactTextString(m) } +func (*Headers) ProtoMessage() {} +func (*Headers) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{15} +} + +func (m *Headers) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Headers.Unmarshal(m, b) +} +func (m *Headers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Headers.Marshal(b, m, deterministic) +} +func (m *Headers) XXX_Merge(src proto.Message) { + xxx_messageInfo_Headers.Merge(m, src) +} +func (m *Headers) XXX_Size() int { + return xxx_messageInfo_Headers.Size(m) +} +func (m *Headers) XXX_DiscardUnknown() { + xxx_messageInfo_Headers.DiscardUnknown(m) } -func (m *Headers) Reset() { *m = Headers{} } -func (m *Headers) String() string { return proto.CompactTextString(m) } -func (*Headers) ProtoMessage() {} -func (*Headers) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +var xxx_messageInfo_Headers proto.InternalMessageInfo func 
(m *Headers) GetAdditionalProperties() []*NamedHeader { if m != nil { @@ -1292,22 +1531,45 @@ func (m *Headers) GetAdditionalProperties() []*NamedHeader { // General information about the API. type Info struct { // A unique and precise title of the API. - Title string `protobuf:"bytes,1,opt,name=title" json:"title,omitempty"` + Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` // A semantic version number of the API. - Version string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` // The terms of service for the API. - TermsOfService string `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService" json:"terms_of_service,omitempty"` - Contact *Contact `protobuf:"bytes,5,opt,name=contact" json:"contact,omitempty"` - License *License `protobuf:"bytes,6,opt,name=license" json:"license,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + TermsOfService string `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"` + Contact *Contact `protobuf:"bytes,5,opt,name=contact,proto3" json:"contact,omitempty"` + License *License `protobuf:"bytes,6,opt,name=license,proto3" json:"license,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Info) Reset() { *m = Info{} } +func (m *Info) String() string { return proto.CompactTextString(m) } +func (*Info) ProtoMessage() {} +func (*Info) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{16} +} + +func (m *Info) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Info.Unmarshal(m, b) +} +func (m *Info) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Info.Marshal(b, m, deterministic) +} +func (m *Info) XXX_Merge(src proto.Message) { + xxx_messageInfo_Info.Merge(m, src) +} +func (m *Info) XXX_Size() int { + return xxx_messageInfo_Info.Size(m) +} +func (m *Info) XXX_DiscardUnknown() { + xxx_messageInfo_Info.DiscardUnknown(m) } -func (m *Info) Reset() { *m = Info{} } -func (m *Info) String() string { return proto.CompactTextString(m) } -func (*Info) ProtoMessage() {} -func (*Info) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } +var xxx_messageInfo_Info proto.InternalMessageInfo func (m *Info) GetTitle() string { if m != nil { @@ -1359,13 +1621,36 @@ func (m *Info) GetVendorExtension() []*NamedAny { } type ItemsItem struct { - Schema []*Schema `protobuf:"bytes,1,rep,name=schema" json:"schema,omitempty"` + Schema []*Schema `protobuf:"bytes,1,rep,name=schema,proto3" json:"schema,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ItemsItem) Reset() { *m = ItemsItem{} } -func (m *ItemsItem) String() string { return proto.CompactTextString(m) } -func (*ItemsItem) ProtoMessage() {} -func (*ItemsItem) 
Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } +func (m *ItemsItem) Reset() { *m = ItemsItem{} } +func (m *ItemsItem) String() string { return proto.CompactTextString(m) } +func (*ItemsItem) ProtoMessage() {} +func (*ItemsItem) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{17} +} + +func (m *ItemsItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ItemsItem.Unmarshal(m, b) +} +func (m *ItemsItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ItemsItem.Marshal(b, m, deterministic) +} +func (m *ItemsItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_ItemsItem.Merge(m, src) +} +func (m *ItemsItem) XXX_Size() int { + return xxx_messageInfo_ItemsItem.Size(m) +} +func (m *ItemsItem) XXX_DiscardUnknown() { + xxx_messageInfo_ItemsItem.DiscardUnknown(m) +} + +var xxx_messageInfo_ItemsItem proto.InternalMessageInfo func (m *ItemsItem) GetSchema() []*Schema { if m != nil { @@ -1375,14 +1660,37 @@ func (m *ItemsItem) GetSchema() []*Schema { } type JsonReference struct { - XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *JsonReference) Reset() { *m = JsonReference{} } -func (m *JsonReference) String() string { return proto.CompactTextString(m) } -func (*JsonReference) ProtoMessage() {} -func (*JsonReference) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +func (m *JsonReference) Reset() { *m = JsonReference{} } +func (m *JsonReference) String() string { return proto.CompactTextString(m) } +func (*JsonReference) ProtoMessage() {} +func (*JsonReference) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{18} +} + +func (m *JsonReference) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_JsonReference.Unmarshal(m, b) +} +func (m *JsonReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_JsonReference.Marshal(b, m, deterministic) +} +func (m *JsonReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_JsonReference.Merge(m, src) +} +func (m *JsonReference) XXX_Size() int { + return xxx_messageInfo_JsonReference.Size(m) +} +func (m *JsonReference) XXX_DiscardUnknown() { + xxx_messageInfo_JsonReference.DiscardUnknown(m) +} + +var xxx_messageInfo_JsonReference proto.InternalMessageInfo func (m *JsonReference) GetXRef() string { if m != nil { @@ -1400,16 +1708,39 @@ func (m *JsonReference) GetDescription() string { type License struct { // The name of the license type. It's encouraged to use an OSI compatible license. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // The URL pointing to the license. 
- Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *License) Reset() { *m = License{} } +func (m *License) String() string { return proto.CompactTextString(m) } +func (*License) ProtoMessage() {} +func (*License) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{19} +} + +func (m *License) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_License.Unmarshal(m, b) +} +func (m *License) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_License.Marshal(b, m, deterministic) +} +func (m *License) XXX_Merge(src proto.Message) { + xxx_messageInfo_License.Merge(m, src) +} +func (m *License) XXX_Size() int { + return xxx_messageInfo_License.Size(m) +} +func (m *License) XXX_DiscardUnknown() { + xxx_messageInfo_License.DiscardUnknown(m) } -func (m *License) Reset() { *m = License{} } -func (m *License) String() string { return proto.CompactTextString(m) } -func (*License) ProtoMessage() {} -func (*License) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } +var xxx_messageInfo_License proto.InternalMessageInfo func (m *License) GetName() string { if m != nil { @@ -1435,15 +1766,38 @@ func (m *License) GetVendorExtension() []*NamedAny { // Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. 
type NamedAny struct { // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Mapped value - Value *Any `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + Value *Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NamedAny) Reset() { *m = NamedAny{} } +func (m *NamedAny) String() string { return proto.CompactTextString(m) } +func (*NamedAny) ProtoMessage() {} +func (*NamedAny) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{20} } -func (m *NamedAny) Reset() { *m = NamedAny{} } -func (m *NamedAny) String() string { return proto.CompactTextString(m) } -func (*NamedAny) ProtoMessage() {} -func (*NamedAny) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } +func (m *NamedAny) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NamedAny.Unmarshal(m, b) +} +func (m *NamedAny) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NamedAny.Marshal(b, m, deterministic) +} +func (m *NamedAny) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedAny.Merge(m, src) +} +func (m *NamedAny) XXX_Size() int { + return xxx_messageInfo_NamedAny.Size(m) +} +func (m *NamedAny) XXX_DiscardUnknown() { + xxx_messageInfo_NamedAny.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedAny proto.InternalMessageInfo func (m *NamedAny) GetName() string { if m != nil { @@ -1462,15 +1816,38 @@ func (m *NamedAny) GetValue() *Any { // Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. type NamedHeader struct { // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Mapped value - Value *Header `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + Value *Header `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *NamedHeader) Reset() { *m = NamedHeader{} } -func (m *NamedHeader) String() string { return proto.CompactTextString(m) } -func (*NamedHeader) ProtoMessage() {} -func (*NamedHeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } +func (m *NamedHeader) Reset() { *m = NamedHeader{} } +func (m *NamedHeader) String() string { return proto.CompactTextString(m) } +func (*NamedHeader) ProtoMessage() {} +func (*NamedHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{21} +} + +func (m *NamedHeader) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NamedHeader.Unmarshal(m, b) +} +func (m *NamedHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NamedHeader.Marshal(b, m, deterministic) +} +func (m *NamedHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedHeader.Merge(m, src) +} +func (m *NamedHeader) XXX_Size() int { + return xxx_messageInfo_NamedHeader.Size(m) +} +func (m *NamedHeader) XXX_DiscardUnknown() { + xxx_messageInfo_NamedHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedHeader proto.InternalMessageInfo func (m *NamedHeader) GetName() string { if m != nil { @@ -1489,15 +1866,38 @@ func (m *NamedHeader) GetValue() *Header { // Automatically-generated message used to 
represent maps of Parameter as ordered (name,value) pairs. type NamedParameter struct { // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Mapped value - Value *Parameter `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + Value *Parameter `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *NamedParameter) Reset() { *m = NamedParameter{} } -func (m *NamedParameter) String() string { return proto.CompactTextString(m) } -func (*NamedParameter) ProtoMessage() {} -func (*NamedParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } +func (m *NamedParameter) Reset() { *m = NamedParameter{} } +func (m *NamedParameter) String() string { return proto.CompactTextString(m) } +func (*NamedParameter) ProtoMessage() {} +func (*NamedParameter) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{22} +} + +func (m *NamedParameter) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NamedParameter.Unmarshal(m, b) +} +func (m *NamedParameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NamedParameter.Marshal(b, m, deterministic) +} +func (m *NamedParameter) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedParameter.Merge(m, src) +} +func (m *NamedParameter) XXX_Size() int { + return xxx_messageInfo_NamedParameter.Size(m) +} +func (m *NamedParameter) XXX_DiscardUnknown() { + xxx_messageInfo_NamedParameter.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedParameter proto.InternalMessageInfo func (m *NamedParameter) GetName() string { if m != nil { @@ -1516,15 +1916,38 @@ func (m *NamedParameter) GetValue() *Parameter { // Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
type NamedPathItem struct {
// Map key
- Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Mapped value
- Value *PathItem `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ Value *PathItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *NamedPathItem) Reset() { *m = NamedPathItem{} }
-func (m *NamedPathItem) String() string { return proto.CompactTextString(m) }
-func (*NamedPathItem) ProtoMessage() {}
-func (*NamedPathItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
+func (m *NamedPathItem) Reset() { *m = NamedPathItem{} }
+func (m *NamedPathItem) String() string { return proto.CompactTextString(m) }
+func (*NamedPathItem) ProtoMessage() {}
+func (*NamedPathItem) Descriptor() ([]byte, []int) {
+ return fileDescriptor_336adc04ae589d92, []int{23}
+}
+
+func (m *NamedPathItem) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_NamedPathItem.Unmarshal(m, b)
+}
+func (m *NamedPathItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_NamedPathItem.Marshal(b, m, deterministic)
+}
+func (m *NamedPathItem) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NamedPathItem.Merge(m, src)
+}
+func (m *NamedPathItem) XXX_Size() int {
+ return xxx_messageInfo_NamedPathItem.Size(m)
+}
+func (m *NamedPathItem) XXX_DiscardUnknown() {
+ xxx_messageInfo_NamedPathItem.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamedPathItem proto.InternalMessageInfo
func (m *NamedPathItem) GetName() string {
if m != nil {
@@ -1543,15 +1966,38 @@ func (m *NamedPathItem) GetValue() *PathItem {
// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs.
type NamedResponse struct {
// Map key
- Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Mapped value
- Value *Response `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ Value *Response `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NamedResponse) Reset() { *m = NamedResponse{} }
+func (m *NamedResponse) String() string { return proto.CompactTextString(m) }
+func (*NamedResponse) ProtoMessage() {}
+func (*NamedResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_336adc04ae589d92, []int{24}
}
-func (m *NamedResponse) Reset() { *m = NamedResponse{} }
-func (m *NamedResponse) String() string { return proto.CompactTextString(m) }
-func (*NamedResponse) ProtoMessage() {}
-func (*NamedResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
+func (m *NamedResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_NamedResponse.Unmarshal(m, b)
+}
+func (m *NamedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_NamedResponse.Marshal(b, m, deterministic)
+}
+func (m *NamedResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NamedResponse.Merge(m, src)
+}
+func (m *NamedResponse) XXX_Size() int {
+ return xxx_messageInfo_NamedResponse.Size(m)
+}
+func (m *NamedResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_NamedResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamedResponse proto.InternalMessageInfo
func (m *NamedResponse) GetName() string {
if m != nil {
@@ -1570,15 +2016,38 @@ func (m *NamedResponse) GetValue() *Response {
// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs.
type NamedResponseValue struct {
// Map key
- Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Mapped value
- Value *ResponseValue `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ Value *ResponseValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *NamedResponseValue) Reset() { *m = NamedResponseValue{} }
-func (m *NamedResponseValue) String() string { return proto.CompactTextString(m) }
-func (*NamedResponseValue) ProtoMessage() {}
-func (*NamedResponseValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
+func (m *NamedResponseValue) Reset() { *m = NamedResponseValue{} }
+func (m *NamedResponseValue) String() string { return proto.CompactTextString(m) }
+func (*NamedResponseValue) ProtoMessage() {}
+func (*NamedResponseValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_336adc04ae589d92, []int{25}
+}
+
+func (m *NamedResponseValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_NamedResponseValue.Unmarshal(m, b)
+}
+func (m *NamedResponseValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_NamedResponseValue.Marshal(b, m, deterministic)
+}
+func (m *NamedResponseValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NamedResponseValue.Merge(m, src)
+}
+func (m *NamedResponseValue) XXX_Size() int {
+ return xxx_messageInfo_NamedResponseValue.Size(m)
+}
+func (m *NamedResponseValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_NamedResponseValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamedResponseValue proto.InternalMessageInfo
func (m *NamedResponseValue) GetName() string {
if m != nil {
@@ -1597,15 +2066,38 @@ func (m *NamedResponseValue) GetValue() *ResponseValue {
// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs.
type NamedSchema struct {
// Map key
- Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Mapped value
- Value *Schema `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ Value *Schema `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NamedSchema) Reset() { *m = NamedSchema{} }
+func (m *NamedSchema) String() string { return proto.CompactTextString(m) }
+func (*NamedSchema) ProtoMessage() {}
+func (*NamedSchema) Descriptor() ([]byte, []int) {
+ return fileDescriptor_336adc04ae589d92, []int{26}
+}
+
+func (m *NamedSchema) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_NamedSchema.Unmarshal(m, b)
+}
+func (m *NamedSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_NamedSchema.Marshal(b, m, deterministic)
+}
+func (m *NamedSchema) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NamedSchema.Merge(m, src)
+}
+func (m *NamedSchema) XXX_Size() int {
+ return xxx_messageInfo_NamedSchema.Size(m)
+}
+func (m *NamedSchema) XXX_DiscardUnknown() {
+ xxx_messageInfo_NamedSchema.DiscardUnknown(m)
}
-func (m *NamedSchema) Reset() { *m = NamedSchema{} }
-func (m *NamedSchema) String() string { return proto.CompactTextString(m) }
-func (*NamedSchema) ProtoMessage() {}
-func (*NamedSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
+var xxx_messageInfo_NamedSchema proto.InternalMessageInfo
func (m *NamedSchema) GetName() string {
if m != nil {
@@ -1624,15 +2116,38 @@ func (m *NamedSchema) GetValue() *Schema {
// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs.
type NamedSecurityDefinitionsItem struct {
// Map key
- Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Mapped value
- Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *NamedSecurityDefinitionsItem) Reset() { *m = NamedSecurityDefinitionsItem{} }
-func (m *NamedSecurityDefinitionsItem) String() string { return proto.CompactTextString(m) }
-func (*NamedSecurityDefinitionsItem) ProtoMessage() {}
-func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
+func (m *NamedSecurityDefinitionsItem) Reset() { *m = NamedSecurityDefinitionsItem{} }
+func (m *NamedSecurityDefinitionsItem) String() string { return proto.CompactTextString(m) }
+func (*NamedSecurityDefinitionsItem) ProtoMessage() {}
+func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) {
+ return fileDescriptor_336adc04ae589d92, []int{27}
+}
+
+func (m *NamedSecurityDefinitionsItem) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_NamedSecurityDefinitionsItem.Unmarshal(m, b)
+}
+func (m *NamedSecurityDefinitionsItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_NamedSecurityDefinitionsItem.Marshal(b, m, deterministic)
+}
+func (m *NamedSecurityDefinitionsItem) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NamedSecurityDefinitionsItem.Merge(m, src)
+}
+func (m *NamedSecurityDefinitionsItem) XXX_Size() int {
+ return xxx_messageInfo_NamedSecurityDefinitionsItem.Size(m)
+}
+func (m *NamedSecurityDefinitionsItem) XXX_DiscardUnknown() {
+ xxx_messageInfo_NamedSecurityDefinitionsItem.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamedSecurityDefinitionsItem proto.InternalMessageInfo
func (m *NamedSecurityDefinitionsItem) GetName() string {
if m != nil {
@@ -1651,15 +2166,38 @@ func (m *NamedSecurityDefinitionsItem) GetValue() *SecurityDefinitionsItem {
// Automatically-generated message used to represent maps of string as ordered (name,value) pairs.
type NamedString struct {
// Map key
- Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Mapped value
- Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NamedString) Reset() { *m = NamedString{} }
+func (m *NamedString) String() string { return proto.CompactTextString(m) }
+func (*NamedString) ProtoMessage() {}
+func (*NamedString) Descriptor() ([]byte, []int) {
+ return fileDescriptor_336adc04ae589d92, []int{28}
+}
+
+func (m *NamedString) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_NamedString.Unmarshal(m, b)
+}
+func (m *NamedString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_NamedString.Marshal(b, m, deterministic)
+}
+func (m *NamedString) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NamedString.Merge(m, src)
+}
+func (m *NamedString) XXX_Size() int {
+ return xxx_messageInfo_NamedString.Size(m)
+}
+func (m *NamedString) XXX_DiscardUnknown() {
+ xxx_messageInfo_NamedString.DiscardUnknown(m)
}
-func (m *NamedString) Reset() { *m = NamedString{} }
-func (m *NamedString) String() string { return proto.CompactTextString(m) }
-func (*NamedString) ProtoMessage() {}
-func (*NamedString) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
+var xxx_messageInfo_NamedString proto.InternalMessageInfo
func (m *NamedString) GetName() string {
if m != nil {
@@ -1678,15 +2216,38 @@ func (m *NamedString) GetValue() string {
// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs.
type NamedStringArray struct {
// Map key
- Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Mapped value
- Value *StringArray `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ Value *StringArray `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NamedStringArray) Reset() { *m = NamedStringArray{} }
+func (m *NamedStringArray) String() string { return proto.CompactTextString(m) }
+func (*NamedStringArray) ProtoMessage() {}
+func (*NamedStringArray) Descriptor() ([]byte, []int) {
+ return fileDescriptor_336adc04ae589d92, []int{29}
}
-func (m *NamedStringArray) Reset() { *m = NamedStringArray{} }
-func (m *NamedStringArray) String() string { return proto.CompactTextString(m) }
-func (*NamedStringArray) ProtoMessage() {}
-func (*NamedStringArray) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
+func (m *NamedStringArray) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_NamedStringArray.Unmarshal(m, b)
+}
+func (m *NamedStringArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_NamedStringArray.Marshal(b, m, deterministic)
+}
+func (m *NamedStringArray) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NamedStringArray.Merge(m, src)
+}
+func (m *NamedStringArray) XXX_Size() int {
+ return xxx_messageInfo_NamedStringArray.Size(m)
+}
+func (m *NamedStringArray) XXX_DiscardUnknown() {
+ xxx_messageInfo_NamedStringArray.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamedStringArray proto.InternalMessageInfo
func (m *NamedStringArray) GetName() string {
if m != nil {
@@ -1708,35 +2269,64 @@ type NonBodyParameter struct {
// *NonBodyParameter_FormDataParameterSubSchema
// *NonBodyParameter_QueryParameterSubSchema
// *NonBodyParameter_PathParameterSubSchema
- Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"`
+ Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NonBodyParameter) Reset() { *m = NonBodyParameter{} }
+func (m *NonBodyParameter) String() string { return proto.CompactTextString(m) }
+func (*NonBodyParameter) ProtoMessage() {}
+func (*NonBodyParameter) Descriptor() ([]byte, []int) {
+ return fileDescriptor_336adc04ae589d92, []int{30}
+}
+
+func (m *NonBodyParameter) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_NonBodyParameter.Unmarshal(m, b)
+}
+func (m *NonBodyParameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_NonBodyParameter.Marshal(b, m, deterministic)
+}
+func (m *NonBodyParameter) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NonBodyParameter.Merge(m, src)
+}
+func (m *NonBodyParameter) XXX_Size() int {
+ return xxx_messageInfo_NonBodyParameter.Size(m)
+}
+func (m *NonBodyParameter) XXX_DiscardUnknown() {
+ xxx_messageInfo_NonBodyParameter.DiscardUnknown(m)
}
-func (m *NonBodyParameter) Reset() { *m = NonBodyParameter{} }
-func (m *NonBodyParameter) String() string { return proto.CompactTextString(m) }
-func (*NonBodyParameter) ProtoMessage() {}
-func (*NonBodyParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
+var xxx_messageInfo_NonBodyParameter proto.InternalMessageInfo
type isNonBodyParameter_Oneof interface {
isNonBodyParameter_Oneof()
}
type NonBodyParameter_HeaderParameterSubSchema struct {
- HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,oneof"`
+ HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,proto3,oneof"`
}
+
type NonBodyParameter_FormDataParameterSubSchema struct {
- FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,oneof"`
+ FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,proto3,oneof"`
}
+
type NonBodyParameter_QueryParameterSubSchema struct {
- QueryParameterSubSchema *QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,oneof"`
+ QueryParameterSubSchema *QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,proto3,oneof"`
}
+
type NonBodyParameter_PathParameterSubSchema struct {
- PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,oneof"`
+ PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,proto3,oneof"`
}
-func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof() {}
+func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof() {}
+
func (*NonBodyParameter_FormDataParameterSubSchema) isNonBodyParameter_Oneof() {}
-func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof() {}
-func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof() {}
+
+func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof() {}
+
+func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof() {}
func (m *NonBodyParameter) GetOneof() isNonBodyParameter_Oneof {
if m != nil {
@@ -1773,9 +2363,9 @@ func (m *NonBodyParameter) GetPathParameterSubSchema() *PathParameterSubSchema {
return nil
}
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*NonBodyParameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
- return _NonBodyParameter_OneofMarshaler, _NonBodyParameter_OneofUnmarshaler, _NonBodyParameter_OneofSizer, []interface{}{
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*NonBodyParameter) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
(*NonBodyParameter_HeaderParameterSubSchema)(nil),
(*NonBodyParameter_FormDataParameterSubSchema)(nil),
(*NonBodyParameter_QueryParameterSubSchema)(nil),
@@ -1783,122 +2373,43 @@ func (*NonBodyParameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buff
}
}
-func _NonBodyParameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
- m := msg.(*NonBodyParameter)
- // oneof
- switch x := m.Oneof.(type) {
- case *NonBodyParameter_HeaderParameterSubSchema:
- b.EncodeVarint(1<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.HeaderParameterSubSchema); err != nil {
- return err
- }
- case *NonBodyParameter_FormDataParameterSubSchema:
- b.EncodeVarint(2<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.FormDataParameterSubSchema); err != nil {
- return err
- }
- case *NonBodyParameter_QueryParameterSubSchema:
- b.EncodeVarint(3<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.QueryParameterSubSchema); err != nil {
- return err
- }
- case *NonBodyParameter_PathParameterSubSchema:
- b.EncodeVarint(4<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.PathParameterSubSchema); err != nil {
- return err
- }
- case nil:
- default:
- return fmt.Errorf("NonBodyParameter.Oneof has unexpected type %T", x)
- }
- return nil
-}
-
-func _NonBodyParameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
- m := msg.(*NonBodyParameter)
- switch tag {
- case 1: // oneof.header_parameter_sub_schema
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(HeaderParameterSubSchema)
- err := b.DecodeMessage(msg)
- m.Oneof = &NonBodyParameter_HeaderParameterSubSchema{msg}
- return true, err
- case 2: // oneof.form_data_parameter_sub_schema
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(FormDataParameterSubSchema)
- err := b.DecodeMessage(msg)
- m.Oneof = &NonBodyParameter_FormDataParameterSubSchema{msg}
- return true, err
- case 3: // oneof.query_parameter_sub_schema
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(QueryParameterSubSchema)
- err := b.DecodeMessage(msg)
- m.Oneof = &NonBodyParameter_QueryParameterSubSchema{msg}
- return true, err
- case 4: // oneof.path_parameter_sub_schema
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(PathParameterSubSchema)
- err := b.DecodeMessage(msg)
- m.Oneof = &NonBodyParameter_PathParameterSubSchema{msg}
- return true, err
- default:
- return false, nil
- }
-}
-
-func _NonBodyParameter_OneofSizer(msg proto.Message) (n int) {
- m := msg.(*NonBodyParameter)
- // oneof
- switch x := m.Oneof.(type) {
- case *NonBodyParameter_HeaderParameterSubSchema:
- s := proto.Size(x.HeaderParameterSubSchema)
- n += proto.SizeVarint(1<<3 | proto.WireBytes)
- n += proto.SizeVarint(uint64(s))
- n += s
- case *NonBodyParameter_FormDataParameterSubSchema:
- s := proto.Size(x.FormDataParameterSubSchema)
- n += proto.SizeVarint(2<<3 | proto.WireBytes)
- n += proto.SizeVarint(uint64(s))
- n += s
- case *NonBodyParameter_QueryParameterSubSchema:
- s := proto.Size(x.QueryParameterSubSchema)
- n += proto.SizeVarint(3<<3 | proto.WireBytes)
- n += proto.SizeVarint(uint64(s))
- n += s
- case *NonBodyParameter_PathParameterSubSchema:
- s := proto.Size(x.PathParameterSubSchema)
- n += proto.SizeVarint(4<<3 | proto.WireBytes)
- n += proto.SizeVarint(uint64(s))
- n += s
- case nil:
- default:
- panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
- }
- return n
+type Oauth2AccessCodeSecurity struct {
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"`
+ Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"`
+ AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"`
+ TokenUrl string `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"`
+ Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"`
+ VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-type Oauth2AccessCodeSecurity struct {
- Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
- Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"`
- Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"`
- AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"`
- TokenUrl string `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"`
- Description string `protobuf:"bytes,6,opt,name=description" json:"description,omitempty"`
- VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+func (m *Oauth2AccessCodeSecurity) Reset() { *m = Oauth2AccessCodeSecurity{} }
+func (m *Oauth2AccessCodeSecurity) String() string { return proto.CompactTextString(m) }
+func (*Oauth2AccessCodeSecurity) ProtoMessage() {}
+func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) {
+ return fileDescriptor_336adc04ae589d92, []int{31}
}
-func (m *Oauth2AccessCodeSecurity) Reset() { *m = Oauth2AccessCodeSecurity{} }
-func (m *Oauth2AccessCodeSecurity) String() string { return proto.CompactTextString(m) }
-func (*Oauth2AccessCodeSecurity) ProtoMessage() {}
-func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} }
+func (m *Oauth2AccessCodeSecurity) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Oauth2AccessCodeSecurity.Unmarshal(m, b)
+}
+func (m *Oauth2AccessCodeSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Oauth2AccessCodeSecurity.Marshal(b, m, deterministic)
+}
+func (m *Oauth2AccessCodeSecurity) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Oauth2AccessCodeSecurity.Merge(m, src)
+}
+func (m *Oauth2AccessCodeSecurity) XXX_Size() int {
+ return xxx_messageInfo_Oauth2AccessCodeSecurity.Size(m)
+}
+func (m *Oauth2AccessCodeSecurity) XXX_DiscardUnknown() {
+ xxx_messageInfo_Oauth2AccessCodeSecurity.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Oauth2AccessCodeSecurity proto.InternalMessageInfo
func (m *Oauth2AccessCodeSecurity) GetType() string {
if m != nil {
@@ -1950,18 +2461,41 @@ func (m *Oauth2AccessCodeSecurity) GetVendorExtension() []*NamedAny {
}
type Oauth2ApplicationSecurity struct {
- Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
- Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"`
- Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"`
- TokenUrl string
`protobuf:"bytes,4,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` + TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Oauth2ApplicationSecurity) Reset() { *m = Oauth2ApplicationSecurity{} } -func (m *Oauth2ApplicationSecurity) String() string { return proto.CompactTextString(m) } -func (*Oauth2ApplicationSecurity) ProtoMessage() {} -func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } +func (m *Oauth2ApplicationSecurity) Reset() { *m = Oauth2ApplicationSecurity{} } +func (m *Oauth2ApplicationSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2ApplicationSecurity) ProtoMessage() {} +func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{32} +} + +func (m *Oauth2ApplicationSecurity) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Oauth2ApplicationSecurity.Unmarshal(m, b) +} +func (m *Oauth2ApplicationSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Oauth2ApplicationSecurity.Marshal(b, m, deterministic) +} +func (m *Oauth2ApplicationSecurity) XXX_Merge(src proto.Message) { + xxx_messageInfo_Oauth2ApplicationSecurity.Merge(m, src) +} +func (m *Oauth2ApplicationSecurity) XXX_Size() int { + return xxx_messageInfo_Oauth2ApplicationSecurity.Size(m) +} +func (m *Oauth2ApplicationSecurity) XXX_DiscardUnknown() { + xxx_messageInfo_Oauth2ApplicationSecurity.DiscardUnknown(m) +} + +var xxx_messageInfo_Oauth2ApplicationSecurity proto.InternalMessageInfo func (m *Oauth2ApplicationSecurity) GetType() string { if m != nil { @@ -2006,18 +2540,41 @@ func (m *Oauth2ApplicationSecurity) GetVendorExtension() []*NamedAny { } type Oauth2ImplicitSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` - AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` + AuthorizationUrl string 
`protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Oauth2ImplicitSecurity) Reset() { *m = Oauth2ImplicitSecurity{} } +func (m *Oauth2ImplicitSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2ImplicitSecurity) ProtoMessage() {} +func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{33} +} + +func (m *Oauth2ImplicitSecurity) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Oauth2ImplicitSecurity.Unmarshal(m, b) +} +func (m *Oauth2ImplicitSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Oauth2ImplicitSecurity.Marshal(b, m, deterministic) +} +func (m *Oauth2ImplicitSecurity) XXX_Merge(src proto.Message) { + xxx_messageInfo_Oauth2ImplicitSecurity.Merge(m, src) +} +func (m *Oauth2ImplicitSecurity) XXX_Size() int { + return xxx_messageInfo_Oauth2ImplicitSecurity.Size(m) +} +func (m *Oauth2ImplicitSecurity) XXX_DiscardUnknown() { + xxx_messageInfo_Oauth2ImplicitSecurity.DiscardUnknown(m) } -func (m *Oauth2ImplicitSecurity) Reset() { *m = Oauth2ImplicitSecurity{} } -func (m *Oauth2ImplicitSecurity) String() string { return proto.CompactTextString(m) } -func (*Oauth2ImplicitSecurity) ProtoMessage() {} -func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } +var xxx_messageInfo_Oauth2ImplicitSecurity proto.InternalMessageInfo func (m *Oauth2ImplicitSecurity) GetType() string { if m != nil { @@ -2062,18 +2619,41 @@ func (m *Oauth2ImplicitSecurity) GetVendorExtension() []*NamedAny { } type Oauth2PasswordSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` - TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` + TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Oauth2PasswordSecurity) Reset() { *m = Oauth2PasswordSecurity{} } +func (m *Oauth2PasswordSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2PasswordSecurity) ProtoMessage() {} +func (*Oauth2PasswordSecurity) 
Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{34} } -func (m *Oauth2PasswordSecurity) Reset() { *m = Oauth2PasswordSecurity{} } -func (m *Oauth2PasswordSecurity) String() string { return proto.CompactTextString(m) } -func (*Oauth2PasswordSecurity) ProtoMessage() {} -func (*Oauth2PasswordSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } +func (m *Oauth2PasswordSecurity) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Oauth2PasswordSecurity.Unmarshal(m, b) +} +func (m *Oauth2PasswordSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Oauth2PasswordSecurity.Marshal(b, m, deterministic) +} +func (m *Oauth2PasswordSecurity) XXX_Merge(src proto.Message) { + xxx_messageInfo_Oauth2PasswordSecurity.Merge(m, src) +} +func (m *Oauth2PasswordSecurity) XXX_Size() int { + return xxx_messageInfo_Oauth2PasswordSecurity.Size(m) +} +func (m *Oauth2PasswordSecurity) XXX_DiscardUnknown() { + xxx_messageInfo_Oauth2PasswordSecurity.DiscardUnknown(m) +} + +var xxx_messageInfo_Oauth2PasswordSecurity proto.InternalMessageInfo func (m *Oauth2PasswordSecurity) GetType() string { if m != nil { @@ -2118,13 +2698,36 @@ func (m *Oauth2PasswordSecurity) GetVendorExtension() []*NamedAny { } type Oauth2Scopes struct { - AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` + AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Oauth2Scopes) Reset() { *m = Oauth2Scopes{} } -func (m *Oauth2Scopes) String() string { return proto.CompactTextString(m) } -func (*Oauth2Scopes) ProtoMessage() {} -func (*Oauth2Scopes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } +func (m *Oauth2Scopes) Reset() { *m = Oauth2Scopes{} } +func (m *Oauth2Scopes) String() string { return proto.CompactTextString(m) } +func (*Oauth2Scopes) ProtoMessage() {} +func (*Oauth2Scopes) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{35} +} + +func (m *Oauth2Scopes) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Oauth2Scopes.Unmarshal(m, b) +} +func (m *Oauth2Scopes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Oauth2Scopes.Marshal(b, m, deterministic) +} +func (m *Oauth2Scopes) XXX_Merge(src proto.Message) { + xxx_messageInfo_Oauth2Scopes.Merge(m, src) +} +func (m *Oauth2Scopes) XXX_Size() int { + return xxx_messageInfo_Oauth2Scopes.Size(m) +} +func (m *Oauth2Scopes) XXX_DiscardUnknown() { + xxx_messageInfo_Oauth2Scopes.DiscardUnknown(m) +} + +var xxx_messageInfo_Oauth2Scopes proto.InternalMessageInfo func (m *Oauth2Scopes) GetAdditionalProperties() []*NamedString { if m != nil { @@ -2134,32 +2737,55 @@ func (m *Oauth2Scopes) GetAdditionalProperties() []*NamedString { } type Operation struct { - Tags []string `protobuf:"bytes,1,rep,name=tags" json:"tags,omitempty"` + Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"` // A brief summary of the operation. - Summary string `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` + Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` // A longer description of the operation, GitHub Flavored Markdown is allowed. 
- Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` // A unique identifier of the operation. - OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId" json:"operation_id,omitempty"` + OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` // A list of MIME types the API can produce. - Produces []string `protobuf:"bytes,6,rep,name=produces" json:"produces,omitempty"` + Produces []string `protobuf:"bytes,6,rep,name=produces,proto3" json:"produces,omitempty"` // A list of MIME types the API can consume. - Consumes []string `protobuf:"bytes,7,rep,name=consumes" json:"consumes,omitempty"` + Consumes []string `protobuf:"bytes,7,rep,name=consumes,proto3" json:"consumes,omitempty"` // The parameters needed to send a valid API call. - Parameters []*ParametersItem `protobuf:"bytes,8,rep,name=parameters" json:"parameters,omitempty"` - Responses *Responses `protobuf:"bytes,9,opt,name=responses" json:"responses,omitempty"` + Parameters []*ParametersItem `protobuf:"bytes,8,rep,name=parameters,proto3" json:"parameters,omitempty"` + Responses *Responses `protobuf:"bytes,9,opt,name=responses,proto3" json:"responses,omitempty"` // The transfer protocol of the API. - Schemes []string `protobuf:"bytes,10,rep,name=schemes" json:"schemes,omitempty"` - Deprecated bool `protobuf:"varint,11,opt,name=deprecated" json:"deprecated,omitempty"` - Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security" json:"security,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,13,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Schemes []string `protobuf:"bytes,10,rep,name=schemes,proto3" json:"schemes,omitempty"` + Deprecated bool `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,13,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Operation) Reset() { *m = Operation{} } +func (m *Operation) String() string { return proto.CompactTextString(m) } +func (*Operation) ProtoMessage() {} +func (*Operation) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{36} +} + +func (m *Operation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Operation.Unmarshal(m, b) +} +func (m *Operation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Operation.Marshal(b, m, deterministic) +} +func (m *Operation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Operation.Merge(m, src) +} +func (m *Operation) XXX_Size() int { + return xxx_messageInfo_Operation.Size(m) +} +func (m *Operation) XXX_DiscardUnknown() { + xxx_messageInfo_Operation.DiscardUnknown(m) } -func (m *Operation) Reset() { *m = Operation{} } -func (m *Operation) String() string { return proto.CompactTextString(m) } -func (*Operation) ProtoMessage() {} 
-func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } +var xxx_messageInfo_Operation proto.InternalMessageInfo func (m *Operation) GetTags() []string { if m != nil { @@ -2256,26 +2882,51 @@ type Parameter struct { // Types that are valid to be assigned to Oneof: // *Parameter_BodyParameter // *Parameter_NonBodyParameter - Oneof isParameter_Oneof `protobuf_oneof:"oneof"` + Oneof isParameter_Oneof `protobuf_oneof:"oneof"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Parameter) Reset() { *m = Parameter{} } -func (m *Parameter) String() string { return proto.CompactTextString(m) } -func (*Parameter) ProtoMessage() {} -func (*Parameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } +func (m *Parameter) Reset() { *m = Parameter{} } +func (m *Parameter) String() string { return proto.CompactTextString(m) } +func (*Parameter) ProtoMessage() {} +func (*Parameter) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{37} +} + +func (m *Parameter) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Parameter.Unmarshal(m, b) +} +func (m *Parameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Parameter.Marshal(b, m, deterministic) +} +func (m *Parameter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Parameter.Merge(m, src) +} +func (m *Parameter) XXX_Size() int { + return xxx_messageInfo_Parameter.Size(m) +} +func (m *Parameter) XXX_DiscardUnknown() { + xxx_messageInfo_Parameter.DiscardUnknown(m) +} + +var xxx_messageInfo_Parameter proto.InternalMessageInfo type isParameter_Oneof interface { isParameter_Oneof() } type Parameter_BodyParameter struct { - BodyParameter *BodyParameter `protobuf:"bytes,1,opt,name=body_parameter,json=bodyParameter,oneof"` + BodyParameter *BodyParameter `protobuf:"bytes,1,opt,name=body_parameter,json=bodyParameter,proto3,oneof"` } + type Parameter_NonBodyParameter struct { - NonBodyParameter *NonBodyParameter `protobuf:"bytes,2,opt,name=non_body_parameter,json=nonBodyParameter,oneof"` + NonBodyParameter *NonBodyParameter `protobuf:"bytes,2,opt,name=non_body_parameter,json=nonBodyParameter,proto3,oneof"` } -func (*Parameter_BodyParameter) isParameter_Oneof() {} +func (*Parameter_BodyParameter) isParameter_Oneof() {} + func (*Parameter_NonBodyParameter) isParameter_Oneof() {} func (m *Parameter) GetOneof() isParameter_Oneof { @@ -2299,89 +2950,46 @@ func (m *Parameter) GetNonBodyParameter() *NonBodyParameter { return nil } -// XXX_OneofFuncs is for the internal use of the proto package. -func (*Parameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _Parameter_OneofMarshaler, _Parameter_OneofUnmarshaler, _Parameter_OneofSizer, []interface{}{ +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*Parameter) XXX_OneofWrappers() []interface{} { + return []interface{}{ (*Parameter_BodyParameter)(nil), (*Parameter_NonBodyParameter)(nil), } } -func _Parameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*Parameter) - // oneof - switch x := m.Oneof.(type) { - case *Parameter_BodyParameter: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.BodyParameter); err != nil { - return err - } - case *Parameter_NonBodyParameter: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.NonBodyParameter); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("Parameter.Oneof has unexpected type %T", x) - } - return nil -} - -func _Parameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*Parameter) - switch tag { - case 1: // oneof.body_parameter - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(BodyParameter) - err := b.DecodeMessage(msg) - m.Oneof = &Parameter_BodyParameter{msg} - return true, err - case 2: // oneof.non_body_parameter - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(NonBodyParameter) - err := b.DecodeMessage(msg) - m.Oneof = &Parameter_NonBodyParameter{msg} - return true, err - default: - return false, nil - } -} - -func _Parameter_OneofSizer(msg proto.Message) (n int) { - m := msg.(*Parameter) - // oneof - switch x := m.Oneof.(type) { - case *Parameter_BodyParameter: - s := proto.Size(x.BodyParameter) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *Parameter_NonBodyParameter: - s := proto.Size(x.NonBodyParameter) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - // One or more JSON representations for parameters type ParameterDefinitions struct { - AdditionalProperties []*NamedParameter `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` + AdditionalProperties []*NamedParameter `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ParameterDefinitions) Reset() { *m = ParameterDefinitions{} } -func (m *ParameterDefinitions) String() string { return proto.CompactTextString(m) } -func (*ParameterDefinitions) ProtoMessage() {} -func (*ParameterDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } +func (m *ParameterDefinitions) Reset() { *m = ParameterDefinitions{} } +func (m *ParameterDefinitions) String() string { return proto.CompactTextString(m) } +func (*ParameterDefinitions) ProtoMessage() {} +func (*ParameterDefinitions) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{38} +} + +func (m *ParameterDefinitions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ParameterDefinitions.Unmarshal(m, b) +} +func (m *ParameterDefinitions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ParameterDefinitions.Marshal(b, m, deterministic) +} +func (m *ParameterDefinitions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParameterDefinitions.Merge(m, src) +} +func (m *ParameterDefinitions) XXX_Size() int { + return 
xxx_messageInfo_ParameterDefinitions.Size(m) +} +func (m *ParameterDefinitions) XXX_DiscardUnknown() { + xxx_messageInfo_ParameterDefinitions.DiscardUnknown(m) +} + +var xxx_messageInfo_ParameterDefinitions proto.InternalMessageInfo func (m *ParameterDefinitions) GetAdditionalProperties() []*NamedParameter { if m != nil { @@ -2394,26 +3002,51 @@ type ParametersItem struct { // Types that are valid to be assigned to Oneof: // *ParametersItem_Parameter // *ParametersItem_JsonReference - Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"` + Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ParametersItem) Reset() { *m = ParametersItem{} } +func (m *ParametersItem) String() string { return proto.CompactTextString(m) } +func (*ParametersItem) ProtoMessage() {} +func (*ParametersItem) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{39} +} + +func (m *ParametersItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ParametersItem.Unmarshal(m, b) +} +func (m *ParametersItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ParametersItem.Marshal(b, m, deterministic) +} +func (m *ParametersItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParametersItem.Merge(m, src) +} +func (m *ParametersItem) XXX_Size() int { + return xxx_messageInfo_ParametersItem.Size(m) +} +func (m *ParametersItem) XXX_DiscardUnknown() { + xxx_messageInfo_ParametersItem.DiscardUnknown(m) } -func (m *ParametersItem) Reset() { *m = ParametersItem{} } -func (m *ParametersItem) String() string { return proto.CompactTextString(m) } -func (*ParametersItem) ProtoMessage() {} -func (*ParametersItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } +var xxx_messageInfo_ParametersItem proto.InternalMessageInfo type isParametersItem_Oneof interface { isParametersItem_Oneof() } type ParametersItem_Parameter struct { - Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,oneof"` + Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,proto3,oneof"` } + type ParametersItem_JsonReference struct { - JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,oneof"` + JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"` } -func (*ParametersItem_Parameter) isParametersItem_Oneof() {} +func (*ParametersItem_Parameter) isParametersItem_Oneof() {} + func (*ParametersItem_JsonReference) isParametersItem_Oneof() {} func (m *ParametersItem) GetOneof() isParametersItem_Oneof { @@ -2437,98 +3070,55 @@ func (m *ParametersItem) GetJsonReference() *JsonReference { return nil } -// XXX_OneofFuncs is for the internal use of the proto package. -func (*ParametersItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _ParametersItem_OneofMarshaler, _ParametersItem_OneofUnmarshaler, _ParametersItem_OneofSizer, []interface{}{ +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*ParametersItem) XXX_OneofWrappers() []interface{} { + return []interface{}{ (*ParametersItem_Parameter)(nil), (*ParametersItem_JsonReference)(nil), } } -func _ParametersItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ParametersItem) - // oneof - switch x := m.Oneof.(type) { - case *ParametersItem_Parameter: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Parameter); err != nil { - return err - } - case *ParametersItem_JsonReference: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.JsonReference); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("ParametersItem.Oneof has unexpected type %T", x) - } - return nil -} - -func _ParametersItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ParametersItem) - switch tag { - case 1: // oneof.parameter - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Parameter) - err := b.DecodeMessage(msg) - m.Oneof = &ParametersItem_Parameter{msg} - return true, err - case 2: // oneof.json_reference - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(JsonReference) - err := b.DecodeMessage(msg) - m.Oneof = &ParametersItem_JsonReference{msg} - return true, err - default: - return false, nil - } -} - -func _ParametersItem_OneofSizer(msg proto.Message) (n int) { - m := msg.(*ParametersItem) - // oneof - switch x := m.Oneof.(type) { - case *ParametersItem_Parameter: - s := proto.Size(x.Parameter) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *ParametersItem_JsonReference: - s := proto.Size(x.JsonReference) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - type PathItem struct { - XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` - Get *Operation `protobuf:"bytes,2,opt,name=get" json:"get,omitempty"` - Put *Operation `protobuf:"bytes,3,opt,name=put" json:"put,omitempty"` - Post *Operation `protobuf:"bytes,4,opt,name=post" json:"post,omitempty"` - Delete *Operation `protobuf:"bytes,5,opt,name=delete" json:"delete,omitempty"` - Options *Operation `protobuf:"bytes,6,opt,name=options" json:"options,omitempty"` - Head *Operation `protobuf:"bytes,7,opt,name=head" json:"head,omitempty"` - Patch *Operation `protobuf:"bytes,8,opt,name=patch" json:"patch,omitempty"` + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` + Get *Operation `protobuf:"bytes,2,opt,name=get,proto3" json:"get,omitempty"` + Put *Operation `protobuf:"bytes,3,opt,name=put,proto3" json:"put,omitempty"` + Post *Operation `protobuf:"bytes,4,opt,name=post,proto3" json:"post,omitempty"` + Delete *Operation `protobuf:"bytes,5,opt,name=delete,proto3" json:"delete,omitempty"` + Options *Operation `protobuf:"bytes,6,opt,name=options,proto3" json:"options,omitempty"` + Head *Operation `protobuf:"bytes,7,opt,name=head,proto3" json:"head,omitempty"` + Patch *Operation `protobuf:"bytes,8,opt,name=patch,proto3" json:"patch,omitempty"` // The parameters needed to send a valid API call. 
- Parameters []*ParametersItem `protobuf:"bytes,9,rep,name=parameters" json:"parameters,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Parameters []*ParametersItem `protobuf:"bytes,9,rep,name=parameters,proto3" json:"parameters,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *PathItem) Reset() { *m = PathItem{} } -func (m *PathItem) String() string { return proto.CompactTextString(m) } -func (*PathItem) ProtoMessage() {} -func (*PathItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } +func (m *PathItem) Reset() { *m = PathItem{} } +func (m *PathItem) String() string { return proto.CompactTextString(m) } +func (*PathItem) ProtoMessage() {} +func (*PathItem) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{40} +} + +func (m *PathItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PathItem.Unmarshal(m, b) +} +func (m *PathItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PathItem.Marshal(b, m, deterministic) +} +func (m *PathItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_PathItem.Merge(m, src) +} +func (m *PathItem) XXX_Size() int { + return xxx_messageInfo_PathItem.Size(m) +} +func (m *PathItem) XXX_DiscardUnknown() { + xxx_messageInfo_PathItem.DiscardUnknown(m) +} + +var xxx_messageInfo_PathItem proto.InternalMessageInfo func (m *PathItem) GetXRef() string { if m != nil { @@ -2602,37 +3192,60 @@ func (m *PathItem) GetVendorExtension() []*NamedAny { type PathParameterSubSchema struct { // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` // The name of the parameter. 
- Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` - Type string `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"` - Format string `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,9,opt,name=default" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,10,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,12,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,16,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *PathParameterSubSchema) Reset() { *m = PathParameterSubSchema{} } -func (m *PathParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*PathParameterSubSchema) ProtoMessage() {} -func (*PathParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 
`protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PathParameterSubSchema) Reset() { *m = PathParameterSubSchema{} } +func (m *PathParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*PathParameterSubSchema) ProtoMessage() {} +func (*PathParameterSubSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{41} +} + +func (m *PathParameterSubSchema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PathParameterSubSchema.Unmarshal(m, b) +} +func (m *PathParameterSubSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PathParameterSubSchema.Marshal(b, m, deterministic) +} +func (m *PathParameterSubSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_PathParameterSubSchema.Merge(m, src) +} +func (m *PathParameterSubSchema) XXX_Size() int { + return xxx_messageInfo_PathParameterSubSchema.Size(m) +} +func (m *PathParameterSubSchema) XXX_DiscardUnknown() { + xxx_messageInfo_PathParameterSubSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_PathParameterSubSchema proto.InternalMessageInfo func (m *PathParameterSubSchema) GetRequired() bool { if m != nil { @@ -2790,14 +3403,37 @@ func (m *PathParameterSubSchema) GetVendorExtension() []*NamedAny { // Relative paths to the individual endpoints. They must be relative to the 'basePath'. 
type Paths struct { - VendorExtension []*NamedAny `protobuf:"bytes,1,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` - Path []*NamedPathItem `protobuf:"bytes,2,rep,name=path" json:"path,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,1,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + Path []*NamedPathItem `protobuf:"bytes,2,rep,name=path,proto3" json:"path,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Paths) Reset() { *m = Paths{} } +func (m *Paths) String() string { return proto.CompactTextString(m) } +func (*Paths) ProtoMessage() {} +func (*Paths) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{42} +} + +func (m *Paths) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Paths.Unmarshal(m, b) +} +func (m *Paths) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Paths.Marshal(b, m, deterministic) +} +func (m *Paths) XXX_Merge(src proto.Message) { + xxx_messageInfo_Paths.Merge(m, src) +} +func (m *Paths) XXX_Size() int { + return xxx_messageInfo_Paths.Size(m) +} +func (m *Paths) XXX_DiscardUnknown() { + xxx_messageInfo_Paths.DiscardUnknown(m) } -func (m *Paths) Reset() { *m = Paths{} } -func (m *Paths) String() string { return proto.CompactTextString(m) } -func (*Paths) ProtoMessage() {} -func (*Paths) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } +var xxx_messageInfo_Paths proto.InternalMessageInfo func (m *Paths) GetVendorExtension() []*NamedAny { if m != nil { @@ -2814,30 +3450,53 @@ func (m *Paths) GetPath() []*NamedPathItem { } type PrimitivesItems struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,6,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,8,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,12,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,16,rep,name=enum" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,18,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - 
-func (m *PrimitivesItems) Reset() { *m = PrimitivesItems{} } -func (m *PrimitivesItems) String() string { return proto.CompactTextString(m) } -func (*PrimitivesItems) ProtoMessage() {} -func (*PrimitivesItems) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,18,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PrimitivesItems) Reset() { *m = PrimitivesItems{} } +func (m *PrimitivesItems) String() string { return proto.CompactTextString(m) } +func (*PrimitivesItems) ProtoMessage() {} +func (*PrimitivesItems) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{43} +} + +func (m *PrimitivesItems) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PrimitivesItems.Unmarshal(m, b) +} +func (m *PrimitivesItems) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PrimitivesItems.Marshal(b, m, deterministic) +} +func (m *PrimitivesItems) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrimitivesItems.Merge(m, src) +} +func (m *PrimitivesItems) XXX_Size() int { + return xxx_messageInfo_PrimitivesItems.Size(m) +} +func (m *PrimitivesItems) XXX_DiscardUnknown() { + xxx_messageInfo_PrimitivesItems.DiscardUnknown(m) +} + +var xxx_messageInfo_PrimitivesItems proto.InternalMessageInfo func (m *PrimitivesItems) GetType() string { if m != nil { @@ -2966,13 +3625,36 @@ func (m *PrimitivesItems) GetVendorExtension() []*NamedAny { } type Properties struct { - AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" 
json:"additional_properties,omitempty"` + AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Properties) Reset() { *m = Properties{} } +func (m *Properties) String() string { return proto.CompactTextString(m) } +func (*Properties) ProtoMessage() {} +func (*Properties) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{44} +} + +func (m *Properties) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Properties.Unmarshal(m, b) +} +func (m *Properties) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Properties.Marshal(b, m, deterministic) +} +func (m *Properties) XXX_Merge(src proto.Message) { + xxx_messageInfo_Properties.Merge(m, src) +} +func (m *Properties) XXX_Size() int { + return xxx_messageInfo_Properties.Size(m) +} +func (m *Properties) XXX_DiscardUnknown() { + xxx_messageInfo_Properties.DiscardUnknown(m) } -func (m *Properties) Reset() { *m = Properties{} } -func (m *Properties) String() string { return proto.CompactTextString(m) } -func (*Properties) ProtoMessage() {} -func (*Properties) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } +var xxx_messageInfo_Properties proto.InternalMessageInfo func (m *Properties) GetAdditionalProperties() []*NamedSchema { if m != nil { @@ -2983,39 +3665,62 @@ func (m *Properties) GetAdditionalProperties() []*NamedSchema { type QueryParameterSubSchema struct { // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` // The name of the parameter. - Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` // allows sending a parameter by name only or with an empty value. 
- AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue" json:"allow_empty_value,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` - Format string `protobuf:"bytes,7,opt,name=format" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,10,opt,name=default" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,21,rep,name=enum" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *QueryParameterSubSchema) Reset() { *m = QueryParameterSubSchema{} } -func (m *QueryParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*QueryParameterSubSchema) ProtoMessage() {} -func (*QueryParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } + AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string 
`protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *QueryParameterSubSchema) Reset() { *m = QueryParameterSubSchema{} } +func (m *QueryParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*QueryParameterSubSchema) ProtoMessage() {} +func (*QueryParameterSubSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{45} +} + +func (m *QueryParameterSubSchema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_QueryParameterSubSchema.Unmarshal(m, b) +} +func (m *QueryParameterSubSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_QueryParameterSubSchema.Marshal(b, m, deterministic) +} +func (m *QueryParameterSubSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParameterSubSchema.Merge(m, src) +} +func (m *QueryParameterSubSchema) XXX_Size() int { + return xxx_messageInfo_QueryParameterSubSchema.Size(m) +} +func (m *QueryParameterSubSchema) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParameterSubSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParameterSubSchema proto.InternalMessageInfo func (m *QueryParameterSubSchema) GetRequired() bool { if m != nil { @@ -3179,17 +3884,40 @@ func (m *QueryParameterSubSchema) GetVendorExtension() []*NamedAny { } type Response struct { - Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` - Schema *SchemaItem `protobuf:"bytes,2,opt,name=schema" json:"schema,omitempty"` - Headers *Headers `protobuf:"bytes,3,opt,name=headers" json:"headers,omitempty"` - Examples *Examples `protobuf:"bytes,4,opt,name=examples" json:"examples,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + Schema *SchemaItem `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"` + Headers *Headers `protobuf:"bytes,3,opt,name=headers,proto3" json:"headers,omitempty"` + Examples *Examples `protobuf:"bytes,4,opt,name=examples,proto3" json:"examples,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} +func (*Response) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{46} +} + +func (m *Response) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_Response.Unmarshal(m, b) +} +func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Response.Marshal(b, m, deterministic) +} +func (m *Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_Response.Merge(m, src) +} +func (m *Response) XXX_Size() int { + return xxx_messageInfo_Response.Size(m) +} +func (m *Response) XXX_DiscardUnknown() { + xxx_messageInfo_Response.DiscardUnknown(m) } -func (m *Response) Reset() { *m = Response{} } -func (m *Response) String() string { return proto.CompactTextString(m) } -func (*Response) ProtoMessage() {} -func (*Response) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } +var xxx_messageInfo_Response proto.InternalMessageInfo func (m *Response) GetDescription() string { if m != nil { @@ -3228,13 +3956,36 @@ func (m *Response) GetVendorExtension() []*NamedAny { // One or more JSON representations for parameters type ResponseDefinitions struct { - AdditionalProperties []*NamedResponse `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` + AdditionalProperties []*NamedResponse `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ResponseDefinitions) Reset() { *m = ResponseDefinitions{} } -func (m *ResponseDefinitions) String() string { return proto.CompactTextString(m) } -func (*ResponseDefinitions) ProtoMessage() {} -func (*ResponseDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } +func (m *ResponseDefinitions) Reset() { *m = ResponseDefinitions{} } +func (m *ResponseDefinitions) String() string { return proto.CompactTextString(m) } +func (*ResponseDefinitions) ProtoMessage() {} +func (*ResponseDefinitions) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{47} +} + +func (m *ResponseDefinitions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResponseDefinitions.Unmarshal(m, b) +} +func (m *ResponseDefinitions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResponseDefinitions.Marshal(b, m, deterministic) +} +func (m *ResponseDefinitions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseDefinitions.Merge(m, src) +} +func (m *ResponseDefinitions) XXX_Size() int { + return xxx_messageInfo_ResponseDefinitions.Size(m) +} +func (m *ResponseDefinitions) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseDefinitions.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseDefinitions proto.InternalMessageInfo func (m *ResponseDefinitions) GetAdditionalProperties() []*NamedResponse { if m != nil { @@ -3247,26 +3998,51 @@ type ResponseValue struct { // Types that are valid to be assigned to Oneof: // *ResponseValue_Response // *ResponseValue_JsonReference - Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"` + Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ResponseValue) Reset() { *m = ResponseValue{} } -func (m *ResponseValue) String() string { return proto.CompactTextString(m) } -func (*ResponseValue) ProtoMessage() {} -func (*ResponseValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } +func (m *ResponseValue) Reset() { *m = ResponseValue{} } +func (m *ResponseValue) 
String() string { return proto.CompactTextString(m) } +func (*ResponseValue) ProtoMessage() {} +func (*ResponseValue) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{48} +} + +func (m *ResponseValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResponseValue.Unmarshal(m, b) +} +func (m *ResponseValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResponseValue.Marshal(b, m, deterministic) +} +func (m *ResponseValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseValue.Merge(m, src) +} +func (m *ResponseValue) XXX_Size() int { + return xxx_messageInfo_ResponseValue.Size(m) +} +func (m *ResponseValue) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseValue proto.InternalMessageInfo type isResponseValue_Oneof interface { isResponseValue_Oneof() } type ResponseValue_Response struct { - Response *Response `protobuf:"bytes,1,opt,name=response,oneof"` + Response *Response `protobuf:"bytes,1,opt,name=response,proto3,oneof"` } + type ResponseValue_JsonReference struct { - JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,oneof"` + JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"` } -func (*ResponseValue_Response) isResponseValue_Oneof() {} +func (*ResponseValue_Response) isResponseValue_Oneof() {} + func (*ResponseValue_JsonReference) isResponseValue_Oneof() {} func (m *ResponseValue) GetOneof() isResponseValue_Oneof { @@ -3290,90 +4066,47 @@ func (m *ResponseValue) GetJsonReference() *JsonReference { return nil } -// XXX_OneofFuncs is for the internal use of the proto package. -func (*ResponseValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _ResponseValue_OneofMarshaler, _ResponseValue_OneofUnmarshaler, _ResponseValue_OneofSizer, []interface{}{ +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*ResponseValue) XXX_OneofWrappers() []interface{} { + return []interface{}{ (*ResponseValue_Response)(nil), (*ResponseValue_JsonReference)(nil), } } -func _ResponseValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ResponseValue) - // oneof - switch x := m.Oneof.(type) { - case *ResponseValue_Response: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Response); err != nil { - return err - } - case *ResponseValue_JsonReference: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.JsonReference); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("ResponseValue.Oneof has unexpected type %T", x) - } - return nil -} - -func _ResponseValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ResponseValue) - switch tag { - case 1: // oneof.response - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Response) - err := b.DecodeMessage(msg) - m.Oneof = &ResponseValue_Response{msg} - return true, err - case 2: // oneof.json_reference - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(JsonReference) - err := b.DecodeMessage(msg) - m.Oneof = &ResponseValue_JsonReference{msg} - return true, err - default: - return false, nil - } -} - -func _ResponseValue_OneofSizer(msg proto.Message) (n int) { - m := msg.(*ResponseValue) - // oneof - switch x := m.Oneof.(type) { - case *ResponseValue_Response: - s := proto.Size(x.Response) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *ResponseValue_JsonReference: - s := proto.Size(x.JsonReference) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - // Response objects names can either be any valid HTTP status code or 'default'. 
type Responses struct { - ResponseCode []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode" json:"response_code,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + ResponseCode []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode,proto3" json:"response_code,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Responses) Reset() { *m = Responses{} } +func (m *Responses) String() string { return proto.CompactTextString(m) } +func (*Responses) ProtoMessage() {} +func (*Responses) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{49} +} + +func (m *Responses) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Responses.Unmarshal(m, b) +} +func (m *Responses) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Responses.Marshal(b, m, deterministic) +} +func (m *Responses) XXX_Merge(src proto.Message) { + xxx_messageInfo_Responses.Merge(m, src) +} +func (m *Responses) XXX_Size() int { + return xxx_messageInfo_Responses.Size(m) +} +func (m *Responses) XXX_DiscardUnknown() { + xxx_messageInfo_Responses.DiscardUnknown(m) } -func (m *Responses) Reset() { *m = Responses{} } -func (m *Responses) String() string { return proto.CompactTextString(m) } -func (*Responses) ProtoMessage() {} -func (*Responses) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } +var xxx_messageInfo_Responses proto.InternalMessageInfo func (m *Responses) GetResponseCode() []*NamedResponseValue { if m != nil { @@ -3391,43 +4124,66 @@ func (m *Responses) GetVendorExtension() []*NamedAny { // A deterministic version of a JSON Schema object. 
type Schema struct { - XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` - Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` - Title string `protobuf:"bytes,3,opt,name=title" json:"title,omitempty"` - Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` - Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` - MultipleOf float64 `protobuf:"fixed64,6,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - Maximum float64 `protobuf:"fixed64,7,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,8,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,9,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,10,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,11,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,12,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,13,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,14,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,15,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,16,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - MaxProperties int64 `protobuf:"varint,17,opt,name=max_properties,json=maxProperties" json:"max_properties,omitempty"` - MinProperties int64 `protobuf:"varint,18,opt,name=min_properties,json=minProperties" json:"min_properties,omitempty"` - Required []string `protobuf:"bytes,19,rep,name=required" json:"required,omitempty"` - Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` - AdditionalProperties *AdditionalPropertiesItem `protobuf:"bytes,21,opt,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` - Type *TypeItem `protobuf:"bytes,22,opt,name=type" json:"type,omitempty"` - Items *ItemsItem `protobuf:"bytes,23,opt,name=items" json:"items,omitempty"` - AllOf []*Schema `protobuf:"bytes,24,rep,name=all_of,json=allOf" json:"all_of,omitempty"` - Properties *Properties `protobuf:"bytes,25,opt,name=properties" json:"properties,omitempty"` - Discriminator string `protobuf:"bytes,26,opt,name=discriminator" json:"discriminator,omitempty"` - ReadOnly bool `protobuf:"varint,27,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` - Xml *Xml `protobuf:"bytes,28,opt,name=xml" json:"xml,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,29,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` - Example *Any `protobuf:"bytes,30,opt,name=example" json:"example,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,31,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Schema) Reset() { *m = Schema{} } -func (m *Schema) String() string { return proto.CompactTextString(m) } -func (*Schema) ProtoMessage() {} -func (*Schema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} } + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` + Title string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"` 
+ Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` + MultipleOf float64 `protobuf:"fixed64,6,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + Maximum float64 `protobuf:"fixed64,7,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,8,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,9,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,10,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,11,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,12,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,13,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,14,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,15,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,16,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + MaxProperties int64 `protobuf:"varint,17,opt,name=max_properties,json=maxProperties,proto3" json:"max_properties,omitempty"` + MinProperties int64 `protobuf:"varint,18,opt,name=min_properties,json=minProperties,proto3" json:"min_properties,omitempty"` + Required []string `protobuf:"bytes,19,rep,name=required,proto3" json:"required,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` + AdditionalProperties *AdditionalPropertiesItem `protobuf:"bytes,21,opt,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + Type *TypeItem `protobuf:"bytes,22,opt,name=type,proto3" json:"type,omitempty"` + Items *ItemsItem `protobuf:"bytes,23,opt,name=items,proto3" json:"items,omitempty"` + AllOf []*Schema `protobuf:"bytes,24,rep,name=all_of,json=allOf,proto3" json:"all_of,omitempty"` + Properties *Properties `protobuf:"bytes,25,opt,name=properties,proto3" json:"properties,omitempty"` + Discriminator string `protobuf:"bytes,26,opt,name=discriminator,proto3" json:"discriminator,omitempty"` + ReadOnly bool `protobuf:"varint,27,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + Xml *Xml `protobuf:"bytes,28,opt,name=xml,proto3" json:"xml,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,29,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + Example *Any `protobuf:"bytes,30,opt,name=example,proto3" json:"example,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,31,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Schema) Reset() { *m = Schema{} } +func (m *Schema) String() string { return proto.CompactTextString(m) } +func (*Schema) ProtoMessage() {} +func (*Schema) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{50} +} + +func (m *Schema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Schema.Unmarshal(m, b) +} +func (m *Schema) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { + return xxx_messageInfo_Schema.Marshal(b, m, deterministic) +} +func (m *Schema) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema.Merge(m, src) +} +func (m *Schema) XXX_Size() int { + return xxx_messageInfo_Schema.Size(m) +} +func (m *Schema) XXX_DiscardUnknown() { + xxx_messageInfo_Schema.DiscardUnknown(m) +} + +var xxx_messageInfo_Schema proto.InternalMessageInfo func (m *Schema) GetXRef() string { if m != nil { @@ -3650,26 +4406,51 @@ type SchemaItem struct { // Types that are valid to be assigned to Oneof: // *SchemaItem_Schema // *SchemaItem_FileSchema - Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"` + Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *SchemaItem) Reset() { *m = SchemaItem{} } -func (m *SchemaItem) String() string { return proto.CompactTextString(m) } -func (*SchemaItem) ProtoMessage() {} -func (*SchemaItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } +func (m *SchemaItem) Reset() { *m = SchemaItem{} } +func (m *SchemaItem) String() string { return proto.CompactTextString(m) } +func (*SchemaItem) ProtoMessage() {} +func (*SchemaItem) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{51} +} + +func (m *SchemaItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SchemaItem.Unmarshal(m, b) +} +func (m *SchemaItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SchemaItem.Marshal(b, m, deterministic) +} +func (m *SchemaItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_SchemaItem.Merge(m, src) +} +func (m *SchemaItem) XXX_Size() int { + return xxx_messageInfo_SchemaItem.Size(m) +} +func (m *SchemaItem) XXX_DiscardUnknown() { + xxx_messageInfo_SchemaItem.DiscardUnknown(m) +} + +var xxx_messageInfo_SchemaItem proto.InternalMessageInfo type isSchemaItem_Oneof interface { isSchemaItem_Oneof() } type SchemaItem_Schema struct { - Schema *Schema `protobuf:"bytes,1,opt,name=schema,oneof"` + Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"` } + type SchemaItem_FileSchema struct { - FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,oneof"` + FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,proto3,oneof"` } -func (*SchemaItem_Schema) isSchemaItem_Oneof() {} +func (*SchemaItem_Schema) isSchemaItem_Oneof() {} + func (*SchemaItem_FileSchema) isSchemaItem_Oneof() {} func (m *SchemaItem) GetOneof() isSchemaItem_Oneof { @@ -3693,88 +4474,45 @@ func (m *SchemaItem) GetFileSchema() *FileSchema { return nil } -// XXX_OneofFuncs is for the internal use of the proto package. -func (*SchemaItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _SchemaItem_OneofMarshaler, _SchemaItem_OneofUnmarshaler, _SchemaItem_OneofSizer, []interface{}{ +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*SchemaItem) XXX_OneofWrappers() []interface{} { + return []interface{}{ (*SchemaItem_Schema)(nil), (*SchemaItem_FileSchema)(nil), } } -func _SchemaItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*SchemaItem) - // oneof - switch x := m.Oneof.(type) { - case *SchemaItem_Schema: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Schema); err != nil { - return err - } - case *SchemaItem_FileSchema: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.FileSchema); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("SchemaItem.Oneof has unexpected type %T", x) - } - return nil -} - -func _SchemaItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*SchemaItem) - switch tag { - case 1: // oneof.schema - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Schema) - err := b.DecodeMessage(msg) - m.Oneof = &SchemaItem_Schema{msg} - return true, err - case 2: // oneof.file_schema - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(FileSchema) - err := b.DecodeMessage(msg) - m.Oneof = &SchemaItem_FileSchema{msg} - return true, err - default: - return false, nil - } -} - -func _SchemaItem_OneofSizer(msg proto.Message) (n int) { - m := msg.(*SchemaItem) - // oneof - switch x := m.Oneof.(type) { - case *SchemaItem_Schema: - s := proto.Size(x.Schema) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *SchemaItem_FileSchema: - s := proto.Size(x.FileSchema) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n +type SecurityDefinitions struct { + AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -type SecurityDefinitions struct { - AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +func (m *SecurityDefinitions) Reset() { *m = SecurityDefinitions{} } +func (m *SecurityDefinitions) String() string { return proto.CompactTextString(m) } +func (*SecurityDefinitions) ProtoMessage() {} +func (*SecurityDefinitions) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{52} } -func (m *SecurityDefinitions) Reset() { *m = SecurityDefinitions{} } -func (m *SecurityDefinitions) String() string { return proto.CompactTextString(m) } -func (*SecurityDefinitions) ProtoMessage() {} -func (*SecurityDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} } +func (m *SecurityDefinitions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SecurityDefinitions.Unmarshal(m, b) +} +func (m *SecurityDefinitions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SecurityDefinitions.Marshal(b, m, deterministic) +} +func (m *SecurityDefinitions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecurityDefinitions.Merge(m, src) +} +func (m *SecurityDefinitions) XXX_Size() int { + return xxx_messageInfo_SecurityDefinitions.Size(m) +} +func (m *SecurityDefinitions) XXX_DiscardUnknown() { + 
xxx_messageInfo_SecurityDefinitions.DiscardUnknown(m) +} + +var xxx_messageInfo_SecurityDefinitions proto.InternalMessageInfo func (m *SecurityDefinitions) GetAdditionalProperties() []*NamedSecurityDefinitionsItem { if m != nil { @@ -3791,43 +4529,76 @@ type SecurityDefinitionsItem struct { // *SecurityDefinitionsItem_Oauth2PasswordSecurity // *SecurityDefinitionsItem_Oauth2ApplicationSecurity // *SecurityDefinitionsItem_Oauth2AccessCodeSecurity - Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"` + Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SecurityDefinitionsItem) Reset() { *m = SecurityDefinitionsItem{} } +func (m *SecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } +func (*SecurityDefinitionsItem) ProtoMessage() {} +func (*SecurityDefinitionsItem) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{53} } -func (m *SecurityDefinitionsItem) Reset() { *m = SecurityDefinitionsItem{} } -func (m *SecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } -func (*SecurityDefinitionsItem) ProtoMessage() {} -func (*SecurityDefinitionsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } +func (m *SecurityDefinitionsItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SecurityDefinitionsItem.Unmarshal(m, b) +} +func (m *SecurityDefinitionsItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SecurityDefinitionsItem.Marshal(b, m, deterministic) +} +func (m *SecurityDefinitionsItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecurityDefinitionsItem.Merge(m, src) +} +func (m *SecurityDefinitionsItem) XXX_Size() int { + return xxx_messageInfo_SecurityDefinitionsItem.Size(m) +} +func (m *SecurityDefinitionsItem) XXX_DiscardUnknown() { + xxx_messageInfo_SecurityDefinitionsItem.DiscardUnknown(m) +} + +var xxx_messageInfo_SecurityDefinitionsItem proto.InternalMessageInfo type isSecurityDefinitionsItem_Oneof interface { isSecurityDefinitionsItem_Oneof() } type SecurityDefinitionsItem_BasicAuthenticationSecurity struct { - BasicAuthenticationSecurity *BasicAuthenticationSecurity `protobuf:"bytes,1,opt,name=basic_authentication_security,json=basicAuthenticationSecurity,oneof"` + BasicAuthenticationSecurity *BasicAuthenticationSecurity `protobuf:"bytes,1,opt,name=basic_authentication_security,json=basicAuthenticationSecurity,proto3,oneof"` } + type SecurityDefinitionsItem_ApiKeySecurity struct { - ApiKeySecurity *ApiKeySecurity `protobuf:"bytes,2,opt,name=api_key_security,json=apiKeySecurity,oneof"` + ApiKeySecurity *ApiKeySecurity `protobuf:"bytes,2,opt,name=api_key_security,json=apiKeySecurity,proto3,oneof"` } + type SecurityDefinitionsItem_Oauth2ImplicitSecurity struct { - Oauth2ImplicitSecurity *Oauth2ImplicitSecurity `protobuf:"bytes,3,opt,name=oauth2_implicit_security,json=oauth2ImplicitSecurity,oneof"` + Oauth2ImplicitSecurity *Oauth2ImplicitSecurity `protobuf:"bytes,3,opt,name=oauth2_implicit_security,json=oauth2ImplicitSecurity,proto3,oneof"` } + type SecurityDefinitionsItem_Oauth2PasswordSecurity struct { - Oauth2PasswordSecurity *Oauth2PasswordSecurity `protobuf:"bytes,4,opt,name=oauth2_password_security,json=oauth2PasswordSecurity,oneof"` + Oauth2PasswordSecurity *Oauth2PasswordSecurity `protobuf:"bytes,4,opt,name=oauth2_password_security,json=oauth2PasswordSecurity,proto3,oneof"` } + type 
SecurityDefinitionsItem_Oauth2ApplicationSecurity struct { - Oauth2ApplicationSecurity *Oauth2ApplicationSecurity `protobuf:"bytes,5,opt,name=oauth2_application_security,json=oauth2ApplicationSecurity,oneof"` + Oauth2ApplicationSecurity *Oauth2ApplicationSecurity `protobuf:"bytes,5,opt,name=oauth2_application_security,json=oauth2ApplicationSecurity,proto3,oneof"` } + type SecurityDefinitionsItem_Oauth2AccessCodeSecurity struct { - Oauth2AccessCodeSecurity *Oauth2AccessCodeSecurity `protobuf:"bytes,6,opt,name=oauth2_access_code_security,json=oauth2AccessCodeSecurity,oneof"` + Oauth2AccessCodeSecurity *Oauth2AccessCodeSecurity `protobuf:"bytes,6,opt,name=oauth2_access_code_security,json=oauth2AccessCodeSecurity,proto3,oneof"` } func (*SecurityDefinitionsItem_BasicAuthenticationSecurity) isSecurityDefinitionsItem_Oneof() {} -func (*SecurityDefinitionsItem_ApiKeySecurity) isSecurityDefinitionsItem_Oneof() {} -func (*SecurityDefinitionsItem_Oauth2ImplicitSecurity) isSecurityDefinitionsItem_Oneof() {} -func (*SecurityDefinitionsItem_Oauth2PasswordSecurity) isSecurityDefinitionsItem_Oneof() {} -func (*SecurityDefinitionsItem_Oauth2ApplicationSecurity) isSecurityDefinitionsItem_Oneof() {} -func (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) isSecurityDefinitionsItem_Oneof() {} + +func (*SecurityDefinitionsItem_ApiKeySecurity) isSecurityDefinitionsItem_Oneof() {} + +func (*SecurityDefinitionsItem_Oauth2ImplicitSecurity) isSecurityDefinitionsItem_Oneof() {} + +func (*SecurityDefinitionsItem_Oauth2PasswordSecurity) isSecurityDefinitionsItem_Oneof() {} + +func (*SecurityDefinitionsItem_Oauth2ApplicationSecurity) isSecurityDefinitionsItem_Oneof() {} + +func (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) isSecurityDefinitionsItem_Oneof() {} func (m *SecurityDefinitionsItem) GetOneof() isSecurityDefinitionsItem_Oneof { if m != nil { @@ -3878,9 +4649,9 @@ func (m *SecurityDefinitionsItem) GetOauth2AccessCodeSecurity() *Oauth2AccessCod return nil } -// XXX_OneofFuncs is for the internal use of the proto package. -func (*SecurityDefinitionsItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _SecurityDefinitionsItem_OneofMarshaler, _SecurityDefinitionsItem_OneofUnmarshaler, _SecurityDefinitionsItem_OneofSizer, []interface{}{ +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*SecurityDefinitionsItem) XXX_OneofWrappers() []interface{} { + return []interface{}{ (*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil), (*SecurityDefinitionsItem_ApiKeySecurity)(nil), (*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil), @@ -3890,152 +4661,37 @@ func (*SecurityDefinitionsItem) XXX_OneofFuncs() (func(msg proto.Message, b *pro } } -func _SecurityDefinitionsItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*SecurityDefinitionsItem) - // oneof - switch x := m.Oneof.(type) { - case *SecurityDefinitionsItem_BasicAuthenticationSecurity: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.BasicAuthenticationSecurity); err != nil { - return err - } - case *SecurityDefinitionsItem_ApiKeySecurity: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ApiKeySecurity); err != nil { - return err - } - case *SecurityDefinitionsItem_Oauth2ImplicitSecurity: - b.EncodeVarint(3<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Oauth2ImplicitSecurity); err != nil { - return err - } - case *SecurityDefinitionsItem_Oauth2PasswordSecurity: - b.EncodeVarint(4<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Oauth2PasswordSecurity); err != nil { - return err - } - case *SecurityDefinitionsItem_Oauth2ApplicationSecurity: - b.EncodeVarint(5<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Oauth2ApplicationSecurity); err != nil { - return err - } - case *SecurityDefinitionsItem_Oauth2AccessCodeSecurity: - b.EncodeVarint(6<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Oauth2AccessCodeSecurity); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("SecurityDefinitionsItem.Oneof has unexpected type %T", x) - } - return nil -} - -func _SecurityDefinitionsItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*SecurityDefinitionsItem) - switch tag { - case 1: // oneof.basic_authentication_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(BasicAuthenticationSecurity) - err := b.DecodeMessage(msg) - m.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{msg} - return true, err - case 2: // oneof.api_key_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ApiKeySecurity) - err := b.DecodeMessage(msg) - m.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{msg} - return true, err - case 3: // oneof.oauth2_implicit_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Oauth2ImplicitSecurity) - err := b.DecodeMessage(msg) - m.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{msg} - return true, err - case 4: // oneof.oauth2_password_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Oauth2PasswordSecurity) - err := b.DecodeMessage(msg) - m.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{msg} - return true, err - case 5: // oneof.oauth2_application_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Oauth2ApplicationSecurity) - err := b.DecodeMessage(msg) - m.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{msg} - return true, err - case 6: // oneof.oauth2_access_code_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Oauth2AccessCodeSecurity) - err := b.DecodeMessage(msg) - m.Oneof = 
&SecurityDefinitionsItem_Oauth2AccessCodeSecurity{msg} - return true, err - default: - return false, nil - } -} - -func _SecurityDefinitionsItem_OneofSizer(msg proto.Message) (n int) { - m := msg.(*SecurityDefinitionsItem) - // oneof - switch x := m.Oneof.(type) { - case *SecurityDefinitionsItem_BasicAuthenticationSecurity: - s := proto.Size(x.BasicAuthenticationSecurity) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *SecurityDefinitionsItem_ApiKeySecurity: - s := proto.Size(x.ApiKeySecurity) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *SecurityDefinitionsItem_Oauth2ImplicitSecurity: - s := proto.Size(x.Oauth2ImplicitSecurity) - n += proto.SizeVarint(3<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *SecurityDefinitionsItem_Oauth2PasswordSecurity: - s := proto.Size(x.Oauth2PasswordSecurity) - n += proto.SizeVarint(4<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *SecurityDefinitionsItem_Oauth2ApplicationSecurity: - s := proto.Size(x.Oauth2ApplicationSecurity) - n += proto.SizeVarint(5<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *SecurityDefinitionsItem_Oauth2AccessCodeSecurity: - s := proto.Size(x.Oauth2AccessCodeSecurity) - n += proto.SizeVarint(6<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n +type SecurityRequirement struct { + AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -type SecurityRequirement struct { - AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +func (m *SecurityRequirement) Reset() { *m = SecurityRequirement{} } +func (m *SecurityRequirement) String() string { return proto.CompactTextString(m) } +func (*SecurityRequirement) ProtoMessage() {} +func (*SecurityRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{54} } -func (m *SecurityRequirement) Reset() { *m = SecurityRequirement{} } -func (m *SecurityRequirement) String() string { return proto.CompactTextString(m) } -func (*SecurityRequirement) ProtoMessage() {} -func (*SecurityRequirement) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} } +func (m *SecurityRequirement) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SecurityRequirement.Unmarshal(m, b) +} +func (m *SecurityRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SecurityRequirement.Marshal(b, m, deterministic) +} +func (m *SecurityRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecurityRequirement.Merge(m, src) +} +func (m *SecurityRequirement) XXX_Size() int { + return xxx_messageInfo_SecurityRequirement.Size(m) +} +func (m *SecurityRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_SecurityRequirement.DiscardUnknown(m) +} + +var xxx_messageInfo_SecurityRequirement proto.InternalMessageInfo func (m *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray { if m != nil { @@ -4045,13 +4701,36 @@ func (m *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray { } type 
StringArray struct { - Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"` + Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StringArray) Reset() { *m = StringArray{} } +func (m *StringArray) String() string { return proto.CompactTextString(m) } +func (*StringArray) ProtoMessage() {} +func (*StringArray) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{55} +} + +func (m *StringArray) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StringArray.Unmarshal(m, b) +} +func (m *StringArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StringArray.Marshal(b, m, deterministic) +} +func (m *StringArray) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringArray.Merge(m, src) +} +func (m *StringArray) XXX_Size() int { + return xxx_messageInfo_StringArray.Size(m) +} +func (m *StringArray) XXX_DiscardUnknown() { + xxx_messageInfo_StringArray.DiscardUnknown(m) } -func (m *StringArray) Reset() { *m = StringArray{} } -func (m *StringArray) String() string { return proto.CompactTextString(m) } -func (*StringArray) ProtoMessage() {} -func (*StringArray) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} } +var xxx_messageInfo_StringArray proto.InternalMessageInfo func (m *StringArray) GetValue() []string { if m != nil { @@ -4061,16 +4740,39 @@ func (m *StringArray) GetValue() []string { } type Tag struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Tag) Reset() { *m = Tag{} } -func (m *Tag) String() string { return proto.CompactTextString(m) } -func (*Tag) ProtoMessage() {} -func (*Tag) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} } +func (m *Tag) Reset() { *m = Tag{} } +func (m *Tag) String() string { return proto.CompactTextString(m) } +func (*Tag) ProtoMessage() {} +func (*Tag) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{56} +} + +func (m *Tag) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Tag.Unmarshal(m, b) +} +func (m *Tag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Tag.Marshal(b, m, deterministic) +} +func (m *Tag) XXX_Merge(src proto.Message) { + xxx_messageInfo_Tag.Merge(m, src) +} +func (m *Tag) XXX_Size() int { + return xxx_messageInfo_Tag.Size(m) +} +func (m *Tag) XXX_DiscardUnknown() { + xxx_messageInfo_Tag.DiscardUnknown(m) +} + +var xxx_messageInfo_Tag proto.InternalMessageInfo func (m 
*Tag) GetName() string { if m != nil { @@ -4101,13 +4803,36 @@ func (m *Tag) GetVendorExtension() []*NamedAny { } type TypeItem struct { - Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"` + Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TypeItem) Reset() { *m = TypeItem{} } +func (m *TypeItem) String() string { return proto.CompactTextString(m) } +func (*TypeItem) ProtoMessage() {} +func (*TypeItem) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{57} +} + +func (m *TypeItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TypeItem.Unmarshal(m, b) +} +func (m *TypeItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TypeItem.Marshal(b, m, deterministic) +} +func (m *TypeItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypeItem.Merge(m, src) +} +func (m *TypeItem) XXX_Size() int { + return xxx_messageInfo_TypeItem.Size(m) +} +func (m *TypeItem) XXX_DiscardUnknown() { + xxx_messageInfo_TypeItem.DiscardUnknown(m) } -func (m *TypeItem) Reset() { *m = TypeItem{} } -func (m *TypeItem) String() string { return proto.CompactTextString(m) } -func (*TypeItem) ProtoMessage() {} -func (*TypeItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } +var xxx_messageInfo_TypeItem proto.InternalMessageInfo func (m *TypeItem) GetValue() []string { if m != nil { @@ -4118,13 +4843,36 @@ func (m *TypeItem) GetValue() []string { // Any property starting with x- is valid. type VendorExtension struct { - AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VendorExtension) Reset() { *m = VendorExtension{} } +func (m *VendorExtension) String() string { return proto.CompactTextString(m) } +func (*VendorExtension) ProtoMessage() {} +func (*VendorExtension) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{58} } -func (m *VendorExtension) Reset() { *m = VendorExtension{} } -func (m *VendorExtension) String() string { return proto.CompactTextString(m) } -func (*VendorExtension) ProtoMessage() {} -func (*VendorExtension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} } +func (m *VendorExtension) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VendorExtension.Unmarshal(m, b) +} +func (m *VendorExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VendorExtension.Marshal(b, m, deterministic) +} +func (m *VendorExtension) XXX_Merge(src proto.Message) { + xxx_messageInfo_VendorExtension.Merge(m, src) +} +func (m *VendorExtension) XXX_Size() int { + return xxx_messageInfo_VendorExtension.Size(m) +} +func (m *VendorExtension) XXX_DiscardUnknown() { + xxx_messageInfo_VendorExtension.DiscardUnknown(m) +} + +var xxx_messageInfo_VendorExtension proto.InternalMessageInfo func (m *VendorExtension) GetAdditionalProperties() []*NamedAny { if m != nil { @@ -4134,18 +4882,41 @@ func (m *VendorExtension) GetAdditionalProperties() []*NamedAny { } type Xml struct { - Name string 
`protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Namespace string `protobuf:"bytes,2,opt,name=namespace" json:"namespace,omitempty"` - Prefix string `protobuf:"bytes,3,opt,name=prefix" json:"prefix,omitempty"` - Attribute bool `protobuf:"varint,4,opt,name=attribute" json:"attribute,omitempty"` - Wrapped bool `protobuf:"varint,5,opt,name=wrapped" json:"wrapped,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"` + Attribute bool `protobuf:"varint,4,opt,name=attribute,proto3" json:"attribute,omitempty"` + Wrapped bool `protobuf:"varint,5,opt,name=wrapped,proto3" json:"wrapped,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Xml) Reset() { *m = Xml{} } +func (m *Xml) String() string { return proto.CompactTextString(m) } +func (*Xml) ProtoMessage() {} +func (*Xml) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{59} +} + +func (m *Xml) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Xml.Unmarshal(m, b) +} +func (m *Xml) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Xml.Marshal(b, m, deterministic) +} +func (m *Xml) XXX_Merge(src proto.Message) { + xxx_messageInfo_Xml.Merge(m, src) +} +func (m *Xml) XXX_Size() int { + return xxx_messageInfo_Xml.Size(m) +} +func (m *Xml) XXX_DiscardUnknown() { + xxx_messageInfo_Xml.DiscardUnknown(m) } -func (m *Xml) Reset() { *m = Xml{} } -func (m *Xml) String() string { return proto.CompactTextString(m) } -func (*Xml) ProtoMessage() {} -func (*Xml) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} } +var xxx_messageInfo_Xml proto.InternalMessageInfo func (m *Xml) GetName() string { if m != nil { @@ -4252,9 +5023,9 @@ func init() { proto.RegisterType((*Xml)(nil), "openapi.v2.Xml") } -func init() { proto.RegisterFile("OpenAPIv2/OpenAPIv2.proto", fileDescriptor0) } +func init() { proto.RegisterFile("OpenAPIv2/OpenAPIv2.proto", fileDescriptor_336adc04ae589d92) } -var fileDescriptor0 = []byte{ +var fileDescriptor_336adc04ae589d92 = []byte{ // 3129 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x3b, 0x4b, 0x73, 0x1c, 0x57, 0xd5, 0xf3, 0x7e, 0x1c, 0x69, 0x46, 0xa3, 0x96, 0x2c, 0xb7, 0x24, 0xc7, 0x71, 0xe4, 0x3c, 0x6c, diff --git a/vendor/github.com/googleapis/gnostic/compiler/reader.go b/vendor/github.com/googleapis/gnostic/compiler/reader.go index af10e0d1b..25affd063 100644 --- a/vendor/github.com/googleapis/gnostic/compiler/reader.go +++ b/vendor/github.com/googleapis/gnostic/compiler/reader.go @@ -71,6 +71,17 @@ func RemoveFromInfoCache(filename string) { delete(infoCache, filename) } +func GetInfoCache() map[string]interface{} { + if infoCache == nil { + initializeInfoCache() + } + return infoCache +} + +func ClearInfoCache() { + infoCache = make(map[string]interface{}) +} + // FetchFile gets a specified file from the local filesystem or a remote location. 
func FetchFile(fileurl string) ([]byte, error) { var bytes []byte @@ -168,7 +179,11 @@ func ReadInfoForRef(basefile string, ref string) (interface{}, error) { parts := strings.Split(ref, "#") var filename string if parts[0] != "" { - filename = basedir + parts[0] + filename = parts[0] + if _, err := url.ParseRequestURI(parts[0]); err != nil { + // It is not an URL, so the file is local + filename = basedir + parts[0] + } } else { filename = basefile } diff --git a/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh b/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh deleted file mode 100644 index 68d02a02a..000000000 --- a/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh +++ /dev/null @@ -1,5 +0,0 @@ -go get github.com/golang/protobuf/protoc-gen-go - -protoc \ ---go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. *.proto - diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go index e6927896f..432dc06e6 100644 --- a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go +++ b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go @@ -1,12 +1,14 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: extension.proto +// source: extensions/extension.proto package openapiextension_v1 -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import any "github.com/golang/protobuf/ptypes/any" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -17,7 +19,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // The version number of OpenAPI compiler. 
type Version struct { @@ -36,16 +38,17 @@ func (m *Version) Reset() { *m = Version{} } func (m *Version) String() string { return proto.CompactTextString(m) } func (*Version) ProtoMessage() {} func (*Version) Descriptor() ([]byte, []int) { - return fileDescriptor_extension_d25f09c742c58c90, []int{0} + return fileDescriptor_661e47e790f76671, []int{0} } + func (m *Version) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Version.Unmarshal(m, b) } func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Version.Marshal(b, m, deterministic) } -func (dst *Version) XXX_Merge(src proto.Message) { - xxx_messageInfo_Version.Merge(dst, src) +func (m *Version) XXX_Merge(src proto.Message) { + xxx_messageInfo_Version.Merge(m, src) } func (m *Version) XXX_Size() int { return xxx_messageInfo_Version.Size(m) @@ -100,16 +103,17 @@ func (m *ExtensionHandlerRequest) Reset() { *m = ExtensionHandlerRequest func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) } func (*ExtensionHandlerRequest) ProtoMessage() {} func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_extension_d25f09c742c58c90, []int{1} + return fileDescriptor_661e47e790f76671, []int{1} } + func (m *ExtensionHandlerRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExtensionHandlerRequest.Unmarshal(m, b) } func (m *ExtensionHandlerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ExtensionHandlerRequest.Marshal(b, m, deterministic) } -func (dst *ExtensionHandlerRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExtensionHandlerRequest.Merge(dst, src) +func (m *ExtensionHandlerRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionHandlerRequest.Merge(m, src) } func (m *ExtensionHandlerRequest) XXX_Size() int { return xxx_messageInfo_ExtensionHandlerRequest.Size(m) @@ -159,16 +163,17 @@ func (m *ExtensionHandlerResponse) Reset() { *m = ExtensionHandlerRespon func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) } func (*ExtensionHandlerResponse) ProtoMessage() {} func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_extension_d25f09c742c58c90, []int{2} + return fileDescriptor_661e47e790f76671, []int{2} } + func (m *ExtensionHandlerResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExtensionHandlerResponse.Unmarshal(m, b) } func (m *ExtensionHandlerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ExtensionHandlerResponse.Marshal(b, m, deterministic) } -func (dst *ExtensionHandlerResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExtensionHandlerResponse.Merge(dst, src) +func (m *ExtensionHandlerResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionHandlerResponse.Merge(m, src) } func (m *ExtensionHandlerResponse) XXX_Size() int { return xxx_messageInfo_ExtensionHandlerResponse.Size(m) @@ -216,16 +221,17 @@ func (m *Wrapper) Reset() { *m = Wrapper{} } func (m *Wrapper) String() string { return proto.CompactTextString(m) } func (*Wrapper) ProtoMessage() {} func (*Wrapper) Descriptor() ([]byte, []int) { - return fileDescriptor_extension_d25f09c742c58c90, []int{3} + return fileDescriptor_661e47e790f76671, []int{3} } + func (m *Wrapper) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Wrapper.Unmarshal(m, b) } func (m *Wrapper) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return 
xxx_messageInfo_Wrapper.Marshal(b, m, deterministic) } -func (dst *Wrapper) XXX_Merge(src proto.Message) { - xxx_messageInfo_Wrapper.Merge(dst, src) +func (m *Wrapper) XXX_Merge(src proto.Message) { + xxx_messageInfo_Wrapper.Merge(m, src) } func (m *Wrapper) XXX_Size() int { return xxx_messageInfo_Wrapper.Size(m) @@ -264,31 +270,31 @@ func init() { proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper") } -func init() { proto.RegisterFile("extension.proto", fileDescriptor_extension_d25f09c742c58c90) } - -var fileDescriptor_extension_d25f09c742c58c90 = []byte{ - // 357 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xc3, 0x40, - 0x18, 0x84, 0x49, 0xbf, 0x62, 0x56, 0x6c, 0x65, 0x2d, 0x1a, 0xc5, 0x43, 0x09, 0x08, 0x45, 0x64, - 0x4b, 0x15, 0xbc, 0xb7, 0x50, 0xd4, 0x8b, 0x2d, 0x7b, 0xa8, 0x37, 0xcb, 0x36, 0x7d, 0x9b, 0x46, - 0x92, 0xdd, 0x75, 0xf3, 0x61, 0xfb, 0x57, 0x3c, 0xfa, 0x4b, 0x25, 0xbb, 0x49, 0x3d, 0xa8, 0xb7, - 0xcc, 0xc3, 0x24, 0xef, 0xcc, 0x04, 0x75, 0x60, 0x9b, 0x02, 0x4f, 0x42, 0xc1, 0x89, 0x54, 0x22, - 0x15, 0xf8, 0x44, 0x48, 0xe0, 0x4c, 0x86, 0x3f, 0x3c, 0x1f, 0x5e, 0x9c, 0x07, 0x42, 0x04, 0x11, - 0x0c, 0xb4, 0x65, 0x99, 0xad, 0x07, 0x8c, 0xef, 0x8c, 0xdf, 0xf3, 0x91, 0x3d, 0x07, 0x55, 0x18, - 0x71, 0x17, 0x35, 0x63, 0xf6, 0x26, 0x94, 0x6b, 0xf5, 0xac, 0x7e, 0x93, 0x1a, 0xa1, 0x69, 0xc8, - 0x85, 0x72, 0x6b, 0x25, 0x2d, 0x44, 0x41, 0x25, 0x4b, 0xfd, 0x8d, 0x5b, 0x37, 0x54, 0x0b, 0x7c, - 0x8a, 0x5a, 0x49, 0xb6, 0x5e, 0x87, 0x5b, 0xb7, 0xd1, 0xb3, 0xfa, 0x0e, 0x2d, 0x95, 0xf7, 0x69, - 0xa1, 0xb3, 0x49, 0x15, 0xe8, 0x91, 0xf1, 0x55, 0x04, 0x8a, 0xc2, 0x7b, 0x06, 0x49, 0x8a, 0xef, - 0x91, 0xfd, 0xa1, 0x98, 0x94, 0x60, 0xee, 0x1e, 0xde, 0x5e, 0x92, 0x3f, 0x2a, 0x90, 0x17, 0xe3, - 0xa1, 0x95, 0x19, 0x3f, 0xa0, 0x63, 0x5f, 0xc4, 0x32, 0x8c, 0x40, 0x2d, 0x72, 0xd3, 0x40, 0x87, - 0xf9, 0xef, 0x03, 0x65, 0x4b, 0xda, 0xa9, 0xde, 0x2a, 0x81, 0x97, 0x23, 0xf7, 0x77, 0xb6, 0x44, - 0x0a, 0x9e, 0x00, 0x76, 0x91, 0xbd, 0xd1, 0x68, 0xa5, 0xc3, 0x1d, 0xd0, 0x4a, 0x16, 0x03, 0x80, - 0x52, 0x7a, 0x96, 0x7a, 0xdf, 0xa1, 0x46, 0xe0, 0x6b, 0xd4, 0xcc, 0x59, 0x94, 0x41, 0x99, 0xa4, - 0x4b, 0xcc, 0xf0, 0xa4, 0x1a, 0x9e, 0x8c, 0xf8, 0x8e, 0x1a, 0x8b, 0xf7, 0x8a, 0xec, 0xb2, 0x54, - 0x71, 0xa6, 0xaa, 0x60, 0xe9, 0xe1, 0x2a, 0x89, 0xaf, 0x50, 0x7b, 0xdf, 0x62, 0xc1, 0x59, 0x0c, - 0xfa, 0x37, 0x38, 0xf4, 0x68, 0x4f, 0x9f, 0x59, 0x0c, 0x18, 0xa3, 0xc6, 0x8e, 0xc5, 0x91, 0x3e, - 0xeb, 0x50, 0xfd, 0x3c, 0xbe, 0x41, 0x6d, 0xa1, 0x02, 0x12, 0x70, 0x91, 0xa4, 0xa1, 0x4f, 0xf2, - 0xe1, 0x18, 0x4f, 0x25, 0xf0, 0xd1, 0xec, 0x69, 0x5f, 0x77, 0x3e, 0x9c, 0x59, 0x5f, 0xb5, 0xfa, - 0x74, 0x34, 0x59, 0xb6, 0x74, 0xc4, 0xbb, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x84, 0x5c, 0x6b, - 0x80, 0x51, 0x02, 0x00, 0x00, +func init() { proto.RegisterFile("extensions/extension.proto", fileDescriptor_661e47e790f76671) } + +var fileDescriptor_661e47e790f76671 = []byte{ + // 362 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xeb, 0x40, + 0x18, 0x85, 0x49, 0xbf, 0x72, 0x33, 0x97, 0xdb, 0x2b, 0x63, 0xd1, 0x58, 0x5c, 0x94, 0x80, 0x50, + 0x44, 0xa6, 0x54, 0xc1, 0x7d, 0x0b, 0x45, 0xdd, 0xd8, 0x32, 0x8b, 0xba, 0xb3, 0x4c, 0xd3, 0xb7, + 0x69, 0x24, 0x99, 0x19, 0x27, 0x1f, 0xb6, 0x7f, 0xc5, 0xa5, 0xbf, 0x54, 0x32, 0x93, 0xc4, 0x85, + 0xba, 0x9b, 0xf3, 0x70, 0xda, 0xf7, 0x9c, 0x13, 0xd4, 0x87, 0x7d, 0x0a, 0x3c, 0x09, 0x05, 0x4f, + 0x46, 0xf5, 0x93, 0x48, 0x25, 0x52, 0x81, 0x8f, 0x85, 
0x04, 0xce, 0x64, 0xf8, 0xc5, 0xf3, 0x71, + 0xff, 0x2c, 0x10, 0x22, 0x88, 0x60, 0xa4, 0x2d, 0xeb, 0x6c, 0x3b, 0x62, 0xfc, 0x60, 0xfc, 0x9e, + 0x8f, 0xec, 0x25, 0xa8, 0xc2, 0x88, 0x7b, 0xa8, 0x1d, 0xb3, 0x17, 0xa1, 0x5c, 0x6b, 0x60, 0x0d, + 0xdb, 0xd4, 0x08, 0x4d, 0x43, 0x2e, 0x94, 0xdb, 0x28, 0x69, 0x21, 0x0a, 0x2a, 0x59, 0xea, 0xef, + 0xdc, 0xa6, 0xa1, 0x5a, 0xe0, 0x13, 0xd4, 0x49, 0xb2, 0xed, 0x36, 0xdc, 0xbb, 0xad, 0x81, 0x35, + 0x74, 0x68, 0xa9, 0xbc, 0x77, 0x0b, 0x9d, 0xce, 0xaa, 0x40, 0xf7, 0x8c, 0x6f, 0x22, 0x50, 0x14, + 0x5e, 0x33, 0x48, 0x52, 0x7c, 0x8b, 0xec, 0x37, 0xc5, 0xa4, 0x04, 0x73, 0xf7, 0xef, 0xf5, 0x39, + 0xf9, 0xa1, 0x02, 0x79, 0x32, 0x1e, 0x5a, 0x99, 0xf1, 0x1d, 0x3a, 0xf2, 0x45, 0x2c, 0xc3, 0x08, + 0xd4, 0x2a, 0x37, 0x0d, 0x74, 0x98, 0xdf, 0xfe, 0xa0, 0x6c, 0x49, 0xff, 0x57, 0xbf, 0x2a, 0x81, + 0x97, 0x23, 0xf7, 0x7b, 0xb6, 0x44, 0x0a, 0x9e, 0x00, 0x76, 0x91, 0xbd, 0xd3, 0x68, 0xa3, 0xc3, + 0xfd, 0xa1, 0x95, 0x2c, 0x06, 0x00, 0xa5, 0xf4, 0x2c, 0xcd, 0xa1, 0x43, 0x8d, 0xc0, 0x97, 0xa8, + 0x9d, 0xb3, 0x28, 0x83, 0x32, 0x49, 0x8f, 0x98, 0xe1, 0x49, 0x35, 0x3c, 0x99, 0xf0, 0x03, 0x35, + 0x16, 0xef, 0x19, 0xd9, 0x65, 0xa9, 0xe2, 0x4c, 0x55, 0xc1, 0xd2, 0xc3, 0x55, 0x12, 0x5f, 0xa0, + 0x6e, 0xdd, 0x62, 0xc5, 0x59, 0x0c, 0xfa, 0x33, 0x38, 0xf4, 0x5f, 0x4d, 0x1f, 0x59, 0x0c, 0x18, + 0xa3, 0xd6, 0x81, 0xc5, 0x91, 0x3e, 0xeb, 0x50, 0xfd, 0x9e, 0x5e, 0xa1, 0xae, 0x50, 0x01, 0x09, + 0xb8, 0x48, 0xd2, 0xd0, 0x27, 0xf9, 0x78, 0x8a, 0xe7, 0x12, 0xf8, 0x64, 0xf1, 0x50, 0xd7, 0x5d, + 0x8e, 0x17, 0xd6, 0x47, 0xa3, 0x39, 0x9f, 0xcc, 0xd6, 0x1d, 0x1d, 0xf1, 0xe6, 0x33, 0x00, 0x00, + 0xff, 0xff, 0xeb, 0xf3, 0xfa, 0x65, 0x5c, 0x02, 0x00, 0x00, } diff --git a/vendor/github.com/huandu/xstrings/.travis.yml b/vendor/github.com/huandu/xstrings/.travis.yml index 4f2ee4d97..d6460be41 100644 --- a/vendor/github.com/huandu/xstrings/.travis.yml +++ b/vendor/github.com/huandu/xstrings/.travis.yml @@ -1 +1,7 @@ language: go +install: + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls +script: + - go test -v -covermode=count -coverprofile=coverage.out + - 'if [ "$TRAVIS_PULL_REQUEST" = "false" ] && [ ! -z "$COVERALLS_TOKEN" ]; then $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci -repotoken $COVERALLS_TOKEN; fi' diff --git a/vendor/github.com/huandu/xstrings/README.md b/vendor/github.com/huandu/xstrings/README.md index a4a36f454..292bf2f39 100644 --- a/vendor/github.com/huandu/xstrings/README.md +++ b/vendor/github.com/huandu/xstrings/README.md @@ -2,6 +2,8 @@ [![Build Status](https://travis-ci.org/huandu/xstrings.svg?branch=master)](https://travis-ci.org/huandu/xstrings) [![GoDoc](https://godoc.org/github.com/huandu/xstrings?status.svg)](https://godoc.org/github.com/huandu/xstrings) +[![Go Report](https://goreportcard.com/badge/github.com/huandu/xstrings)](https://goreportcard.com/report/github.com/huandu/xstrings) +[![Coverage Status](https://coveralls.io/repos/github/huandu/xstrings/badge.svg?branch=master)](https://coveralls.io/github/huandu/xstrings?branch=master) Go package [xstrings](https://godoc.org/github.com/huandu/xstrings) is a collection of string functions, which are widely used in other languages but absent in Go package [strings](http://golang.org/pkg/strings). @@ -15,7 +17,7 @@ Please review [contributing guideline](CONTRIBUTING.md) and [create new issue](h Use `go get` to install this library. 
- go get github.com/huandu/xstrings + go get github.com/huandu/xstrings ## API document ## diff --git a/vendor/github.com/huandu/xstrings/convert.go b/vendor/github.com/huandu/xstrings/convert.go index 8253fa9c6..496f28cf4 100644 --- a/vendor/github.com/huandu/xstrings/convert.go +++ b/vendor/github.com/huandu/xstrings/convert.go @@ -34,6 +34,7 @@ func ToCamelCase(str string) string { str = str[size:] if r0 != '_' { + r0 = unicode.ToUpper(r0) break } @@ -41,11 +42,14 @@ func ToCamelCase(str string) string { } if len(str) == 0 { + // A special case for a string contains only 1 rune. + if size != 0 { + buf.WriteRune(r0) + } + return buf.String() } - r0 = unicode.ToUpper(r0) - for len(str) > 0 { r1 = r0 r0, size = utf8.DecodeRuneInString(str) diff --git a/vendor/github.com/huandu/xstrings/go.mod b/vendor/github.com/huandu/xstrings/go.mod index 5866b3a8e..3982c204c 100644 --- a/vendor/github.com/huandu/xstrings/go.mod +++ b/vendor/github.com/huandu/xstrings/go.mod @@ -1 +1,3 @@ -module github.com/huandu/xstrings \ No newline at end of file +module github.com/huandu/xstrings + +go 1.12 diff --git a/vendor/github.com/huandu/xstrings/translate.go b/vendor/github.com/huandu/xstrings/translate.go index d86a4cbbd..66e23f86d 100644 --- a/vendor/github.com/huandu/xstrings/translate.go +++ b/vendor/github.com/huandu/xstrings/translate.go @@ -26,7 +26,7 @@ type runeMap map[rune]rune // If a from/to pattern pair needs to be used more than once, it's recommended // to create a Translator and reuse it. type Translator struct { - quickDict *runeDict // A quick dictionary to look up rune by index. Only availabe for latin runes. + quickDict *runeDict // A quick dictionary to look up rune by index. Only available for latin runes. runeMap runeMap // Rune map for translation. ranges []*runeRangeMap // Ranges of runes. mappedRune rune // If mappedRune >= 0, all matched runes are translated to the mappedRune. diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml index b13a50ed1..dad29725f 100644 --- a/vendor/github.com/imdario/mergo/.travis.yml +++ b/vendor/github.com/imdario/mergo/.travis.yml @@ -4,4 +4,6 @@ install: - go get golang.org/x/tools/cmd/cover - go get github.com/mattn/goveralls script: + - go test -race -v ./... +after_script: - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go index f8de6c543..3fb6c64d0 100644 --- a/vendor/github.com/imdario/mergo/merge.go +++ b/vendor/github.com/imdario/mergo/merge.go @@ -26,10 +26,12 @@ func hasExportedField(dst reflect.Value) (exported bool) { } type Config struct { - Overwrite bool - AppendSlice bool - Transformers Transformers - overwriteWithEmptyValue bool + Overwrite bool + AppendSlice bool + TypeCheck bool + Transformers Transformers + overwriteWithEmptyValue bool + overwriteSliceWithEmptyValue bool } type Transformers interface { @@ -41,7 +43,9 @@ type Transformers interface { // short circuiting on recursive types. 
func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { overwrite := config.Overwrite + typeCheck := config.TypeCheck overwriteWithEmptySrc := config.overwriteWithEmptyValue + overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue config.overwriteWithEmptyValue = false if !src.IsValid() { @@ -128,11 +132,14 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co dstSlice = reflect.ValueOf(dstElement.Interface()) } - if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + if typeCheck && srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } dstSlice = srcSlice } else if config.AppendSlice { if srcSlice.Type() != dstSlice.Type() { - return fmt.Errorf("cannot append two slice with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) } dstSlice = reflect.AppendSlice(dstSlice, srcSlice) } @@ -143,7 +150,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co continue } - if srcElement.IsValid() && (overwrite || (!dstElement.IsValid() || isEmptyValue(dstElement))) { + if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) { if dst.IsNil() { dst.Set(reflect.MakeMap(dst.Type())) } @@ -154,7 +161,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co if !dst.CanSet() { break } - if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { dst.Set(src) } else if config.AppendSlice { if src.Type() != dst.Type() { @@ -168,11 +175,21 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co if src.IsNil() { break } - if src.Kind() != reflect.Interface { + + if dst.Kind() != reflect.Ptr && src.Type().AssignableTo(dst.Type()) { if dst.IsNil() || overwrite { if dst.CanSet() && (overwrite || isEmptyValue(dst)) { dst.Set(src) } + } + break + } + + if src.Kind() != reflect.Interface { + if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { + if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + dst.Set(src) + } } else if src.Kind() == reflect.Ptr { if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { return @@ -198,6 +215,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co dst.Set(src) } } + return } @@ -209,7 +227,7 @@ func Merge(dst, src interface{}, opts ...func(*Config)) error { return merge(dst, src, opts...) } -// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overriden by +// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by // non-empty src attribute values. 
// Deprecated: use Merge(…) with WithOverride func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { @@ -228,11 +246,21 @@ func WithOverride(config *Config) { config.Overwrite = true } -// WithAppendSlice will make merge append slices instead of overwriting it +// WithOverride will make merge override empty dst slice with empty src slice. +func WithOverrideEmptySlice(config *Config) { + config.overwriteSliceWithEmptyValue = true +} + +// WithAppendSlice will make merge append slices instead of overwriting it. func WithAppendSlice(config *Config) { config.AppendSlice = true } +// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride). +func WithTypeCheck(config *Config) { + config.TypeCheck = true +} + func merge(dst, src interface{}, opts ...func(*Config)) error { var ( vDst, vSrc reflect.Value diff --git a/vendor/github.com/json-iterator/go/iter.go b/vendor/github.com/json-iterator/go/iter.go index 95ae54fbf..29b31cf78 100644 --- a/vendor/github.com/json-iterator/go/iter.go +++ b/vendor/github.com/json-iterator/go/iter.go @@ -74,6 +74,7 @@ type Iterator struct { buf []byte head int tail int + depth int captureStartedAt int captured []byte Error error @@ -88,6 +89,7 @@ func NewIterator(cfg API) *Iterator { buf: nil, head: 0, tail: 0, + depth: 0, } } @@ -99,6 +101,7 @@ func Parse(cfg API, reader io.Reader, bufSize int) *Iterator { buf: make([]byte, bufSize), head: 0, tail: 0, + depth: 0, } } @@ -110,6 +113,7 @@ func ParseBytes(cfg API, input []byte) *Iterator { buf: input, head: 0, tail: len(input), + depth: 0, } } @@ -128,6 +132,7 @@ func (iter *Iterator) Reset(reader io.Reader) *Iterator { iter.reader = reader iter.head = 0 iter.tail = 0 + iter.depth = 0 return iter } @@ -137,6 +142,7 @@ func (iter *Iterator) ResetBytes(input []byte) *Iterator { iter.buf = input iter.head = 0 iter.tail = len(input) + iter.depth = 0 return iter } @@ -320,3 +326,24 @@ func (iter *Iterator) Read() interface{} { return nil } } + +// limit maximum depth of nesting, as allowed by https://tools.ietf.org/html/rfc7159#section-9 +const maxDepth = 10000 + +func (iter *Iterator) incrementDepth() (success bool) { + iter.depth++ + if iter.depth <= maxDepth { + return true + } + iter.ReportError("incrementDepth", "exceeded max depth") + return false +} + +func (iter *Iterator) decrementDepth() (success bool) { + iter.depth-- + if iter.depth >= 0 { + return true + } + iter.ReportError("decrementDepth", "unexpected negative nesting") + return false +} diff --git a/vendor/github.com/json-iterator/go/iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go index 6188cb457..204fe0e09 100644 --- a/vendor/github.com/json-iterator/go/iter_array.go +++ b/vendor/github.com/json-iterator/go/iter_array.go @@ -28,26 +28,32 @@ func (iter *Iterator) ReadArray() (ret bool) { func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { c := iter.nextToken() if c == '[' { + if !iter.incrementDepth() { + return false + } c = iter.nextToken() if c != ']' { iter.unreadByte() if !callback(iter) { + iter.decrementDepth() return false } c = iter.nextToken() for c == ',' { if !callback(iter) { + iter.decrementDepth() return false } c = iter.nextToken() } if c != ']' { iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c})) + iter.decrementDepth() return false } - return true + return iter.decrementDepth() } - return true + return iter.decrementDepth() } if c == 'n' { iter.skipThreeBytes('u', 'l', 'l') diff --git 
a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go index 1c5757671..b65137114 100644 --- a/vendor/github.com/json-iterator/go/iter_object.go +++ b/vendor/github.com/json-iterator/go/iter_object.go @@ -112,6 +112,9 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { c := iter.nextToken() var field string if c == '{' { + if !iter.incrementDepth() { + return false + } c = iter.nextToken() if c == '"' { iter.unreadByte() @@ -121,6 +124,7 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) } if !callback(iter, field) { + iter.decrementDepth() return false } c = iter.nextToken() @@ -131,20 +135,23 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) } if !callback(iter, field) { + iter.decrementDepth() return false } c = iter.nextToken() } if c != '}' { iter.ReportError("ReadObjectCB", `object not ended with }`) + iter.decrementDepth() return false } - return true + return iter.decrementDepth() } if c == '}' { - return true + return iter.decrementDepth() } iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c})) + iter.decrementDepth() return false } if c == 'n' { @@ -159,15 +166,20 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { c := iter.nextToken() if c == '{' { + if !iter.incrementDepth() { + return false + } c = iter.nextToken() if c == '"' { iter.unreadByte() field := iter.ReadString() if iter.nextToken() != ':' { iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + iter.decrementDepth() return false } if !callback(iter, field) { + iter.decrementDepth() return false } c = iter.nextToken() @@ -175,23 +187,27 @@ func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { field = iter.ReadString() if iter.nextToken() != ':' { iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + iter.decrementDepth() return false } if !callback(iter, field) { + iter.decrementDepth() return false } c = iter.nextToken() } if c != '}' { iter.ReportError("ReadMapCB", `object not ended with }`) + iter.decrementDepth() return false } - return true + return iter.decrementDepth() } if c == '}' { - return true + return iter.decrementDepth() } iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c})) + iter.decrementDepth() return false } if c == 'n' { diff --git a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go index 8fcdc3b69..9303de41e 100644 --- a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go +++ b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go @@ -22,6 +22,9 @@ func (iter *Iterator) skipNumber() { func (iter *Iterator) skipArray() { level := 1 + if !iter.incrementDepth() { + return + } for { for i := iter.head; i < iter.tail; i++ { switch iter.buf[i] { @@ -31,8 +34,14 @@ func (iter *Iterator) skipArray() { i = iter.head - 1 // it will be i++ soon case '[': // If open symbol, increase level level++ + if !iter.incrementDepth() { + return + } case ']': // If close symbol, increase level level-- + if !iter.decrementDepth() { + return + } // If we have 
returned to the original level, we're done if level == 0 { @@ -50,6 +59,10 @@ func (iter *Iterator) skipArray() { func (iter *Iterator) skipObject() { level := 1 + if !iter.incrementDepth() { + return + } + for { for i := iter.head; i < iter.tail; i++ { switch iter.buf[i] { @@ -59,8 +72,14 @@ func (iter *Iterator) skipObject() { i = iter.head - 1 // it will be i++ soon case '{': // If open symbol, increase level level++ + if !iter.incrementDepth() { + return + } case '}': // If close symbol, increase level level-- + if !iter.decrementDepth() { + return + } // If we have returned to the original level, we're done if level == 0 { diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go index 4459e203f..74974ba74 100644 --- a/vendor/github.com/json-iterator/go/reflect.go +++ b/vendor/github.com/json-iterator/go/reflect.go @@ -60,6 +60,7 @@ func (b *ctx) append(prefix string) *ctx { // ReadVal copy the underlying JSON into go interface, same as json.Unmarshal func (iter *Iterator) ReadVal(obj interface{}) { + depth := iter.depth cacheKey := reflect2.RTypeOf(obj) decoder := iter.cfg.getDecoderFromCache(cacheKey) if decoder == nil { @@ -76,6 +77,10 @@ func (iter *Iterator) ReadVal(obj interface{}) { return } decoder.Decode(ptr, iter) + if iter.depth != depth { + iter.ReportError("ReadVal", "unexpected mismatched nesting") + return + } } // WriteVal copy the go interface into underlying JSON, same as json.Marshal diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go index 05e8fbf1f..80320cd64 100644 --- a/vendor/github.com/json-iterator/go/reflect_extension.go +++ b/vendor/github.com/json-iterator/go/reflect_extension.go @@ -341,10 +341,10 @@ func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor { if ctx.onlyTaggedField && !hastag && !field.Anonymous() { continue } - tagParts := strings.Split(tag, ",") - if tag == "-" { + if tag == "-" || field.Name() == "_" { continue } + tagParts := strings.Split(tag, ",") if field.Anonymous() && (tag == "" || tagParts[0] == "") { if field.Type().Kind() == reflect.Struct { structDescriptor := describeStruct(ctx, field.Type()) diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go index 547b4421e..9e2b623fe 100644 --- a/vendor/github.com/json-iterator/go/reflect_map.go +++ b/vendor/github.com/json-iterator/go/reflect_map.go @@ -249,6 +249,10 @@ type mapEncoder struct { } func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *(*unsafe.Pointer)(ptr) == nil { + stream.WriteNil() + return + } stream.WriteObjectStart() iter := encoder.mapType.UnsafeIterate(ptr) for i := 0; iter.HasNext(); i++ { @@ -286,16 +290,17 @@ func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { stream.WriteObjectStart() mapIter := encoder.mapType.UnsafeIterate(ptr) subStream := stream.cfg.BorrowStream(nil) + subStream.Attachment = stream.Attachment subIter := stream.cfg.BorrowIterator(nil) keyValues := encodedKeyValues{} for mapIter.HasNext() { - subStream.buf = make([]byte, 0, 64) key, elem := mapIter.UnsafeNext() + subStreamIndex := subStream.Buffered() encoder.keyEncoder.Encode(key, subStream) if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil { stream.Error = subStream.Error } - encodedKey := subStream.Buffer() + encodedKey := subStream.Buffer()[subStreamIndex:] subIter.ResetBytes(encodedKey) decodedKey := 
subIter.ReadString() if stream.indention > 0 { @@ -306,7 +311,7 @@ func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { encoder.elemEncoder.Encode(elem, subStream) keyValues = append(keyValues, encodedKV{ key: decodedKey, - keyValue: subStream.Buffer(), + keyValue: subStream.Buffer()[subStreamIndex:], }) } sort.Sort(keyValues) @@ -316,6 +321,9 @@ func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { } stream.Write(keyValue.keyValue) } + if subStream.Error != nil && stream.Error == nil { + stream.Error = subStream.Error + } stream.WriteObjectEnd() stream.cfg.ReturnStream(subStream) stream.cfg.ReturnIterator(subIter) diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go index fea50719d..3e21f3756 100644 --- a/vendor/github.com/json-iterator/go/reflect_marshaler.go +++ b/vendor/github.com/json-iterator/go/reflect_marshaler.go @@ -3,8 +3,9 @@ package jsoniter import ( "encoding" "encoding/json" - "github.com/modern-go/reflect2" "unsafe" + + "github.com/modern-go/reflect2" ) var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem() @@ -93,10 +94,17 @@ func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { stream.WriteNil() return } - bytes, err := json.Marshal(obj) + marshaler := obj.(json.Marshaler) + bytes, err := marshaler.MarshalJSON() if err != nil { stream.Error = err } else { + // html escape was already done by jsoniter + // but the extra '\n' should be trimed + l := len(bytes) + if l > 0 && bytes[l-1] == '\n' { + bytes = bytes[:l-1] + } stream.Write(bytes) } } diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go index 932641ac4..5ad5cc561 100644 --- a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go +++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -500,6 +500,9 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } var c byte for c = ','; c == ','; c = iter.nextToken() { decoder.decodeOneField(ptr, iter) @@ -510,6 +513,7 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) if c != '}' { iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c})) } + iter.decrementDepth() } func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) { @@ -571,6 +575,9 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { if iter.readFieldHash() == decoder.fieldHash { decoder.fieldDecoder.Decode(ptr, iter) @@ -584,6 +591,7 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type twoFieldsStructDecoder struct { @@ -598,6 +606,9 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -614,6 +625,7 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + 
iter.decrementDepth() } type threeFieldsStructDecoder struct { @@ -630,6 +642,9 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -648,6 +663,7 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type fourFieldsStructDecoder struct { @@ -666,6 +682,9 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -686,6 +705,7 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type fiveFieldsStructDecoder struct { @@ -706,6 +726,9 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -728,6 +751,7 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type sixFieldsStructDecoder struct { @@ -750,6 +774,9 @@ func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -774,6 +801,7 @@ func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type sevenFieldsStructDecoder struct { @@ -798,6 +826,9 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -824,6 +855,7 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type eightFieldsStructDecoder struct { @@ -850,6 +882,9 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -878,6 +913,7 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type nineFieldsStructDecoder struct { @@ -906,6 +942,9 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -936,6 +975,7 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if 
iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type tenFieldsStructDecoder struct { @@ -966,6 +1006,9 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -998,6 +1041,7 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type structFieldDecoder struct { diff --git a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go index d0759cf64..152e3ef5a 100644 --- a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go +++ b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go @@ -200,6 +200,7 @@ type stringModeStringEncoder struct { func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { tempStream := encoder.cfg.BorrowStream(nil) + tempStream.Attachment = stream.Attachment defer encoder.cfg.ReturnStream(tempStream) encoder.elemEncoder.Encode(ptr, tempStream) stream.WriteString(string(tempStream.Buffer())) diff --git a/vendor/github.com/mitchellh/copystructure/.travis.yml b/vendor/github.com/mitchellh/copystructure/.travis.yml new file mode 100644 index 000000000..d7b9589ab --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/.travis.yml @@ -0,0 +1,12 @@ +language: go + +go: + - 1.7 + - tip + +script: + - go test + +matrix: + allow_failures: + - go: tip diff --git a/vendor/github.com/mitchellh/copystructure/LICENSE b/vendor/github.com/mitchellh/copystructure/LICENSE new file mode 100644 index 000000000..229851590 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/copystructure/README.md b/vendor/github.com/mitchellh/copystructure/README.md new file mode 100644 index 000000000..bcb8c8d2c --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/README.md @@ -0,0 +1,21 @@ +# copystructure + +copystructure is a Go library for deep copying values in Go. 
+ +This allows you to copy Go values that may contain reference values +such as maps, slices, or pointers, and copy their data as well instead +of just their references. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/copystructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure). + +The `Copy` function has examples associated with it there. diff --git a/vendor/github.com/mitchellh/copystructure/copier_time.go b/vendor/github.com/mitchellh/copystructure/copier_time.go new file mode 100644 index 000000000..db6a6aa1a --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copier_time.go @@ -0,0 +1,15 @@ +package copystructure + +import ( + "reflect" + "time" +) + +func init() { + Copiers[reflect.TypeOf(time.Time{})] = timeCopier +} + +func timeCopier(v interface{}) (interface{}, error) { + // Just... copy it. + return v.(time.Time), nil +} diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go new file mode 100644 index 000000000..140435255 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copystructure.go @@ -0,0 +1,548 @@ +package copystructure + +import ( + "errors" + "reflect" + "sync" + + "github.com/mitchellh/reflectwalk" +) + +// Copy returns a deep copy of v. +func Copy(v interface{}) (interface{}, error) { + return Config{}.Copy(v) +} + +// CopierFunc is a function that knows how to deep copy a specific type. +// Register these globally with the Copiers variable. +type CopierFunc func(interface{}) (interface{}, error) + +// Copiers is a map of types that behave specially when they are copied. +// If a type is found in this map while deep copying, this function +// will be called to copy it instead of attempting to copy all fields. +// +// The key should be the type, obtained using: reflect.TypeOf(value with type). +// +// It is unsafe to write to this map after Copies have started. If you +// are writing to this map while also copying, wrap all modifications to +// this map as well as to Copy in a mutex. +var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc) + +// Must is a helper that wraps a call to a function returning +// (interface{}, error) and panics if the error is non-nil. It is intended +// for use in variable initializations and should only be used when a copy +// error should be a crashing case. +func Must(v interface{}, err error) interface{} { + if err != nil { + panic("copy error: " + err.Error()) + } + + return v +} + +var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true") + +type Config struct { + // Lock any types that are a sync.Locker and are not a mutex while copying. + // If there is an RLocker method, use that to get the sync.Locker. + Lock bool + + // Copiers is a map of types associated with a CopierFunc. Use the global + // Copiers map if this is nil. + Copiers map[reflect.Type]CopierFunc +} + +func (c Config) Copy(v interface{}) (interface{}, error) { + if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr { + return nil, errPointerRequired + } + + w := new(walker) + if c.Lock { + w.useLocks = true + } + + if c.Copiers == nil { + c.Copiers = Copiers + } + + err := reflectwalk.Walk(v, w) + if err != nil { + return nil, err + } + + // Get the result. If the result is nil, then we want to turn it + // into a typed nil if we can. 
+ result := w.Result + if result == nil { + val := reflect.ValueOf(v) + result = reflect.Indirect(reflect.New(val.Type())).Interface() + } + + return result, nil +} + +// Return the key used to index interfaces types we've seen. Store the number +// of pointers in the upper 32bits, and the depth in the lower 32bits. This is +// easy to calculate, easy to match a key with our current depth, and we don't +// need to deal with initializing and cleaning up nested maps or slices. +func ifaceKey(pointers, depth int) uint64 { + return uint64(pointers)<<32 | uint64(depth) +} + +type walker struct { + Result interface{} + + depth int + ignoreDepth int + vals []reflect.Value + cs []reflect.Value + + // This stores the number of pointers we've walked over, indexed by depth. + ps []int + + // If an interface is indirected by a pointer, we need to know the type of + // interface to create when creating the new value. Store the interface + // types here, indexed by both the walk depth and the number of pointers + // already seen at that depth. Use ifaceKey to calculate the proper uint64 + // value. + ifaceTypes map[uint64]reflect.Type + + // any locks we've taken, indexed by depth + locks []sync.Locker + // take locks while walking the structure + useLocks bool +} + +func (w *walker) Enter(l reflectwalk.Location) error { + w.depth++ + + // ensure we have enough elements to index via w.depth + for w.depth >= len(w.locks) { + w.locks = append(w.locks, nil) + } + + for len(w.ps) < w.depth+1 { + w.ps = append(w.ps, 0) + } + + return nil +} + +func (w *walker) Exit(l reflectwalk.Location) error { + locker := w.locks[w.depth] + w.locks[w.depth] = nil + if locker != nil { + defer locker.Unlock() + } + + // clear out pointers and interfaces as we exit the stack + w.ps[w.depth] = 0 + + for k := range w.ifaceTypes { + mask := uint64(^uint32(0)) + if k&mask == uint64(w.depth) { + delete(w.ifaceTypes, k) + } + } + + w.depth-- + if w.ignoreDepth > w.depth { + w.ignoreDepth = 0 + } + + if w.ignoring() { + return nil + } + + switch l { + case reflectwalk.Array: + fallthrough + case reflectwalk.Map: + fallthrough + case reflectwalk.Slice: + w.replacePointerMaybe() + + // Pop map off our container + w.cs = w.cs[:len(w.cs)-1] + case reflectwalk.MapValue: + // Pop off the key and value + mv := w.valPop() + mk := w.valPop() + m := w.cs[len(w.cs)-1] + + // If mv is the zero value, SetMapIndex deletes the key form the map, + // or in this case never adds it. We need to create a properly typed + // zero value so that this key can be set. 
+ if !mv.IsValid() { + mv = reflect.Zero(m.Elem().Type().Elem()) + } + m.Elem().SetMapIndex(mk, mv) + case reflectwalk.ArrayElem: + // Pop off the value and the index and set it on the array + v := w.valPop() + i := w.valPop().Interface().(int) + if v.IsValid() { + a := w.cs[len(w.cs)-1] + ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call + if ae.CanSet() { + ae.Set(v) + } + } + case reflectwalk.SliceElem: + // Pop off the value and the index and set it on the slice + v := w.valPop() + i := w.valPop().Interface().(int) + if v.IsValid() { + s := w.cs[len(w.cs)-1] + se := s.Elem().Index(i) + if se.CanSet() { + se.Set(v) + } + } + case reflectwalk.Struct: + w.replacePointerMaybe() + + // Remove the struct from the container stack + w.cs = w.cs[:len(w.cs)-1] + case reflectwalk.StructField: + // Pop off the value and the field + v := w.valPop() + f := w.valPop().Interface().(reflect.StructField) + if v.IsValid() { + s := w.cs[len(w.cs)-1] + sf := reflect.Indirect(s).FieldByName(f.Name) + + if sf.CanSet() { + sf.Set(v) + } + } + case reflectwalk.WalkLoc: + // Clear out the slices for GC + w.cs = nil + w.vals = nil + } + + return nil +} + +func (w *walker) Map(m reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(m) + + // Create the map. If the map itself is nil, then just make a nil map + var newMap reflect.Value + if m.IsNil() { + newMap = reflect.New(m.Type()) + } else { + newMap = wrapPtr(reflect.MakeMap(m.Type())) + } + + w.cs = append(w.cs, newMap) + w.valPush(newMap) + return nil +} + +func (w *walker) MapElem(m, k, v reflect.Value) error { + return nil +} + +func (w *walker) PointerEnter(v bool) error { + if v { + w.ps[w.depth]++ + } + return nil +} + +func (w *walker) PointerExit(v bool) error { + if v { + w.ps[w.depth]-- + } + return nil +} + +func (w *walker) Interface(v reflect.Value) error { + if !v.IsValid() { + return nil + } + if w.ifaceTypes == nil { + w.ifaceTypes = make(map[uint64]reflect.Type) + } + + w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type() + return nil +} + +func (w *walker) Primitive(v reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(v) + + // IsValid verifies the v is non-zero and CanInterface verifies + // that we're allowed to read this value (unexported fields). + var newV reflect.Value + if v.IsValid() && v.CanInterface() { + newV = reflect.New(v.Type()) + newV.Elem().Set(v) + } + + w.valPush(newV) + w.replacePointerMaybe() + return nil +} + +func (w *walker) Slice(s reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(s) + + var newS reflect.Value + if s.IsNil() { + newS = reflect.New(s.Type()) + } else { + newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap())) + } + + w.cs = append(w.cs, newS) + w.valPush(newS) + return nil +} + +func (w *walker) SliceElem(i int, elem reflect.Value) error { + if w.ignoring() { + return nil + } + + // We don't write the slice here because elem might still be + // arbitrarily complex. Just record the index and continue on. + w.valPush(reflect.ValueOf(i)) + + return nil +} + +func (w *walker) Array(a reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(a) + + newA := reflect.New(a.Type()) + + w.cs = append(w.cs, newA) + w.valPush(newA) + return nil +} + +func (w *walker) ArrayElem(i int, elem reflect.Value) error { + if w.ignoring() { + return nil + } + + // We don't write the array here because elem might still be + // arbitrarily complex. Just record the index and continue on. 
+ w.valPush(reflect.ValueOf(i)) + + return nil +} + +func (w *walker) Struct(s reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(s) + + var v reflect.Value + if c, ok := Copiers[s.Type()]; ok { + // We have a Copier for this struct, so we use that copier to + // get the copy, and we ignore anything deeper than this. + w.ignoreDepth = w.depth + + dup, err := c(s.Interface()) + if err != nil { + return err + } + + // We need to put a pointer to the value on the value stack, + // so allocate a new pointer and set it. + v = reflect.New(s.Type()) + reflect.Indirect(v).Set(reflect.ValueOf(dup)) + } else { + // No copier, we copy ourselves and allow reflectwalk to guide + // us deeper into the structure for copying. + v = reflect.New(s.Type()) + } + + // Push the value onto the value stack for setting the struct field, + // and add the struct itself to the containers stack in case we walk + // deeper so that its own fields can be modified. + w.valPush(v) + w.cs = append(w.cs, v) + + return nil +} + +func (w *walker) StructField(f reflect.StructField, v reflect.Value) error { + if w.ignoring() { + return nil + } + + // If PkgPath is non-empty, this is a private (unexported) field. + // We do not set this unexported since the Go runtime doesn't allow us. + if f.PkgPath != "" { + return reflectwalk.SkipEntry + } + + // Push the field onto the stack, we'll handle it when we exit + // the struct field in Exit... + w.valPush(reflect.ValueOf(f)) + return nil +} + +// ignore causes the walker to ignore any more values until we exit this on +func (w *walker) ignore() { + w.ignoreDepth = w.depth +} + +func (w *walker) ignoring() bool { + return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth +} + +func (w *walker) pointerPeek() bool { + return w.ps[w.depth] > 0 +} + +func (w *walker) valPop() reflect.Value { + result := w.vals[len(w.vals)-1] + w.vals = w.vals[:len(w.vals)-1] + + // If we're out of values, that means we popped everything off. In + // this case, we reset the result so the next pushed value becomes + // the result. + if len(w.vals) == 0 { + w.Result = nil + } + + return result +} + +func (w *walker) valPush(v reflect.Value) { + w.vals = append(w.vals, v) + + // If we haven't set the result yet, then this is the result since + // it is the first (outermost) value we're seeing. + if w.Result == nil && v.IsValid() { + w.Result = v.Interface() + } +} + +func (w *walker) replacePointerMaybe() { + // Determine the last pointer value. If it is NOT a pointer, then + // we need to push that onto the stack. + if !w.pointerPeek() { + w.valPush(reflect.Indirect(w.valPop())) + return + } + + v := w.valPop() + + // If the expected type is a pointer to an interface of any depth, + // such as *interface{}, **interface{}, etc., then we need to convert + // the value "v" from *CONCRETE to *interface{} so types match for + // Set. + // + // Example if v is type *Foo where Foo is a struct, v would become + // *interface{} instead. This only happens if we have an interface expectation + // at this depth. 
+ // + // For more info, see GH-16 + if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface { + y := reflect.New(iType) // Create *interface{} + y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced) + v = y // v is now typed *interface{} (where *v = Foo) + } + + for i := 1; i < w.ps[w.depth]; i++ { + if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok { + iface := reflect.New(iType).Elem() + iface.Set(v) + v = iface + } + + p := reflect.New(v.Type()) + p.Elem().Set(v) + v = p + } + + w.valPush(v) +} + +// if this value is a Locker, lock it and add it to the locks slice +func (w *walker) lock(v reflect.Value) { + if !w.useLocks { + return + } + + if !v.IsValid() || !v.CanInterface() { + return + } + + type rlocker interface { + RLocker() sync.Locker + } + + var locker sync.Locker + + // We can't call Interface() on a value directly, since that requires + // a copy. This is OK, since the pointer to a value which is a sync.Locker + // is also a sync.Locker. + if v.Kind() == reflect.Ptr { + switch l := v.Interface().(type) { + case rlocker: + // don't lock a mutex directly + if _, ok := l.(*sync.RWMutex); !ok { + locker = l.RLocker() + } + case sync.Locker: + locker = l + } + } else if v.CanAddr() { + switch l := v.Addr().Interface().(type) { + case rlocker: + // don't lock a mutex directly + if _, ok := l.(*sync.RWMutex); !ok { + locker = l.RLocker() + } + case sync.Locker: + locker = l + } + } + + // still no callable locker + if locker == nil { + return + } + + // don't lock a mutex directly + switch locker.(type) { + case *sync.Mutex, *sync.RWMutex: + return + } + + locker.Lock() + w.locks[w.depth] = locker +} + +// wrapPtr is a helper that takes v and always make it *v. 
copystructure +// stores things internally as pointers until the last moment before unwrapping +func wrapPtr(v reflect.Value) reflect.Value { + if !v.IsValid() { + return v + } + vPtr := reflect.New(v.Type()) + vPtr.Elem().Set(v) + return vPtr +} diff --git a/vendor/github.com/mitchellh/copystructure/go.mod b/vendor/github.com/mitchellh/copystructure/go.mod new file mode 100644 index 000000000..d01864309 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/go.mod @@ -0,0 +1,3 @@ +module github.com/mitchellh/copystructure + +require github.com/mitchellh/reflectwalk v1.0.0 diff --git a/vendor/github.com/mitchellh/copystructure/go.sum b/vendor/github.com/mitchellh/copystructure/go.sum new file mode 100644 index 000000000..be5724561 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/go.sum @@ -0,0 +1,2 @@ +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= diff --git a/vendor/github.com/mitchellh/reflectwalk/.travis.yml b/vendor/github.com/mitchellh/reflectwalk/.travis.yml new file mode 100644 index 000000000..4f2ee4d97 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/mitchellh/reflectwalk/LICENSE b/vendor/github.com/mitchellh/reflectwalk/LICENSE new file mode 100644 index 000000000..f9c841a51 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/reflectwalk/README.md b/vendor/github.com/mitchellh/reflectwalk/README.md new file mode 100644 index 000000000..ac82cd2e1 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/README.md @@ -0,0 +1,6 @@ +# reflectwalk + +reflectwalk is a Go library for "walking" a value in Go using reflection, +in the same way a directory tree can be "walked" on the filesystem. Walking +a complex structure can allow you to do manipulations on unknown structures +such as those decoded from JSON. 
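The two newly vendored mitchellh packages above work together: copystructure drives a reflectwalk walker internally to produce deep copies. Below is a minimal usage sketch assuming only the exported APIs visible in the added files (copystructure.Copy, reflectwalk.Walk, and the PrimitiveWalker interface); the Spec struct and printWalker type are illustrative names, not part of the vendored code.

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/mitchellh/copystructure"
	"github.com/mitchellh/reflectwalk"
)

// printWalker implements reflectwalk.PrimitiveWalker; Primitive is called for
// every primitive value (string, number, bool, ...) visited during the walk.
type printWalker struct{}

func (printWalker) Primitive(v reflect.Value) error {
	if v.IsValid() && v.CanInterface() {
		fmt.Println("visited:", v.Interface())
	}
	return nil
}

func main() {
	// Hypothetical value containing reference types (a map and a pointer).
	type Spec struct {
		Labels   map[string]string
		Replicas *int
	}
	three := 3
	original := Spec{Labels: map[string]string{"app": "etcd"}, Replicas: &three}

	// Deep copy: maps and pointer targets are duplicated, not shared.
	dup, err := copystructure.Copy(original)
	if err != nil {
		panic(err)
	}
	copied := dup.(Spec)
	*copied.Replicas = 5 // does not affect original.Replicas

	fmt.Println(*original.Replicas, *copied.Replicas) // 3 5

	// Walk the original value, visiting every primitive it contains.
	if err := reflectwalk.Walk(original, printWalker{}); err != nil {
		panic(err)
	}
}
```

Copy returns an interface{}, so callers type-assert back to the concrete type; the Must helper added in copystructure.go can wrap the call when a copy failure should panic rather than return an error.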
diff --git a/vendor/github.com/mitchellh/reflectwalk/go.mod b/vendor/github.com/mitchellh/reflectwalk/go.mod new file mode 100644 index 000000000..52bb7c469 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/reflectwalk diff --git a/vendor/github.com/mitchellh/reflectwalk/location.go b/vendor/github.com/mitchellh/reflectwalk/location.go new file mode 100644 index 000000000..6a7f17611 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location.go @@ -0,0 +1,19 @@ +package reflectwalk + +//go:generate stringer -type=Location location.go + +type Location uint + +const ( + None Location = iota + Map + MapKey + MapValue + Slice + SliceElem + Array + ArrayElem + Struct + StructField + WalkLoc +) diff --git a/vendor/github.com/mitchellh/reflectwalk/location_string.go b/vendor/github.com/mitchellh/reflectwalk/location_string.go new file mode 100644 index 000000000..70760cf4c --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=Location location.go"; DO NOT EDIT. + +package reflectwalk + +import "fmt" + +const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemArrayArrayElemStructStructFieldWalkLoc" + +var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 40, 49, 55, 66, 73} + +func (i Location) String() string { + if i >= Location(len(_Location_index)-1) { + return fmt.Sprintf("Location(%d)", i) + } + return _Location_name[_Location_index[i]:_Location_index[i+1]] +} diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go new file mode 100644 index 000000000..3a93a0b11 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go @@ -0,0 +1,402 @@ +// reflectwalk is a package that allows you to "walk" complex structures +// similar to how you may "walk" a filesystem: visiting every element one +// by one and calling callback functions allowing you to handle and manipulate +// those elements. +package reflectwalk + +import ( + "errors" + "reflect" +) + +// PrimitiveWalker implementations are able to handle primitive values +// within complex structures. Primitive values are numbers, strings, +// booleans, funcs, chans. +// +// These primitive values are often members of more complex +// structures (slices, maps, etc.) that are walkable by other interfaces. +type PrimitiveWalker interface { + Primitive(reflect.Value) error +} + +// InterfaceWalker implementations are able to handle interface values as they +// are encountered during the walk. +type InterfaceWalker interface { + Interface(reflect.Value) error +} + +// MapWalker implementations are able to handle individual elements +// found within a map structure. +type MapWalker interface { + Map(m reflect.Value) error + MapElem(m, k, v reflect.Value) error +} + +// SliceWalker implementations are able to handle slice elements found +// within complex structures. +type SliceWalker interface { + Slice(reflect.Value) error + SliceElem(int, reflect.Value) error +} + +// ArrayWalker implementations are able to handle array elements found +// within complex structures. +type ArrayWalker interface { + Array(reflect.Value) error + ArrayElem(int, reflect.Value) error +} + +// StructWalker is an interface that has methods that are called for +// structs when a Walk is done. 
+type StructWalker interface { + Struct(reflect.Value) error + StructField(reflect.StructField, reflect.Value) error +} + +// EnterExitWalker implementations are notified before and after +// they walk deeper into complex structures (into struct fields, +// into slice elements, etc.) +type EnterExitWalker interface { + Enter(Location) error + Exit(Location) error +} + +// PointerWalker implementations are notified when the value they're +// walking is a pointer or not. Pointer is called for _every_ value whether +// it is a pointer or not. +type PointerWalker interface { + PointerEnter(bool) error + PointerExit(bool) error +} + +// SkipEntry can be returned from walk functions to skip walking +// the value of this field. This is only valid in the following functions: +// +// - Struct: skips all fields from being walked +// - StructField: skips walking the struct value +// +var SkipEntry = errors.New("skip this entry") + +// Walk takes an arbitrary value and an interface and traverses the +// value, calling callbacks on the interface if they are supported. +// The interface should implement one or more of the walker interfaces +// in this package, such as PrimitiveWalker, StructWalker, etc. +func Walk(data, walker interface{}) (err error) { + v := reflect.ValueOf(data) + ew, ok := walker.(EnterExitWalker) + if ok { + err = ew.Enter(WalkLoc) + } + + if err == nil { + err = walk(v, walker) + } + + if ok && err == nil { + err = ew.Exit(WalkLoc) + } + + return +} + +func walk(v reflect.Value, w interface{}) (err error) { + // Determine if we're receiving a pointer and if so notify the walker. + // The logic here is convoluted but very important (tests will fail if + // almost any part is changed). I will try to explain here. + // + // First, we check if the value is an interface, if so, we really need + // to check the interface's VALUE to see whether it is a pointer. + // + // Check whether the value is then a pointer. If so, then set pointer + // to true to notify the user. + // + // If we still have a pointer or an interface after the indirections, then + // we unwrap another level + // + // At this time, we also set "v" to be the dereferenced value. This is + // because once we've unwrapped the pointer we want to use that value. + pointer := false + pointerV := v + + for { + if pointerV.Kind() == reflect.Interface { + if iw, ok := w.(InterfaceWalker); ok { + if err = iw.Interface(pointerV); err != nil { + return + } + } + + pointerV = pointerV.Elem() + } + + if pointerV.Kind() == reflect.Ptr { + pointer = true + v = reflect.Indirect(pointerV) + } + if pw, ok := w.(PointerWalker); ok { + if err = pw.PointerEnter(pointer); err != nil { + return + } + + defer func(pointer bool) { + if err != nil { + return + } + + err = pw.PointerExit(pointer) + }(pointer) + } + + if pointer { + pointerV = v + } + pointer = false + + // If we still have a pointer or interface we have to indirect another level. + switch pointerV.Kind() { + case reflect.Ptr, reflect.Interface: + continue + } + break + } + + // We preserve the original value here because if it is an interface + // type, we want to pass that directly into the walkPrimitive, so that + // we can set it. 
+ originalV := v + if v.Kind() == reflect.Interface { + v = v.Elem() + } + + k := v.Kind() + if k >= reflect.Int && k <= reflect.Complex128 { + k = reflect.Int + } + + switch k { + // Primitives + case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid: + err = walkPrimitive(originalV, w) + return + case reflect.Map: + err = walkMap(v, w) + return + case reflect.Slice: + err = walkSlice(v, w) + return + case reflect.Struct: + err = walkStruct(v, w) + return + case reflect.Array: + err = walkArray(v, w) + return + default: + panic("unsupported type: " + k.String()) + } +} + +func walkMap(v reflect.Value, w interface{}) error { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Map) + } + + if mw, ok := w.(MapWalker); ok { + if err := mw.Map(v); err != nil { + return err + } + } + + for _, k := range v.MapKeys() { + kv := v.MapIndex(k) + + if mw, ok := w.(MapWalker); ok { + if err := mw.MapElem(v, k, kv); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(MapKey) + } + + if err := walk(k, w); err != nil { + return err + } + + if ok { + ew.Exit(MapKey) + ew.Enter(MapValue) + } + + // get the map value again as it may have changed in the MapElem call + if err := walk(v.MapIndex(k), w); err != nil { + return err + } + + if ok { + ew.Exit(MapValue) + } + } + + if ewok { + ew.Exit(Map) + } + + return nil +} + +func walkPrimitive(v reflect.Value, w interface{}) error { + if pw, ok := w.(PrimitiveWalker); ok { + return pw.Primitive(v) + } + + return nil +} + +func walkSlice(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Slice) + } + + if sw, ok := w.(SliceWalker); ok { + if err := sw.Slice(v); err != nil { + return err + } + } + + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + + if sw, ok := w.(SliceWalker); ok { + if err := sw.SliceElem(i, elem); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(SliceElem) + } + + if err := walk(elem, w); err != nil { + return err + } + + if ok { + ew.Exit(SliceElem) + } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Slice) + } + + return nil +} + +func walkArray(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Array) + } + + if aw, ok := w.(ArrayWalker); ok { + if err := aw.Array(v); err != nil { + return err + } + } + + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + + if aw, ok := w.(ArrayWalker); ok { + if err := aw.ArrayElem(i, elem); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(ArrayElem) + } + + if err := walk(elem, w); err != nil { + return err + } + + if ok { + ew.Exit(ArrayElem) + } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Array) + } + + return nil +} + +func walkStruct(v reflect.Value, w interface{}) (err error) { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Struct) + } + + skip := false + if sw, ok := w.(StructWalker); ok { + err = sw.Struct(v) + if err == SkipEntry { + skip = true + err = nil + } + if err != nil { + return + } + } + + if !skip { + vt := v.Type() + for i := 0; i < vt.NumField(); i++ { + sf := vt.Field(i) + f := v.FieldByIndex([]int{i}) + + if sw, ok := w.(StructWalker); ok { + err = sw.StructField(sf, f) + + // SkipEntry just pretends this field doesn't even exist + if err == SkipEntry { + continue + } + + if err != nil { + return + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(StructField) + } + + err = 
walk(f, w) + if err != nil { + return + } + + if ok { + ew.Exit(StructField) + } + } + } + + if ewok { + ew.Exit(Struct) + } + + return nil +} diff --git a/vendor/github.com/onsi/ginkgo/.travis.yml b/vendor/github.com/onsi/ginkgo/.travis.yml index 9da4c5b5e..72e8ccf0b 100644 --- a/vendor/github.com/onsi/ginkgo/.travis.yml +++ b/vendor/github.com/onsi/ginkgo/.travis.yml @@ -5,9 +5,6 @@ go: - 1.12.x - tip -# allow internal package imports, necessary for forked repositories -go_import_path: github.com/onsi/ginkgo - install: - go get -v -t ./... - go get golang.org/x/tools/cmd/cover diff --git a/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/CHANGELOG.md index 020496c9d..4920406ae 100644 --- a/vendor/github.com/onsi/ginkgo/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/CHANGELOG.md @@ -1,43 +1,3 @@ -## 1.10.3 - -### Fixes -- Set go_import_path in travis.yml to allow internal packages in forks (#607) [3b721db] -- Add integration test [d90e0dc] -- Fix coverage files combining [e5dde8c] -- A new CLI option: -ginkgo.reportFile (#601) [034fd25] - -## 1.10.2 - -### Fixes -- speed up table entry generateIt() (#609) [5049dc5] -- Fix. Write errors to stderr instead of stdout (#610) [7bb3091] - -## 1.10.1 - -### Fixes -- stack backtrace: fix skipping (#600) [2a4c0bd] - -## 1.10.0 - -### Fixes -- stack backtrace: fix alignment and skipping [66915d6] -- fix typo in documentation [8f97b93] - -## 1.9.0 - -### Features -- Option to print output into report, when tests have passed [0545415] - -### Fixes -- Fixed typos in comments [0ecbc58] -- gofmt code [a7f8bfb] -- Simplify code [7454d00] -- Simplify concatenation, incrementation and function assignment [4825557] -- Avoid unnecessary conversions [9d9403c] -- JUnit: include more detailed information about panic [19cca4b] -- Print help to stdout when the user asks for help [4cb7441] - - ## 1.8.0 ### New Features diff --git a/vendor/github.com/onsi/ginkgo/config/config.go b/vendor/github.com/onsi/ginkgo/config/config.go index 89ec2b29a..dab2a2470 100644 --- a/vendor/github.com/onsi/ginkgo/config/config.go +++ b/vendor/github.com/onsi/ginkgo/config/config.go @@ -20,7 +20,7 @@ import ( "fmt" ) -const VERSION = "1.10.3" +const VERSION = "1.8.0" type GinkgoConfigType struct { RandomSeed int64 @@ -52,15 +52,13 @@ type DefaultReporterConfigType struct { Succinct bool Verbose bool FullTrace bool - ReportPassed bool - ReportFile string } var DefaultReporterConfig = DefaultReporterConfigType{} func processPrefix(prefix string) string { if prefix != "" { - prefix += "." + prefix = prefix + "." 
} return prefix } @@ -100,9 +98,6 @@ func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) { flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter print out all specs as they begin.") flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report") flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs") - flagSet.BoolVar(&(DefaultReporterConfig.ReportPassed), prefix+"reportPassed", false, "If set, default reporter prints out captured output of passed tests.") - flagSet.StringVar(&(DefaultReporterConfig.ReportFile), prefix+"reportFile", "", "Override the default reporter output file path.") - } func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string { @@ -201,13 +196,5 @@ func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultRepor result = append(result, fmt.Sprintf("--%strace", prefix)) } - if reporter.ReportPassed { - result = append(result, fmt.Sprintf("--%sreportPassed", prefix)) - } - - if reporter.ReportFile != "" { - result = append(result, fmt.Sprintf("--%sreportFile=%s", prefix, reporter.ReportFile)) - } - return result } diff --git a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go index 3cbf89a35..a6b96d88f 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go +++ b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go @@ -199,11 +199,6 @@ type Benchmarker interface { // ginkgo bootstrap func RunSpecs(t GinkgoTestingT, description string) bool { specReporters := []Reporter{buildDefaultReporter()} - if config.DefaultReporterConfig.ReportFile != "" { - reportFile := config.DefaultReporterConfig.ReportFile - specReporters[0] = reporters.NewJUnitReporter(reportFile) - return RunSpecsWithDefaultAndCustomReporters(t, description, specReporters) - } return RunSpecsWithCustomReporters(t, description, specReporters) } @@ -288,7 +283,7 @@ func GinkgoRecover() { //BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks. // //In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally -//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object +//equivalent. The difference is purely semantic -- you typical Describe the behavior of an object //or method and, within that Describe, outline a number of Contexts and Whens. func Describe(text string, body func()) bool { globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1)) @@ -504,7 +499,7 @@ func AfterSuite(body interface{}, timeout ...float64) bool { //until that node is done before running. // //SynchronizedBeforeSuite accomplishes this by taking *two* function arguments. The first is only run on parallel node #1. The second is -//run on all nodes, but *only* after the first function completes successfully. Ginkgo also makes it possible to send data from the first function (on Node 1) +//run on all nodes, but *only* after the first function completes succesfully. Ginkgo also makes it possible to send data from the first function (on Node 1) //to the second function (on all the other nodes). // //The functions have the following signatures. 
The first function (which only runs on node 1) has the signature: diff --git a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go index aa89d6cba..fa2f0bf73 100644 --- a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go +++ b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go @@ -11,35 +11,19 @@ import ( func New(skip int) types.CodeLocation { _, file, line, _ := runtime.Caller(skip + 1) - stackTrace := PruneStack(string(debug.Stack()), skip+1) + stackTrace := PruneStack(string(debug.Stack()), skip) return types.CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace} } -// PruneStack removes references to functions that are internal to Ginkgo -// and the Go runtime from a stack string and a certain number of stack entries -// at the beginning of the stack. The stack string has the format -// as returned by runtime/debug.Stack. The leading goroutine information is -// optional and always removed if present. Beware that runtime/debug.Stack -// adds itself as first entry, so typically skip must be >= 1 to remove that -// entry. func PruneStack(fullStackTrace string, skip int) string { stack := strings.Split(fullStackTrace, "\n") - // Ensure that the even entries are the method names and the - // the odd entries the source code information. - if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") { - // Ignore "goroutine 29 [running]:" line. - stack = stack[1:] - } - // The "+1" is for skipping over the initial entry, which is - // runtime/debug.Stack() itself. if len(stack) > 2*(skip+1) { stack = stack[2*(skip+1):] } prunedStack := []string{} re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) for i := 0; i < len(stack)/2; i++ { - // We filter out based on the source code file name. 
- if !re.Match([]byte(stack[i*2+1])) { + if !re.Match([]byte(stack[i*2])) { prunedStack = append(prunedStack, stack[i*2]) prunedStack = append(prunedStack, stack[i*2+1]) } diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go index 393901e11..d6d54234c 100644 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go @@ -17,7 +17,7 @@ type benchmarker struct { func newBenchmarker() *benchmarker { return &benchmarker{ - measurements: make(map[string]*types.SpecMeasurement), + measurements: make(map[string]*types.SpecMeasurement, 0), } } diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go index f9ab30067..6b54afe01 100644 --- a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go +++ b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go @@ -54,11 +54,11 @@ func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporte config: config, stenographer: stenographer, - suiteBeginnings: make(chan configAndSuite), - beforeSuites: make(chan *types.SetupSummary), - afterSuites: make(chan *types.SetupSummary), - specCompletions: make(chan *types.SpecSummary), - suiteEndings: make(chan *types.SuiteSummary), + suiteBeginnings: make(chan configAndSuite, 0), + beforeSuites: make(chan *types.SetupSummary, 0), + afterSuites: make(chan *types.SetupSummary, 0), + specCompletions: make(chan *types.SpecSummary, 0), + suiteEndings: make(chan *types.SuiteSummary, 0), } go aggregator.mux() @@ -227,7 +227,7 @@ func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (fi aggregatedSuiteSummary.SuiteSucceeded = true for _, suiteSummary := range aggregator.aggregatedSuiteEndings { - if !suiteSummary.SuiteSucceeded { + if suiteSummary.SuiteSucceeded == false { aggregatedSuiteSummary.SuiteSucceeded = false } diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/server.go b/vendor/github.com/onsi/ginkgo/internal/remote/server.go index 93e9dac05..367c54daf 100644 --- a/vendor/github.com/onsi/ginkgo/internal/remote/server.go +++ b/vendor/github.com/onsi/ginkgo/internal/remote/server.go @@ -213,7 +213,7 @@ func (server *Server) handleCounter(writer http.ResponseWriter, request *http.Re c := spec_iterator.Counter{} server.lock.Lock() c.Index = server.counter - server.counter++ + server.counter = server.counter + 1 server.lock.Unlock() json.NewEncoder(writer).Encode(c) diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/spec.go b/vendor/github.com/onsi/ginkgo/internal/spec/spec.go index 6eef40a0e..7fd68ee8e 100644 --- a/vendor/github.com/onsi/ginkgo/internal/spec/spec.go +++ b/vendor/github.com/onsi/ginkgo/internal/spec/spec.go @@ -107,11 +107,11 @@ func (spec *Spec) Summary(suiteID string) *types.SpecSummary { NumberOfSamples: spec.subject.Samples(), ComponentTexts: componentTexts, ComponentCodeLocations: componentCodeLocations, - State: spec.getState(), - RunTime: runTime, - Failure: spec.failure, - Measurements: spec.measurementsReport(), - SuiteID: suiteID, + State: spec.getState(), + RunTime: runTime, + Failure: spec.failure, + Measurements: spec.measurementsReport(), + SuiteID: suiteID, } } diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/specs.go b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go index 8a2007137..27c0d1d6c 100644 --- a/vendor/github.com/onsi/ginkgo/internal/spec/specs.go +++ 
b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go @@ -107,11 +107,11 @@ func (e *Specs) applyRegExpFocusAndSkip(description string, focusString string, toMatch := e.toMatch(description, i) if focusFilter != nil { - matchesFocus = focusFilter.Match(toMatch) + matchesFocus = focusFilter.Match([]byte(toMatch)) } if skipFilter != nil { - matchesSkip = skipFilter.Match(toMatch) + matchesSkip = skipFilter.Match([]byte(toMatch)) } if !matchesFocus || matchesSkip { diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go index c9a0a60d8..2c683cb8b 100644 --- a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go +++ b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go @@ -300,7 +300,7 @@ func (runner *SpecRunner) reportSpecWillRun(summary *types.SpecSummary) { } func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, failed bool) { - if len(summary.CapturedOutput) == 0 { + if failed && len(summary.CapturedOutput) == 0 { summary.CapturedOutput = string(runner.writer.Bytes()) } for i := len(runner.reporters) - 1; i >= 1; i-- { diff --git a/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go index c76283b46..ac58dd5f7 100644 --- a/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go @@ -62,9 +62,6 @@ func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary) reporter.stenographer.AnnounceSuccesfulSlowSpec(specSummary, reporter.config.Succinct) } else { reporter.stenographer.AnnounceSuccesfulSpec(specSummary) - if reporter.config.ReportPassed { - reporter.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput) - } } case types.SpecStatePending: reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct) diff --git a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go index d76e2fe77..2c9f3c792 100644 --- a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go +++ b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go @@ -13,7 +13,6 @@ import ( "fmt" "math" "os" - "path/filepath" "strings" "github.com/onsi/ginkgo/config" @@ -33,17 +32,12 @@ type JUnitTestSuite struct { type JUnitTestCase struct { Name string `xml:"name,attr"` ClassName string `xml:"classname,attr"` - PassedMessage *JUnitPassedMessage `xml:"passed,omitempty"` FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"` Skipped *JUnitSkipped `xml:"skipped,omitempty"` Time float64 `xml:"time,attr"` SystemOut string `xml:"system-out,omitempty"` } -type JUnitPassedMessage struct { - Message string `xml:",chardata"` -} - type JUnitFailureMessage struct { Type string `xml:"type,attr"` Message string `xml:",chardata"` @@ -54,10 +48,9 @@ type JUnitSkipped struct { } type JUnitReporter struct { - suite JUnitTestSuite - filename string - testSuiteName string - ReporterConfig config.DefaultReporterConfigType + suite JUnitTestSuite + filename string + testSuiteName string } //NewJUnitReporter creates a new JUnit XML reporter. The XML will be stored in the passed in filename. 
@@ -67,13 +60,12 @@ func NewJUnitReporter(filename string) *JUnitReporter { } } -func (reporter *JUnitReporter) SpecSuiteWillBegin(ginkgoConfig config.GinkgoConfigType, summary *types.SuiteSummary) { +func (reporter *JUnitReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { reporter.suite = JUnitTestSuite{ Name: summary.SuiteDescription, TestCases: []JUnitTestCase{}, } reporter.testSuiteName = summary.SuiteDescription - reporter.ReporterConfig = config.DefaultReporterConfig } func (reporter *JUnitReporter) SpecWillRun(specSummary *types.SpecSummary) { @@ -113,21 +105,11 @@ func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) { Name: strings.Join(specSummary.ComponentTexts[1:], " "), ClassName: reporter.testSuiteName, } - if reporter.ReporterConfig.ReportPassed && specSummary.State == types.SpecStatePassed { - testCase.PassedMessage = &JUnitPassedMessage{ - Message: specSummary.CapturedOutput, - } - } if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked { testCase.FailureMessage = &JUnitFailureMessage{ Type: reporter.failureTypeForState(specSummary.State), Message: failureMessage(specSummary.Failure), } - if specSummary.State == types.SpecStatePanicked { - testCase.FailureMessage.Message += fmt.Sprintf("\n\nPanic: %s\n\nFull stack:\n%s", - specSummary.Failure.ForwardedPanic, - specSummary.Failure.Location.FullStackTrace) - } testCase.SystemOut = specSummary.CapturedOutput } if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending { @@ -142,29 +124,17 @@ func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { reporter.suite.Time = math.Trunc(summary.RunTime.Seconds()*1000) / 1000 reporter.suite.Failures = summary.NumberOfFailedSpecs reporter.suite.Errors = 0 - if reporter.ReporterConfig.ReportFile != "" { - reporter.filename = reporter.ReporterConfig.ReportFile - fmt.Printf("\nJUnit path was configured: %s\n", reporter.filename) - } - filePath, _ := filepath.Abs(reporter.filename) - dirPath := filepath.Dir(filePath) - err := os.MkdirAll(dirPath, os.ModePerm) + file, err := os.Create(reporter.filename) if err != nil { - fmt.Printf("\nFailed to create JUnit directory: %s\n\t%s", filePath, err.Error()) - } - file, err := os.Create(filePath) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to create JUnit report file: %s\n\t%s", filePath, err.Error()) + fmt.Printf("Failed to create JUnit report file: %s\n\t%s", reporter.filename, err.Error()) } defer file.Close() file.WriteString(xml.Header) encoder := xml.NewEncoder(file) encoder.Indent(" ", " ") err = encoder.Encode(reporter.suite) - if err == nil { - fmt.Fprintf(os.Stdout, "\nJUnit report was created: %s\n", filePath) - } else { - fmt.Fprintf(os.Stderr,"\nFailed to generate JUnit report data:\n\t%s", err.Error()) + if err != nil { + fmt.Printf("Failed to generate JUnit report\n\t%s", err.Error()) } } diff --git a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go index c8e27b2a7..36ee2a600 100644 --- a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go +++ b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go @@ -22,9 +22,8 @@ const ( ) type TeamCityReporter struct { - writer io.Writer - testSuiteName string - ReporterConfig config.DefaultReporterConfigType + writer io.Writer + testSuiteName string } func NewTeamCityReporter(writer 
io.Writer) *TeamCityReporter { @@ -66,10 +65,6 @@ func (reporter *TeamCityReporter) SpecWillRun(specSummary *types.SpecSummary) { func (reporter *TeamCityReporter) SpecDidComplete(specSummary *types.SpecSummary) { testName := escape(strings.Join(specSummary.ComponentTexts[1:], " ")) - if reporter.ReporterConfig.ReportPassed && specSummary.State == types.SpecStatePassed { - details := escape(specSummary.CapturedOutput) - fmt.Fprintf(reporter.writer, "%s[testPassed name='%s' details='%s']", messageId, testName, details) - } if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked { message := escape(specSummary.Failure.ComponentCodeLocation.String()) details := escape(specSummary.Failure.Message) diff --git a/vendor/github.com/onsi/ginkgo/types/types.go b/vendor/github.com/onsi/ginkgo/types/types.go index e4e32b761..0e89521be 100644 --- a/vendor/github.com/onsi/ginkgo/types/types.go +++ b/vendor/github.com/onsi/ginkgo/types/types.go @@ -17,7 +17,7 @@ each node does not deterministically know how many specs it will end up running. Unfortunately making such a change would break backward compatibility. -Until Ginkgo 2.0 comes out we will continue to reuse this struct but populate unknown fields +Until Ginkgo 2.0 comes out we will continue to reuse this struct but populate unkown fields with -1. */ type SuiteSummary struct { diff --git a/vendor/github.com/pborman/uuid/.travis.yml b/vendor/github.com/pborman/uuid/.travis.yml deleted file mode 100644 index 3deb4a124..000000000 --- a/vendor/github.com/pborman/uuid/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go - -go: - - "1.9" - - "1.10" - - "1.11" - - tip - -script: - - go test -v ./... diff --git a/vendor/github.com/pborman/uuid/CONTRIBUTING.md b/vendor/github.com/pborman/uuid/CONTRIBUTING.md deleted file mode 100644 index 04fdf09f1..000000000 --- a/vendor/github.com/pborman/uuid/CONTRIBUTING.md +++ /dev/null @@ -1,10 +0,0 @@ -# How to contribute - -We definitely welcome patches and contribution to this project! - -### Legal requirements - -In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://cla.developers.google.com/clas). - -You may have already signed it for other Google projects. diff --git a/vendor/github.com/pborman/uuid/CONTRIBUTORS b/vendor/github.com/pborman/uuid/CONTRIBUTORS deleted file mode 100644 index b382a04ed..000000000 --- a/vendor/github.com/pborman/uuid/CONTRIBUTORS +++ /dev/null @@ -1 +0,0 @@ -Paul Borman diff --git a/vendor/github.com/pborman/uuid/README.md b/vendor/github.com/pborman/uuid/README.md deleted file mode 100644 index 810ad40dc..000000000 --- a/vendor/github.com/pborman/uuid/README.md +++ /dev/null @@ -1,15 +0,0 @@ -This project was automatically exported from code.google.com/p/go-uuid - -# uuid ![build status](https://travis-ci.org/pborman/uuid.svg?branch=master) -The uuid package generates and inspects UUIDs based on [RFC 4122](http://tools.ietf.org/html/rfc4122) and DCE 1.1: Authentication and Security Services. - -This package now leverages the github.com/google/uuid package (which is based off an earlier version of this package). 
- -###### Install -`go get github.com/pborman/uuid` - -###### Documentation -[![GoDoc](https://godoc.org/github.com/pborman/uuid?status.svg)](http://godoc.org/github.com/pborman/uuid) - -Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here: -http://godoc.org/github.com/pborman/uuid diff --git a/vendor/github.com/pborman/uuid/dce.go b/vendor/github.com/pborman/uuid/dce.go deleted file mode 100644 index 50a0f2d09..000000000 --- a/vendor/github.com/pborman/uuid/dce.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "fmt" - "os" -) - -// A Domain represents a Version 2 domain -type Domain byte - -// Domain constants for DCE Security (Version 2) UUIDs. -const ( - Person = Domain(0) - Group = Domain(1) - Org = Domain(2) -) - -// NewDCESecurity returns a DCE Security (Version 2) UUID. -// -// The domain should be one of Person, Group or Org. -// On a POSIX system the id should be the users UID for the Person -// domain and the users GID for the Group. The meaning of id for -// the domain Org or on non-POSIX systems is site defined. -// -// For a given domain/id pair the same token may be returned for up to -// 7 minutes and 10 seconds. -func NewDCESecurity(domain Domain, id uint32) UUID { - uuid := NewUUID() - if uuid != nil { - uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 - uuid[9] = byte(domain) - binary.BigEndian.PutUint32(uuid[0:], id) - } - return uuid -} - -// NewDCEPerson returns a DCE Security (Version 2) UUID in the person -// domain with the id returned by os.Getuid. -// -// NewDCEPerson(Person, uint32(os.Getuid())) -func NewDCEPerson() UUID { - return NewDCESecurity(Person, uint32(os.Getuid())) -} - -// NewDCEGroup returns a DCE Security (Version 2) UUID in the group -// domain with the id returned by os.Getgid. -// -// NewDCEGroup(Group, uint32(os.Getgid())) -func NewDCEGroup() UUID { - return NewDCESecurity(Group, uint32(os.Getgid())) -} - -// Domain returns the domain for a Version 2 UUID or false. -func (uuid UUID) Domain() (Domain, bool) { - if v, _ := uuid.Version(); v != 2 { - return 0, false - } - return Domain(uuid[9]), true -} - -// Id returns the id for a Version 2 UUID or false. -func (uuid UUID) Id() (uint32, bool) { - if v, _ := uuid.Version(); v != 2 { - return 0, false - } - return binary.BigEndian.Uint32(uuid[0:4]), true -} - -func (d Domain) String() string { - switch d { - case Person: - return "Person" - case Group: - return "Group" - case Org: - return "Org" - } - return fmt.Sprintf("Domain%d", int(d)) -} diff --git a/vendor/github.com/pborman/uuid/doc.go b/vendor/github.com/pborman/uuid/doc.go deleted file mode 100644 index 727d76167..000000000 --- a/vendor/github.com/pborman/uuid/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// The uuid package generates and inspects UUIDs. -// -// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security -// Services. -// -// This package is a partial wrapper around the github.com/google/uuid package. -// This package represents a UUID as []byte while github.com/google/uuid -// represents a UUID as [16]byte. 
-package uuid diff --git a/vendor/github.com/pborman/uuid/go.mod b/vendor/github.com/pborman/uuid/go.mod deleted file mode 100644 index 099fc7de0..000000000 --- a/vendor/github.com/pborman/uuid/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/pborman/uuid - -require github.com/google/uuid v1.0.0 diff --git a/vendor/github.com/pborman/uuid/go.sum b/vendor/github.com/pborman/uuid/go.sum deleted file mode 100644 index db2574a9c..000000000 --- a/vendor/github.com/pborman/uuid/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= diff --git a/vendor/github.com/pborman/uuid/hash.go b/vendor/github.com/pborman/uuid/hash.go deleted file mode 100644 index a0420c1ef..000000000 --- a/vendor/github.com/pborman/uuid/hash.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "crypto/md5" - "crypto/sha1" - "hash" -) - -// Well known Name Space IDs and UUIDs -var ( - NameSpace_DNS = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - NameSpace_URL = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8") - NameSpace_OID = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8") - NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8") - NIL = Parse("00000000-0000-0000-0000-000000000000") -) - -// NewHash returns a new UUID derived from the hash of space concatenated with -// data generated by h. The hash should be at least 16 byte in length. The -// first 16 bytes of the hash are used to form the UUID. The version of the -// UUID will be the lower 4 bits of version. NewHash is used to implement -// NewMD5 and NewSHA1. -func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { - h.Reset() - h.Write(space) - h.Write([]byte(data)) - s := h.Sum(nil) - uuid := make([]byte, 16) - copy(uuid, s) - uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) - uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant - return uuid -} - -// NewMD5 returns a new MD5 (Version 3) UUID based on the -// supplied name space and data. -// -// NewHash(md5.New(), space, data, 3) -func NewMD5(space UUID, data []byte) UUID { - return NewHash(md5.New(), space, data, 3) -} - -// NewSHA1 returns a new SHA1 (Version 5) UUID based on the -// supplied name space and data. -// -// NewHash(sha1.New(), space, data, 5) -func NewSHA1(space UUID, data []byte) UUID { - return NewHash(sha1.New(), space, data, 5) -} diff --git a/vendor/github.com/pborman/uuid/marshal.go b/vendor/github.com/pborman/uuid/marshal.go deleted file mode 100644 index 35b89352a..000000000 --- a/vendor/github.com/pborman/uuid/marshal.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "errors" - "fmt" - - guuid "github.com/google/uuid" -) - -// MarshalText implements encoding.TextMarshaler. -func (u UUID) MarshalText() ([]byte, error) { - if len(u) != 16 { - return nil, nil - } - var js [36]byte - encodeHex(js[:], u) - return js[:], nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. 
-func (u *UUID) UnmarshalText(data []byte) error { - if len(data) == 0 { - return nil - } - id := Parse(string(data)) - if id == nil { - return errors.New("invalid UUID") - } - *u = id - return nil -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (u UUID) MarshalBinary() ([]byte, error) { - return u[:], nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. -func (u *UUID) UnmarshalBinary(data []byte) error { - if len(data) == 0 { - return nil - } - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - var id [16]byte - copy(id[:], data) - *u = id[:] - return nil -} - -// MarshalText implements encoding.TextMarshaler. -func (u Array) MarshalText() ([]byte, error) { - var js [36]byte - encodeHex(js[:], u[:]) - return js[:], nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (u *Array) UnmarshalText(data []byte) error { - id, err := guuid.ParseBytes(data) - if err != nil { - return err - } - *u = Array(id) - return nil -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (u Array) MarshalBinary() ([]byte, error) { - return u[:], nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. -func (u *Array) UnmarshalBinary(data []byte) error { - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - copy(u[:], data) - return nil -} diff --git a/vendor/github.com/pborman/uuid/node.go b/vendor/github.com/pborman/uuid/node.go deleted file mode 100644 index e524e0101..000000000 --- a/vendor/github.com/pborman/uuid/node.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - guuid "github.com/google/uuid" -) - -// NodeInterface returns the name of the interface from which the NodeID was -// derived. The interface "user" is returned if the NodeID was set by -// SetNodeID. -func NodeInterface() string { - return guuid.NodeInterface() -} - -// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. -// If name is "" then the first usable interface found will be used or a random -// Node ID will be generated. If a named interface cannot be found then false -// is returned. -// -// SetNodeInterface never fails when name is "". -func SetNodeInterface(name string) bool { - return guuid.SetNodeInterface(name) -} - -// NodeID returns a slice of a copy of the current Node ID, setting the Node ID -// if not already set. -func NodeID() []byte { - return guuid.NodeID() -} - -// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes -// of id are used. If id is less than 6 bytes then false is returned and the -// Node ID is not set. -func SetNodeID(id []byte) bool { - return guuid.SetNodeID(id) -} - -// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is -// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) NodeID() []byte { - if len(uuid) != 16 { - return nil - } - node := make([]byte, 6) - copy(node, uuid[10:]) - return node -} diff --git a/vendor/github.com/pborman/uuid/sql.go b/vendor/github.com/pborman/uuid/sql.go deleted file mode 100644 index 929c3847e..000000000 --- a/vendor/github.com/pborman/uuid/sql.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package uuid - -import ( - "database/sql/driver" - "errors" - "fmt" -) - -// Scan implements sql.Scanner so UUIDs can be read from databases transparently -// Currently, database types that map to string and []byte are supported. Please -// consult database-specific driver documentation for matching types. -func (uuid *UUID) Scan(src interface{}) error { - switch src.(type) { - case string: - // if an empty UUID comes from a table, we return a null UUID - if src.(string) == "" { - return nil - } - - // see uuid.Parse for required string format - parsed := Parse(src.(string)) - - if parsed == nil { - return errors.New("Scan: invalid UUID format") - } - - *uuid = parsed - case []byte: - b := src.([]byte) - - // if an empty UUID comes from a table, we return a null UUID - if len(b) == 0 { - return nil - } - - // assumes a simple slice of bytes if 16 bytes - // otherwise attempts to parse - if len(b) == 16 { - parsed := make([]byte, 16) - copy(parsed, b) - *uuid = UUID(parsed) - } else { - u := Parse(string(b)) - - if u == nil { - return errors.New("Scan: invalid UUID format") - } - - *uuid = u - } - - default: - return fmt.Errorf("Scan: unable to scan type %T into UUID", src) - } - - return nil -} - -// Value implements sql.Valuer so that UUIDs can be written to databases -// transparently. Currently, UUIDs map to strings. Please consult -// database-specific driver documentation for matching types. -func (uuid UUID) Value() (driver.Value, error) { - return uuid.String(), nil -} diff --git a/vendor/github.com/pborman/uuid/time.go b/vendor/github.com/pborman/uuid/time.go deleted file mode 100644 index 5c0960d87..000000000 --- a/vendor/github.com/pborman/uuid/time.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - - guuid "github.com/google/uuid" -) - -// A Time represents a time as the number of 100's of nanoseconds since 15 Oct -// 1582. -type Time = guuid.Time - -// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and -// clock sequence as well as adjusting the clock sequence as needed. An error -// is returned if the current time cannot be determined. -func GetTime() (Time, uint16, error) { return guuid.GetTime() } - -// ClockSequence returns the current clock sequence, generating one if not -// already set. The clock sequence is only used for Version 1 UUIDs. -// -// The uuid package does not use global static storage for the clock sequence or -// the last time a UUID was generated. Unless SetClockSequence a new random -// clock sequence is generated the first time a clock sequence is requested by -// ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) sequence is generated -// for -func ClockSequence() int { return guuid.ClockSequence() } - -// SetClockSeq sets the clock sequence to the lower 14 bits of seq. Setting to -// -1 causes a new sequence to be generated. -func SetClockSequence(seq int) { guuid.SetClockSequence(seq) } - -// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. It returns false if uuid is not valid. The time is only well defined -// for version 1 and 2 UUIDs. 
-func (uuid UUID) Time() (Time, bool) { - if len(uuid) != 16 { - return 0, false - } - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time), true -} - -// ClockSequence returns the clock sequence encoded in uuid. It returns false -// if uuid is not valid. The clock sequence is only well defined for version 1 -// and 2 UUIDs. -func (uuid UUID) ClockSequence() (int, bool) { - if len(uuid) != 16 { - return 0, false - } - return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true -} diff --git a/vendor/github.com/pborman/uuid/util.go b/vendor/github.com/pborman/uuid/util.go deleted file mode 100644 index 255b5e248..000000000 --- a/vendor/github.com/pborman/uuid/util.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -// xvalues returns the value of a byte as a hexadecimal digit or 255. -var xvalues = [256]byte{ - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, -} - -// xtob converts the the first two hex bytes of x into a byte. -func xtob(x string) (byte, bool) { - b1 := xvalues[x[0]] - b2 := xvalues[x[1]] - return (b1 << 4) | b2, b1 != 255 && b2 != 255 -} diff --git a/vendor/github.com/pborman/uuid/uuid.go b/vendor/github.com/pborman/uuid/uuid.go deleted file mode 100644 index 337000420..000000000 --- a/vendor/github.com/pborman/uuid/uuid.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "bytes" - "crypto/rand" - "encoding/hex" - "io" - - guuid "github.com/google/uuid" -) - -// Array is a pass-by-value UUID that can be used as an effecient key in a map. -type Array [16]byte - -// UUID converts uuid into a slice. -func (uuid Array) UUID() UUID { - return uuid[:] -} - -// String returns the string representation of uuid, -// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. 
-func (uuid Array) String() string { - return guuid.UUID(uuid).String() -} - -// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC -// 4122. -type UUID []byte - -// A Version represents a UUIDs version. -type Version = guuid.Version - -// A Variant represents a UUIDs variant. -type Variant = guuid.Variant - -// Constants returned by Variant. -const ( - Invalid = guuid.Invalid // Invalid UUID - RFC4122 = guuid.RFC4122 // The variant specified in RFC4122 - Reserved = guuid.Reserved // Reserved, NCS backward compatibility. - Microsoft = guuid.Microsoft // Reserved, Microsoft Corporation backward compatibility. - Future = guuid.Future // Reserved for future definition. -) - -var rander = rand.Reader // random function - -// New returns a new random (version 4) UUID as a string. It is a convenience -// function for NewRandom().String(). -func New() string { - return NewRandom().String() -} - -// Parse decodes s into a UUID or returns nil. See github.com/google/uuid for -// the formats parsed. -func Parse(s string) UUID { - gu, err := guuid.Parse(s) - if err == nil { - return gu[:] - } - return nil -} - -// ParseBytes is like Parse, except it parses a byte slice instead of a string. -func ParseBytes(b []byte) (UUID, error) { - gu, err := guuid.ParseBytes(b) - if err == nil { - return gu[:], nil - } - return nil, err -} - -// Equal returns true if uuid1 and uuid2 are equal. -func Equal(uuid1, uuid2 UUID) bool { - return bytes.Equal(uuid1, uuid2) -} - -// Array returns an array representation of uuid that can be used as a map key. -// Array panics if uuid is not valid. -func (uuid UUID) Array() Array { - if len(uuid) != 16 { - panic("invalid uuid") - } - var a Array - copy(a[:], uuid) - return a -} - -// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx -// , or "" if uuid is invalid. -func (uuid UUID) String() string { - if len(uuid) != 16 { - return "" - } - var buf [36]byte - encodeHex(buf[:], uuid) - return string(buf[:]) -} - -// URN returns the RFC 2141 URN form of uuid, -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. -func (uuid UUID) URN() string { - if len(uuid) != 16 { - return "" - } - var buf [36 + 9]byte - copy(buf[:], "urn:uuid:") - encodeHex(buf[9:], uuid) - return string(buf[:]) -} - -func encodeHex(dst []byte, uuid UUID) { - hex.Encode(dst[:], uuid[:4]) - dst[8] = '-' - hex.Encode(dst[9:13], uuid[4:6]) - dst[13] = '-' - hex.Encode(dst[14:18], uuid[6:8]) - dst[18] = '-' - hex.Encode(dst[19:23], uuid[8:10]) - dst[23] = '-' - hex.Encode(dst[24:], uuid[10:]) -} - -// Variant returns the variant encoded in uuid. It returns Invalid if -// uuid is invalid. -func (uuid UUID) Variant() Variant { - if len(uuid) != 16 { - return Invalid - } - switch { - case (uuid[8] & 0xc0) == 0x80: - return RFC4122 - case (uuid[8] & 0xe0) == 0xc0: - return Microsoft - case (uuid[8] & 0xe0) == 0xe0: - return Future - default: - return Reserved - } -} - -// Version returns the version of uuid. It returns false if uuid is not -// valid. -func (uuid UUID) Version() (Version, bool) { - if len(uuid) != 16 { - return 0, false - } - return Version(uuid[6] >> 4), true -} - -// SetRand sets the random number generator to r, which implements io.Reader. -// If r.Read returns an error when the package requests random data then -// a panic will be issued. -// -// Calling SetRand with nil sets the random number generator to the default -// generator. 
-func SetRand(r io.Reader) { - guuid.SetRand(r) -} diff --git a/vendor/github.com/pborman/uuid/version1.go b/vendor/github.com/pborman/uuid/version1.go deleted file mode 100644 index 7af948da7..000000000 --- a/vendor/github.com/pborman/uuid/version1.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - guuid "github.com/google/uuid" -) - -// NewUUID returns a Version 1 UUID based on the current NodeID and clock -// sequence, and the current time. If the NodeID has not been set by SetNodeID -// or SetNodeInterface then it will be set automatically. If the NodeID cannot -// be set NewUUID returns nil. If clock sequence has not been set by -// SetClockSequence then it will be set automatically. If GetTime fails to -// return the current NewUUID returns nil. -func NewUUID() UUID { - gu, err := guuid.NewUUID() - if err == nil { - return UUID(gu[:]) - } - return nil -} diff --git a/vendor/github.com/pborman/uuid/version4.go b/vendor/github.com/pborman/uuid/version4.go deleted file mode 100644 index b459d46d1..000000000 --- a/vendor/github.com/pborman/uuid/version4.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import guuid "github.com/google/uuid" - -// Random returns a Random (Version 4) UUID or panics. -// -// The strength of the UUIDs is based on the strength of the crypto/rand -// package. -// -// A note about uniqueness derived from the UUID Wikipedia entry: -// -// Randomly generated UUIDs have 122 random bits. One's annual risk of being -// hit by a meteorite is estimated to be one chance in 17 billion, that -// means the probability is about 0.00000000006 (6 × 10−11), -// equivalent to the odds of creating a few tens of trillions of UUIDs in a -// year and having one duplicate. -func NewRandom() UUID { - if gu, err := guuid.NewRandom(); err == nil { - return UUID(gu[:]) - } - return nil -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 1d034f871..e3232d79f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -19,6 +19,7 @@ import ( "sort" "strings" + "github.com/cespare/xxhash/v2" "github.com/golang/protobuf/proto" "github.com/prometheus/common/model" @@ -126,24 +127,24 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * return d } - vh := hashNew() + xxh := xxhash.New() for _, val := range labelValues { - vh = hashAdd(vh, val) - vh = hashAddByte(vh, separatorByte) + xxh.WriteString(val) + xxh.Write(separatorByteSlice) } - d.id = vh + d.id = xxh.Sum64() // Sort labelNames so that order doesn't matter for the hash. sort.Strings(labelNames) // Now hash together (in this order) the help string and the sorted // label names. 
- lh := hashNew() - lh = hashAdd(lh, help) - lh = hashAddByte(lh, separatorByte) + xxh.Reset() + xxh.WriteString(help) + xxh.Write(separatorByteSlice) for _, labelName := range labelNames { - lh = hashAdd(lh, labelName) - lh = hashAddByte(lh, separatorByte) + xxh.WriteString(labelName) + xxh.Write(separatorByteSlice) } - d.dimHash = lh + d.dimHash = xxh.Sum64() d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) for n, v := range constLabels { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go index 71d406bd9..56d8cc209 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -273,9 +273,12 @@ type GaugeFunc interface { // NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The // value reported is determined by calling the given function from within the // Write method. Take into account that metric collection may happen -// concurrently. If that results in concurrent calls to Write, like in the case -// where a GaugeFunc is directly registered with Prometheus, the provided -// function must be concurrency-safe. +// concurrently. Therefore, it must be safe to call the provided function +// concurrently. +// +// NewGaugeFunc is a good way to create an “info” style metric with a constant +// value of 1. Example: +// https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56 func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { return newValueFunc(NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index d7ea67bd2..ac2614d52 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -138,7 +138,7 @@ type HistogramOpts struct { // better covered by target labels set by the scraping Prometheus // server, or by one specific metric (e.g. a build_info or a // machine_role metric). See also - // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels ConstLabels Labels // Buckets defines the buckets into which observations are counted. Each @@ -187,7 +187,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr desc: desc, upperBounds: opts.Buckets, labelPairs: makeLabelPairs(desc, labelValues), - counts: [2]*histogramCounts{&histogramCounts{}, &histogramCounts{}}, + counts: [2]*histogramCounts{{}, {}}, } for i, upperBound := range h.upperBounds { if i < len(h.upperBounds)-1 { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index 55e6d86d5..0df1eff88 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -18,11 +18,12 @@ import ( "time" "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" dto "github.com/prometheus/client_model/go" ) -const separatorByte byte = 255 +var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash. 
// A Metric models a single sample value with its meta data being exported to // Prometheus. Implementations of Metric in this package are Gauge, Counter, diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go index fa535684f..d1354b101 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -62,6 +62,8 @@ func (r *responseWriterDelegator) WriteHeader(code int) { } func (r *responseWriterDelegator) Write(b []byte) (int, error) { + // If applicable, call WriteHeader here so that observeWriteHeader is + // handled appropriately. if !r.wroteHeader { r.WriteHeader(http.StatusOK) } @@ -82,12 +84,19 @@ func (d closeNotifierDelegator) CloseNotify() <-chan bool { return d.ResponseWriter.(http.CloseNotifier).CloseNotify() } func (d flusherDelegator) Flush() { + // If applicable, call WriteHeader here so that observeWriteHeader is + // handled appropriately. + if !d.wroteHeader { + d.WriteHeader(http.StatusOK) + } d.ResponseWriter.(http.Flusher).Flush() } func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { return d.ResponseWriter.(http.Hijacker).Hijack() } func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { + // If applicable, call WriteHeader here so that observeWriteHeader is + // handled appropriately. if !d.wroteHeader { d.WriteHeader(http.StatusOK) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 6c32516aa..c05d6ee1b 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -25,6 +25,7 @@ import ( "sync" "unicode/utf8" + "github.com/cespare/xxhash/v2" "github.com/golang/protobuf/proto" "github.com/prometheus/common/expfmt" @@ -74,7 +75,7 @@ func NewRegistry() *Registry { // NewPedanticRegistry returns a registry that checks during collection if each // collected Metric is consistent with its reported Desc, and if the Desc has // actually been registered with the registry. Unchecked Collectors (those whose -// Describe methed does not yield any descriptors) are excluded from the check. +// Describe method does not yield any descriptors) are excluded from the check. // // Usually, a Registry will be happy as long as the union of all collected // Metrics is consistent and valid even if some metrics are not consistent with @@ -266,7 +267,7 @@ func (r *Registry) Register(c Collector) error { descChan = make(chan *Desc, capDescChan) newDescIDs = map[uint64]struct{}{} newDimHashesByName = map[string]uint64{} - collectorID uint64 // Just a sum of all desc IDs. + collectorID uint64 // All desc IDs XOR'd together. duplicateDescErr error ) go func() { @@ -293,12 +294,12 @@ func (r *Registry) Register(c Collector) error { if _, exists := r.descIDs[desc.id]; exists { duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc) } - // If it is not a duplicate desc in this collector, add it to + // If it is not a duplicate desc in this collector, XOR it to // the collectorID. (We allow duplicate descs within the same // collector, but their existence must be a no-op.) 
if _, exists := newDescIDs[desc.id]; !exists { newDescIDs[desc.id] = struct{}{} - collectorID += desc.id + collectorID ^= desc.id } // Are all the label names and the help string consistent with @@ -360,7 +361,7 @@ func (r *Registry) Unregister(c Collector) bool { var ( descChan = make(chan *Desc, capDescChan) descIDs = map[uint64]struct{}{} - collectorID uint64 // Just a sum of the desc IDs. + collectorID uint64 // All desc IDs XOR'd together. ) go func() { c.Describe(descChan) @@ -368,7 +369,7 @@ func (r *Registry) Unregister(c Collector) bool { }() for desc := range descChan { if _, exists := descIDs[desc.id]; !exists { - collectorID += desc.id + collectorID ^= desc.id descIDs[desc.id] = struct{}{} } } @@ -875,9 +876,9 @@ func checkMetricConsistency( } // Is the metric unique (i.e. no other metric with the same name and the same labels)? - h := hashNew() - h = hashAdd(h, name) - h = hashAddByte(h, separatorByte) + h := xxhash.New() + h.WriteString(name) + h.Write(separatorByteSlice) // Make sure label pairs are sorted. We depend on it for the consistency // check. if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) { @@ -888,18 +889,19 @@ func checkMetricConsistency( dtoMetric.Label = copiedLabels } for _, lp := range dtoMetric.Label { - h = hashAdd(h, lp.GetName()) - h = hashAddByte(h, separatorByte) - h = hashAdd(h, lp.GetValue()) - h = hashAddByte(h, separatorByte) + h.WriteString(lp.GetName()) + h.Write(separatorByteSlice) + h.WriteString(lp.GetValue()) + h.Write(separatorByteSlice) } - if _, exists := metricHashes[h]; exists { + hSum := h.Sum64() + if _, exists := metricHashes[hSum]; exists { return fmt.Errorf( "collected metric %q { %s} was collected before with the same name and label values", name, dtoMetric, ) } - metricHashes[h] = struct{}{} + metricHashes[hSum] = struct{}{} return nil } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index c970fdee0..ae42e761a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -208,7 +208,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { s := &noObjectivesSummary{ desc: desc, labelPairs: makeLabelPairs(desc, labelValues), - counts: [2]*summaryCounts{&summaryCounts{}, &summaryCounts{}}, + counts: [2]*summaryCounts{{}, {}}, } s.init(s) // Init self-collection. return s diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 14ed9e856..19df3fe6b 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -24,7 +24,7 @@ import ( // their label values. metricVec is not used directly (and therefore // unexported). It is used as a building block for implementations of vectors of // a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec. -// It also handles label currying. It uses basicMetricVec internally. +// It also handles label currying. 
type metricVec struct { *metricMap diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index 8e473d0fe..0327865ee 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -14,9 +14,10 @@ package expfmt import ( - "bytes" + "bufio" "fmt" "io" + "io/ioutil" "math" "strconv" "strings" @@ -27,7 +28,7 @@ import ( dto "github.com/prometheus/client_model/go" ) -// enhancedWriter has all the enhanced write functions needed here. bytes.Buffer +// enhancedWriter has all the enhanced write functions needed here. bufio.Writer // implements it. type enhancedWriter interface { io.Writer @@ -37,14 +38,13 @@ type enhancedWriter interface { } const ( - initialBufSize = 512 initialNumBufSize = 24 ) var ( bufPool = sync.Pool{ New: func() interface{} { - return bytes.NewBuffer(make([]byte, 0, initialBufSize)) + return bufio.NewWriter(ioutil.Discard) }, } numBufPool = sync.Pool{ @@ -75,16 +75,14 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e } // Try the interface upgrade. If it doesn't work, we'll use a - // bytes.Buffer from the sync.Pool and write out its content to out in a - // single go in the end. + // bufio.Writer from the sync.Pool. w, ok := out.(enhancedWriter) if !ok { - b := bufPool.Get().(*bytes.Buffer) - b.Reset() + b := bufPool.Get().(*bufio.Writer) + b.Reset(out) w = b defer func() { - bWritten, bErr := out.Write(b.Bytes()) - written = bWritten + bErr := b.Flush() if err == nil { err = bErr } diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index ec3d86ba7..342e5940d 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -325,7 +325,7 @@ func (p *TextParser) startLabelValue() stateFn { // - Other labels have to be added to currentLabels for signature calculation. if p.currentMF.GetType() == dto.MetricType_SUMMARY { if p.currentLabelPair.GetName() == model.QuantileLabel { - if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { // Create a more helpful error message. p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) return nil @@ -337,7 +337,7 @@ func (p *TextParser) startLabelValue() stateFn { // Similar special treatment of histograms. if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { if p.currentLabelPair.GetName() == model.BucketLabel { - if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { // Create a more helpful error message. p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) return nil @@ -392,7 +392,7 @@ func (p *TextParser) readingValue() stateFn { if p.readTokenUntilWhitespace(); p.err != nil { return nil // Unexpected end of input. } - value, err := strconv.ParseFloat(p.currentToken.String(), 64) + value, err := parseFloat(p.currentToken.String()) if err != nil { // Create a more helpful error message. 
p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) @@ -755,3 +755,10 @@ func histogramMetricName(name string) string { return name } } + +func parseFloat(s string) (float64, error) { + if strings.ContainsAny(s, "pP_") { + return 0, fmt.Errorf("unsupported character in float") + } + return strconv.ParseFloat(s, 64) +} diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml index 438ca92ec..7c4ce1fa8 100644 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -1,6 +1,4 @@ -# Run only staticcheck for now. Additional linters will be enabled one-by-one. linters: enable: - staticcheck - govet - disable-all: true diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md index 40503edbf..943de7615 100644 --- a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md +++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md @@ -2,17 +2,120 @@ Prometheus uses GitHub to manage reviews of pull requests. +* If you are a new contributor see: [Steps to Contribute](#steps-to-contribute) + * If you have a trivial fix or improvement, go ahead and create a pull request, - addressing (with `@...`) the maintainer of this repository (see + addressing (with `@...`) a suitable maintainer of this repository (see [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. * If you plan to do something more involved, first discuss your ideas on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). This will avoid unnecessary work and surely give you and us a good deal - of inspiration. + of inspiration. Also please see our [non-goals issue](https://github.com/prometheus/docs/issues/149) on areas that the Prometheus community doesn't plan to work on. * Relevant coding style guidelines are the [Go Code Review Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) and the _Formatting and style_ section of Peter Bourgon's [Go: Best Practices for Production - Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). + Environments](https://peter.bourgon.org/go-in-production/#formatting-and-style). + +* Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works) + +## Steps to Contribute + +Should you wish to work on an issue, please claim it first by commenting on the GitHub issue that you want to work on it. This is to prevent duplicated efforts from contributors on the same issue. + +Please check the [`help-wanted`](https://github.com/prometheus/procfs/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) label to find issues that are good for getting started. If you have questions about one of the issues, with or without the tag, please comment on them and one of the maintainers will clarify it. For a quicker response, contact us over [IRC](https://prometheus.io/community). + +For quickly compiling and testing your changes do: +``` +make test # Make sure all the tests pass before you commit and push :) +``` + +We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. 
Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action. + +## Pull Request Checklist + +* Branch from the master branch and, if needed, rebase to the current master branch before submitting your pull request. If it doesn't merge cleanly with master you may be asked to rebase your changes. + +* Commits should be as small as possible, while ensuring that each commit is correct independently (i.e., each commit should compile and pass tests). + +* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on IRC channel [#prometheus](https://webchat.freenode.net/?channels=#prometheus) on irc.freenode.net (for the easiest start, [join via Riot](https://riot.im/app/#/room/#prometheus:matrix.org)). + +* Add tests relevant to the fixed bug or new feature. + +## Dependency management + +The Prometheus project uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies on external packages. This requires a working Go environment with version 1.12 or greater installed. + +All dependencies are vendored in the `vendor/` directory. + +To add or update a new dependency, use the `go get` command: + +```bash +# Pick the latest tagged release. +go get example.com/some/module/pkg + +# Pick a specific version. +go get example.com/some/module/pkg@vX.Y.Z +``` + +Tidy up the `go.mod` and `go.sum` files and copy the new/updated dependency to the `vendor/` directory: + + +```bash +# The GO111MODULE variable can be omitted when the code isn't located in GOPATH. +GO111MODULE=on go mod tidy + +GO111MODULE=on go mod vendor +``` + +You have to commit the changes to `go.mod`, `go.sum` and the `vendor/` directory before submitting the pull request. + + +## API Implementation Guidelines + +### Naming and Documentation + +Public functions and structs should normally be named according to the file(s) being read and parsed. For example, +the `fs.BuddyInfo()` function reads the file `/proc/buddyinfo`. In addition, the godoc for each public function +should contain the path to the file(s) being read and a URL of the linux kernel documentation describing the file(s). + +### Reading vs. Parsing + +Most functionality in this library consists of reading files and then parsing the text into structured data. In most +cases reading and parsing should be separated into different functions/methods with a public `fs.Thing()` method and +a private `parseThing(r Reader)` function. This provides a logical separation and allows parsing to be tested +directly without the need to read from the filesystem. Using a `Reader` argument is preferred over other data types +such as `string` or `*File` because it provides the most flexibility regarding the data source. When a set of files +in a directory needs to be parsed, then a `path` string parameter to the parse function can be used instead. + +### /proc and /sys filesystem I/O + +The `proc` and `sys` filesystems are pseudo file systems and work a bit differently from standard disk I/O. +Many of the files are changing continuously and the data being read can in some cases change between subsequent +reads in the same file. Also, most of the files are relatively small (less than a few KBs), and system calls +to the `stat` function will often return the wrong size. 
Therefore, for most files it's recommended to read the +full file in a single operation using an internal utility function called `util.ReadFileNoStat`. +This function is similar to `ioutil.ReadFile`, but it avoids the system call to `stat` to get the current size of +the file. + +Note that parsing the file's contents can still be performed one line at a time. This is done by first reading +the full file, and then using a scanner on the `[]byte` or `string` containing the data. + +``` + data, err := util.ReadFileNoStat("/proc/cpuinfo") + if err != nil { + return err + } + reader := bytes.NewReader(data) + scanner := bufio.NewScanner(reader) +``` + +The `/sys` filesystem contains many very small files which contain only a single numeric or text value. These files +can be read using an internal function called `util.SysReadFile` which is similar to `ioutil.ReadFile` but does +not bother to check the size of the file before reading. +``` + data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity") +``` + diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md index 6f8850feb..55d1e3261 100644 --- a/vendor/github.com/prometheus/procfs/README.md +++ b/vendor/github.com/prometheus/procfs/README.md @@ -1,6 +1,6 @@ # procfs -This procfs package provides functions to retrieve system, kernel and process +This package provides functions to retrieve system, kernel, and process metrics from the pseudo-filesystems /proc and /sys. *WARNING*: This package is a work in progress. Its API may still break in @@ -13,7 +13,8 @@ backwards-incompatible ways without warnings. Use it at your own risk. ## Usage The procfs library is organized by packages based on whether the gathered data is coming from -/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc, /sys, or both. For example, current cpu statistics are gathered from +/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc, +/sys, or both. For example, cpu statistics are gathered from `/proc/stat` and are available via the root procfs package. First, the proc filesystem mount point is initialized, and then the stat information is read. @@ -29,10 +30,17 @@ Some sub-packages such as `blockdevice`, require access to both the proc and sys stats, err := fs.ProcDiskstats() ``` +## Package Organization + +The packages in this project are organized according to (1) whether the data comes from the `/proc` or +`/sys` filesystem and (2) the type of information being retrieved. For example, most process information +can be gathered from the functions in the root `procfs` package. Information about block devices such as disk drives +is available in the `blockdevices` sub-package. + ## Building and Testing -The procfs library is normally built as part of another application. However, when making -changes to the library, the `make test` command can be used to run the API test suite. +The procfs library is intended to be built as part of another application, so there are no distributable binaries. +However, most of the API includes unit tests which can be run with `make test`. 
### Updating Test Fixtures diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go new file mode 100644 index 000000000..916c9182a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/arp.go @@ -0,0 +1,85 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "io/ioutil" + "net" + "strings" +) + +// ARPEntry contains a single row of the columnar data represented in +// /proc/net/arp. +type ARPEntry struct { + // IP address + IPAddr net.IP + // MAC address + HWAddr net.HardwareAddr + // Name of the device + Device string +} + +// GatherARPEntries retrieves all the ARP entries, parse the relevant columns, +// and then return a slice of ARPEntry's. +func (fs FS) GatherARPEntries() ([]ARPEntry, error) { + data, err := ioutil.ReadFile(fs.proc.Path("net/arp")) + if err != nil { + return nil, fmt.Errorf("error reading arp %s: %s", fs.proc.Path("net/arp"), err) + } + + return parseARPEntries(data) +} + +func parseARPEntries(data []byte) ([]ARPEntry, error) { + lines := strings.Split(string(data), "\n") + entries := make([]ARPEntry, 0) + var err error + const ( + expectedDataWidth = 6 + expectedHeaderWidth = 9 + ) + for _, line := range lines { + columns := strings.Fields(line) + width := len(columns) + + if width == expectedHeaderWidth || width == 0 { + continue + } else if width == expectedDataWidth { + entry, err := parseARPEntry(columns) + if err != nil { + return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %s", err) + } + entries = append(entries, entry) + } else { + return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth) + } + + } + + return entries, err +} + +func parseARPEntry(columns []string) (ARPEntry, error) { + ip := net.ParseIP(columns[0]) + mac := net.HardwareAddr(columns[3]) + + entry := ARPEntry{ + IPAddr: ip, + HWAddr: mac, + Device: columns[5], + } + + return entry, nil +} diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go index 63d4229a4..10bd067a0 100644 --- a/vendor/github.com/prometheus/procfs/buddyinfo.go +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -31,7 +31,7 @@ type BuddyInfo struct { Sizes []float64 } -// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. +// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. func (fs FS) BuddyInfo() ([]BuddyInfo, error) { file, err := os.Open(fs.proc.Path("buddyinfo")) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go new file mode 100644 index 000000000..2e0221552 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -0,0 +1,167 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// CPUInfo contains general information about a system CPU found in /proc/cpuinfo +type CPUInfo struct { + Processor uint + VendorID string + CPUFamily string + Model string + ModelName string + Stepping string + Microcode string + CPUMHz float64 + CacheSize string + PhysicalID string + Siblings uint + CoreID string + CPUCores uint + APICID string + InitialAPICID string + FPU string + FPUException string + CPUIDLevel uint + WP string + Flags []string + Bugs []string + BogoMips float64 + CLFlushSize uint + CacheAlignment uint + AddressSizes string + PowerManagement string +} + +// CPUInfo returns information about current system CPUs. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +func (fs FS) CPUInfo() ([]CPUInfo, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("cpuinfo")) + if err != nil { + return nil, err + } + return parseCPUInfo(data) +} + +// parseCPUInfo parses data from /proc/cpuinfo +func parseCPUInfo(info []byte) ([]CPUInfo, error) { + cpuinfo := []CPUInfo{} + i := -1 + scanner := bufio.NewScanner(bytes.NewReader(info)) + for scanner.Scan() { + line := scanner.Text() + if strings.TrimSpace(line) == "" { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + i++ + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Processor = uint(v) + case "vendor_id": + cpuinfo[i].VendorID = field[1] + case "cpu family": + cpuinfo[i].CPUFamily = field[1] + case "model": + cpuinfo[i].Model = field[1] + case "model name": + cpuinfo[i].ModelName = field[1] + case "stepping": + cpuinfo[i].Stepping = field[1] + case "microcode": + cpuinfo[i].Microcode = field[1] + case "cpu MHz": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].CPUMHz = v + case "cache size": + cpuinfo[i].CacheSize = field[1] + case "physical id": + cpuinfo[i].PhysicalID = field[1] + case "siblings": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Siblings = uint(v) + case "core id": + cpuinfo[i].CoreID = field[1] + case "cpu cores": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CPUCores = uint(v) + case "apicid": + cpuinfo[i].APICID = field[1] + case "initial apicid": + cpuinfo[i].InitialAPICID = field[1] + case "fpu": + cpuinfo[i].FPU = field[1] + case "fpu_exception": + cpuinfo[i].FPUException = field[1] + case "cpuid level": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CPUIDLevel = uint(v) + case "wp": + cpuinfo[i].WP = field[1] + case "flags": + cpuinfo[i].Flags = strings.Fields(field[1]) + case "bugs": + cpuinfo[i].Bugs = strings.Fields(field[1]) + case "bogomips": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].BogoMips 
= v + case "clflush size": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CLFlushSize = uint(v) + case "cache_alignment": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CacheAlignment = uint(v) + case "address sizes": + cpuinfo[i].AddressSizes = field[1] + case "power management": + cpuinfo[i].PowerManagement = field[1] + } + } + return cpuinfo, nil + +} diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go new file mode 100644 index 000000000..19d4041b2 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/crypto.go @@ -0,0 +1,131 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bytes" + "fmt" + "io/ioutil" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Crypto holds info parsed from /proc/crypto. +type Crypto struct { + Alignmask *uint64 + Async bool + Blocksize *uint64 + Chunksize *uint64 + Ctxsize *uint64 + Digestsize *uint64 + Driver string + Geniv string + Internal string + Ivsize *uint64 + Maxauthsize *uint64 + MaxKeysize *uint64 + MinKeysize *uint64 + Module string + Name string + Priority *int64 + Refcnt *int64 + Seedsize *uint64 + Selftest string + Type string + Walksize *uint64 +} + +// Crypto parses an crypto-file (/proc/crypto) and returns a slice of +// structs containing the relevant info. 
More information available here: +// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html +func (fs FS) Crypto() ([]Crypto, error) { + data, err := ioutil.ReadFile(fs.proc.Path("crypto")) + if err != nil { + return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err) + } + crypto, err := parseCrypto(data) + if err != nil { + return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err) + } + return crypto, nil +} + +func parseCrypto(cryptoData []byte) ([]Crypto, error) { + crypto := []Crypto{} + + cryptoBlocks := bytes.Split(cryptoData, []byte("\n\n")) + + for _, block := range cryptoBlocks { + var newCryptoElem Crypto + + lines := strings.Split(string(block), "\n") + for _, line := range lines { + if strings.TrimSpace(line) == "" || line[0] == ' ' { + continue + } + fields := strings.Split(line, ":") + key := strings.TrimSpace(fields[0]) + value := strings.TrimSpace(fields[1]) + vp := util.NewValueParser(value) + + switch strings.TrimSpace(key) { + case "async": + b, err := strconv.ParseBool(value) + if err == nil { + newCryptoElem.Async = b + } + case "blocksize": + newCryptoElem.Blocksize = vp.PUInt64() + case "chunksize": + newCryptoElem.Chunksize = vp.PUInt64() + case "digestsize": + newCryptoElem.Digestsize = vp.PUInt64() + case "driver": + newCryptoElem.Driver = value + case "geniv": + newCryptoElem.Geniv = value + case "internal": + newCryptoElem.Internal = value + case "ivsize": + newCryptoElem.Ivsize = vp.PUInt64() + case "maxauthsize": + newCryptoElem.Maxauthsize = vp.PUInt64() + case "max keysize": + newCryptoElem.MaxKeysize = vp.PUInt64() + case "min keysize": + newCryptoElem.MinKeysize = vp.PUInt64() + case "module": + newCryptoElem.Module = value + case "name": + newCryptoElem.Name = value + case "priority": + newCryptoElem.Priority = vp.PInt64() + case "refcnt": + newCryptoElem.Refcnt = vp.PInt64() + case "seedsize": + newCryptoElem.Seedsize = vp.PUInt64() + case "selftest": + newCryptoElem.Selftest = value + case "type": + newCryptoElem.Type = value + case "walksize": + newCryptoElem.Walksize = vp.PUInt64() + } + } + crypto = append(crypto, newCryptoElem) + } + return crypto, nil +} diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar index 6b42e7ba1..c50a18ace 100644 --- a/vendor/github.com/prometheus/procfs/fixtures.ttar +++ b/vendor/github.com/prometheus/procfs/fixtures.ttar @@ -47,6 +47,48 @@ SymlinkTo: ../../symlinktargets/ghi Path: fixtures/proc/26231/fd/3 SymlinkTo: ../../symlinktargets/uvw # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/26231/fdinfo +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fdinfo/0 +Lines: 6 +pos: 0 +flags: 02004000 +mnt_id: 13 +inotify wd:3 ino:1 sdev:34 mask:fce ignored_mask:0 fhandle-bytes:c fhandle-type:81 f_handle:000000000100000000000000 +inotify wd:2 ino:1300016 sdev:fd00002 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:16003001ed3f022a +inotify wd:1 ino:2e0001 sdev:fd00000 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:01002e00138e7c65 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fdinfo/1 +Lines: 4 +pos: 0 +flags: 02004002 +mnt_id: 13 +eventfd-count: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fdinfo/10 +Lines: 3 
+pos: 0 +flags: 02004002 +mnt_id: 9 +Mode: 400 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fdinfo/2 +Lines: 3 +pos: 0 +flags: 02004002 +mnt_id: 9 +Mode: 400 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fdinfo/3 +Lines: 3 +pos: 0 +flags: 02004002 +mnt_id: 9 +Mode: 400 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/proc/26231/io Lines: 7 rchar: 750339 @@ -126,6 +168,11 @@ SymlinkTo: net:[4026531993] Path: fixtures/proc/26231/root SymlinkTo: / # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/schedstat +Lines: 1 +411605849 93680043 79 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/proc/26231/stat Lines: 1 26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 @@ -137,10 +184,10 @@ Lines: 53 Name: prometheus Umask: 0022 State: S (sleeping) -Tgid: 1 +Tgid: 26231 Ngid: 0 -Pid: 1 -PPid: 0 +Pid: 26231 +PPid: 1 TracerPid: 0 Uid: 0 0 0 0 Gid: 0 0 0 0 @@ -258,6 +305,18 @@ Lines: 1 com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26233/schedstat +Lines: 8 + ____________________________________ +< this is a malformed schedstat file > + ------------------------------------ + \ ^__^ + \ (oo)\_______ + (__)\ )\/\ + ||----w | + || || +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/proc/584 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -274,6 +333,1201 @@ Node 0, zone DMA32 759 572 791 475 194 45 12 0 Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/cpuinfo +Lines: 216 +processor : 0 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 799.998 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 0 +cpu cores : 4 +apicid : 0 +initial apicid : 0 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 
xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 1 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.037 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 1 +cpu cores : 4 +apicid : 2 +initial apicid : 2 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 2 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.010 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 2 +cpu cores : 4 +apicid : 4 +initial apicid : 4 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 3 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.028 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 3 +cpu 
cores : 4 +apicid : 6 +initial apicid : 6 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 4 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 799.989 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 0 +cpu cores : 4 +apicid : 1 +initial apicid : 1 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 5 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.083 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 1 +cpu cores : 4 +apicid : 3 +initial apicid : 3 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : 
cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 6 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.017 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 2 +cpu cores : 4 +apicid : 5 +initial apicid : 5 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 7 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.030 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 3 +cpu cores : 4 +apicid : 7 +initial apicid : 7 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/crypto +Lines: 971 +name : ccm(aes) +driver : ccm_base(ctr(aes-aesni),cbcmac(aes-aesni)) +module : ccm +priority : 300 +refcnt : 4 +selftest : passed +internal : no +type : aead +async : no +blocksize : 1 +ivsize : 16 +maxauthsize : 16 +geniv : + +name : cbcmac(aes) +driver : cbcmac(aes-aesni) +module : ccm +priority : 300 +refcnt : 7 +selftest : passed +internal : no +type : shash +blocksize : 1 +digestsize : 16 + +name : ecdh +driver : ecdh-generic +module : ecdh_generic +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : kpp + +name : ecb(arc4) 
+driver : ecb(arc4)-generic +module : arc4 +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : skcipher +async : no +blocksize : 1 +min keysize : 1 +max keysize : 256 +ivsize : 0 +chunksize : 1 +walksize : 1 + +name : arc4 +driver : arc4-generic +module : arc4 +priority : 0 +refcnt : 3 +selftest : passed +internal : no +type : cipher +blocksize : 1 +min keysize : 1 +max keysize : 256 + +name : crct10dif +driver : crct10dif-pclmul +module : crct10dif_pclmul +priority : 200 +refcnt : 2 +selftest : passed +internal : no +type : shash +blocksize : 1 +digestsize : 2 + +name : crc32 +driver : crc32-pclmul +module : crc32_pclmul +priority : 200 +refcnt : 1 +selftest : passed +internal : no +type : shash +blocksize : 1 +digestsize : 4 + +name : __ghash +driver : cryptd(__ghash-pclmulqdqni) +module : kernel +priority : 50 +refcnt : 1 +selftest : passed +internal : yes +type : ahash +async : yes +blocksize : 16 +digestsize : 16 + +name : ghash +driver : ghash-clmulni +module : ghash_clmulni_intel +priority : 400 +refcnt : 1 +selftest : passed +internal : no +type : ahash +async : yes +blocksize : 16 +digestsize : 16 + +name : __ghash +driver : __ghash-pclmulqdqni +module : ghash_clmulni_intel +priority : 0 +refcnt : 1 +selftest : passed +internal : yes +type : shash +blocksize : 16 +digestsize : 16 + +name : crc32c +driver : crc32c-intel +module : crc32c_intel +priority : 200 +refcnt : 5 +selftest : passed +internal : no +type : shash +blocksize : 1 +digestsize : 4 + +name : cbc(aes) +driver : cbc(aes-aesni) +module : kernel +priority : 300 +refcnt : 1 +selftest : passed +internal : no +type : skcipher +async : no +blocksize : 16 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : ctr(aes) +driver : ctr(aes-aesni) +module : kernel +priority : 300 +refcnt : 5 +selftest : passed +internal : no +type : skcipher +async : no +blocksize : 1 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : pkcs1pad(rsa,sha256) +driver : pkcs1pad(rsa-generic,sha256) +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : akcipher + +name : __xts(aes) +driver : cryptd(__xts-aes-aesni) +module : kernel +priority : 451 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : yes +blocksize : 16 +min keysize : 32 +max keysize : 64 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : xts(aes) +driver : xts-aes-aesni +module : kernel +priority : 401 +refcnt : 1 +selftest : passed +internal : no +type : skcipher +async : yes +blocksize : 16 +min keysize : 32 +max keysize : 64 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : __ctr(aes) +driver : cryptd(__ctr-aes-aesni) +module : kernel +priority : 450 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : yes +blocksize : 1 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : ctr(aes) +driver : ctr-aes-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : no +type : skcipher +async : yes +blocksize : 1 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : __cbc(aes) +driver : cryptd(__cbc-aes-aesni) +module : kernel +priority : 450 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : yes +blocksize : 16 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : cbc(aes) +driver : cbc-aes-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : 
no +type : skcipher +async : yes +blocksize : 16 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : __ecb(aes) +driver : cryptd(__ecb-aes-aesni) +module : kernel +priority : 450 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : yes +blocksize : 16 +min keysize : 16 +max keysize : 32 +ivsize : 0 +chunksize : 16 +walksize : 16 + +name : ecb(aes) +driver : ecb-aes-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : no +type : skcipher +async : yes +blocksize : 16 +min keysize : 16 +max keysize : 32 +ivsize : 0 +chunksize : 16 +walksize : 16 + +name : __generic-gcm-aes-aesni +driver : cryptd(__driver-generic-gcm-aes-aesni) +module : kernel +priority : 50 +refcnt : 1 +selftest : passed +internal : yes +type : aead +async : yes +blocksize : 1 +ivsize : 12 +maxauthsize : 16 +geniv : + +name : gcm(aes) +driver : generic-gcm-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : no +type : aead +async : yes +blocksize : 1 +ivsize : 12 +maxauthsize : 16 +geniv : + +name : __generic-gcm-aes-aesni +driver : __driver-generic-gcm-aes-aesni +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : yes +type : aead +async : no +blocksize : 1 +ivsize : 12 +maxauthsize : 16 +geniv : + +name : __gcm-aes-aesni +driver : cryptd(__driver-gcm-aes-aesni) +module : kernel +priority : 50 +refcnt : 1 +selftest : passed +internal : yes +type : aead +async : yes +blocksize : 1 +ivsize : 8 +maxauthsize : 16 +geniv : + +name : rfc4106(gcm(aes)) +driver : rfc4106-gcm-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : no +type : aead +async : yes +blocksize : 1 +ivsize : 8 +maxauthsize : 16 +geniv : + +name : __gcm-aes-aesni +driver : __driver-gcm-aes-aesni +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : yes +type : aead +async : no +blocksize : 1 +ivsize : 8 +maxauthsize : 16 +geniv : + +name : __xts(aes) +driver : __xts-aes-aesni +module : kernel +priority : 401 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : no +blocksize : 16 +min keysize : 32 +max keysize : 64 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : __ctr(aes) +driver : __ctr-aes-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : no +blocksize : 1 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : __cbc(aes) +driver : __cbc-aes-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : no +blocksize : 16 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : __ecb(aes) +driver : __ecb-aes-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : no +blocksize : 16 +min keysize : 16 +max keysize : 32 +ivsize : 0 +chunksize : 16 +walksize : 16 + +name : __aes +driver : __aes-aesni +module : kernel +priority : 300 +refcnt : 1 +selftest : passed +internal : yes +type : cipher +blocksize : 16 +min keysize : 16 +max keysize : 32 + +name : aes +driver : aes-aesni +module : kernel +priority : 300 +refcnt : 8 +selftest : passed +internal : no +type : cipher +blocksize : 16 +min keysize : 16 +max keysize : 32 + +name : hmac(sha1) +driver : hmac(sha1-generic) +module : kernel +priority : 100 +refcnt : 9 +selftest : passed +internal : no +type : shash +blocksize : 64 +digestsize : 20 + +name : ghash +driver : 
ghash-generic +module : kernel +priority : 100 +refcnt : 3 +selftest : passed +internal : no +type : shash +blocksize : 16 +digestsize : 16 + +name : jitterentropy_rng +driver : jitterentropy_rng +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_hmac_sha256 +module : kernel +priority : 221 +refcnt : 2 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_hmac_sha512 +module : kernel +priority : 220 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_hmac_sha384 +module : kernel +priority : 219 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_hmac_sha1 +module : kernel +priority : 218 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_sha256 +module : kernel +priority : 217 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_sha512 +module : kernel +priority : 216 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_sha384 +module : kernel +priority : 215 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_sha1 +module : kernel +priority : 214 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_ctr_aes256 +module : kernel +priority : 213 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_ctr_aes192 +module : kernel +priority : 212 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_ctr_aes128 +module : kernel +priority : 211 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : hmac(sha256) +driver : hmac(sha256-generic) +module : kernel +priority : 100 +refcnt : 10 +selftest : passed +internal : no +type : shash +blocksize : 64 +digestsize : 32 + +name : stdrng +driver : drbg_pr_hmac_sha256 +module : kernel +priority : 210 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_hmac_sha512 +module : kernel +priority : 209 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_hmac_sha384 +module : kernel +priority : 208 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_hmac_sha1 +module : kernel +priority : 207 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_sha256 +module : kernel +priority : 206 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_sha512 +module : kernel +priority : 205 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_sha384 +module : kernel +priority : 204 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_sha1 +module : kernel +priority : 203 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_ctr_aes256 +module : kernel +priority : 202 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_ctr_aes192 +module : kernel 
+priority : 201 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_ctr_aes128 +module : kernel +priority : 200 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : 842 +driver : 842-scomp +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : scomp + +name : 842 +driver : 842-generic +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : compression + +name : lzo-rle +driver : lzo-rle-scomp +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : scomp + +name : lzo-rle +driver : lzo-rle-generic +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : compression + +name : lzo +driver : lzo-scomp +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : scomp + +name : lzo +driver : lzo-generic +module : kernel +priority : 0 +refcnt : 9 +selftest : passed +internal : no +type : compression + +name : crct10dif +driver : crct10dif-generic +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : shash +blocksize : 1 +digestsize : 2 + +name : crc32c +driver : crc32c-generic +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : shash +blocksize : 1 +digestsize : 4 + +name : zlib-deflate +driver : zlib-deflate-scomp +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : scomp + +name : deflate +driver : deflate-scomp +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : scomp + +name : deflate +driver : deflate-generic +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : compression + +name : aes +driver : aes-generic +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : cipher +blocksize : 16 +min keysize : 16 +max keysize : 32 + +name : sha224 +driver : sha224-generic +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : shash +blocksize : 64 +digestsize : 28 + +name : sha256 +driver : sha256-generic +module : kernel +priority : 100 +refcnt : 11 +selftest : passed +internal : no +type : shash +blocksize : 64 +digestsize : 32 + +name : sha1 +driver : sha1-generic +module : kernel +priority : 100 +refcnt : 11 +selftest : passed +internal : no +type : shash +blocksize : 64 +digestsize : 20 + +name : md5 +driver : md5-generic +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : shash +blocksize : 64 +digestsize : 16 + +name : ecb(cipher_null) +driver : ecb-cipher_null +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : skcipher +async : no +blocksize : 1 +min keysize : 0 +max keysize : 0 +ivsize : 0 +chunksize : 1 +walksize : 1 + +name : digest_null +driver : digest_null-generic +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : shash +blocksize : 1 +digestsize : 0 + +name : compress_null +driver : compress_null-generic +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : compression + +name : cipher_null +driver : cipher_null-generic +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : cipher +blocksize : 1 +min keysize : 0 +max keysize : 0 + +name : rsa +driver : rsa-generic +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : akcipher + +name : dh 
+driver : dh-generic +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : kpp + +name : aes +driver : aes-asm +module : kernel +priority : 200 +refcnt : 1 +selftest : passed +internal : no +type : cipher +blocksize : 16 +min keysize : 16 +max keysize : 32 + +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/proc/diskstats Lines: 49 1 0 ram0 0 0 0 0 0 0 0 0 0 0 0 @@ -420,9 +1674,61 @@ md101 : active (read-only) raid0 sdb[2] sdd[1] sdc[0] unused devices: Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/meminfo +Lines: 42 +MemTotal: 15666184 kB +MemFree: 440324 kB +Buffers: 1020128 kB +Cached: 12007640 kB +SwapCached: 0 kB +Active: 6761276 kB +Inactive: 6532708 kB +Active(anon): 267256 kB +Inactive(anon): 268 kB +Active(file): 6494020 kB +Inactive(file): 6532440 kB +Unevictable: 0 kB +Mlocked: 0 kB +SwapTotal: 0 kB +SwapFree: 0 kB +Dirty: 768 kB +Writeback: 0 kB +AnonPages: 266216 kB +Mapped: 44204 kB +Shmem: 1308 kB +Slab: 1807264 kB +SReclaimable: 1738124 kB +SUnreclaim: 69140 kB +KernelStack: 1616 kB +PageTables: 5288 kB +NFS_Unstable: 0 kB +Bounce: 0 kB +WritebackTmp: 0 kB +CommitLimit: 7833092 kB +Committed_AS: 530844 kB +VmallocTotal: 34359738367 kB +VmallocUsed: 36596 kB +VmallocChunk: 34359637840 kB +HardwareCorrupted: 0 kB +AnonHugePages: 12288 kB +HugePages_Total: 0 +HugePages_Free: 0 +HugePages_Rsvd: 0 +HugePages_Surp: 0 +Hugepagesize: 2048 kB +DirectMap4k: 91136 kB +DirectMap2M: 16039936 kB +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/proc/net Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/arp +Lines: 2 +IP address HW type Flags HW address Mask Device +192.168.224.1 0x1 0x2 00:50:56:c0:00:08 * ens33 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/proc/net/dev Lines: 6 Inter-| Receive | Transmit @@ -495,6 +1801,30 @@ proc4 2 2 10853 proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/sockstat +Lines: 6 +sockets: used 1602 +TCP: inuse 35 orphan 0 tw 4 alloc 59 mem 22 +UDP: inuse 12 mem 62 +UDPLITE: inuse 0 +RAW: inuse 0 +FRAG: inuse 0 memory 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/sockstat6 +Lines: 5 +TCP6: inuse 17 +UDP6: inuse 9 +UDPLITE6: inuse 0 +RAW6: inuse 1 +FRAG6: inuse 0 memory 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/softnet_stat +Lines: 1 +00015c73 00020e76 F0000769 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/proc/net/unix Lines: 6 Num RefCount Protocol Flags Type St Inode Path @@ -567,6 +1897,16 @@ some avg10=0.10 avg60=2.00 avg300=3.85 total=15 full avg10=0.20 avg60=3.00 avg300=4.95 total=25 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/schedstat +Lines: 6 +version 15 +timestamp 15819019232 +cpu0 498494191 0 3533438552 2553969831 3853684107 2465731542 
2045936778163039 343796328169361 4767485306 +domain0 00000000,00000003 212499247 210112015 1861015 1860405436 536440 369895 32599 210079416 25368550 24241256 384652 927363878 807233 6366 1647 24239609 2122447165 1886868564 121112060 2848625533 125678146 241025 1032026 1885836538 2545 12 2533 0 0 0 0 0 0 1387952561 21076581 0 +cpu1 518377256 0 4155211005 2778589869 10466382 2867629021 1904686152592476 364107263788241 5145567945 +domain0 00000000,00000003 217653037 215526982 1577949 1580427380 557469 393576 28538 215498444 28721913 27662819 371153 870843407 745912 5523 1639 27661180 2331056874 2107732788 111442342 652402556 123615235 196159 1045245 2106687543 2400 3 2397 0 0 0 0 0 0 1437804657 26220076 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/proc/self SymlinkTo: 26231 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -619,245 +1959,732 @@ Path: fixtures/proc/symlinktargets/xyz Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block/dm-0 +Directory: fixtures/proc/sys Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/dm-0/stat -Lines: 1 -6447303 0 710266738 1529043 953216 0 31201176 4557464 0 796160 6088971 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block/sda +Directory: fixtures/proc/sys/vm Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/stat +Path: fixtures/proc/sys/vm/admin_reserve_kbytes Lines: 1 -9652963 396792 759304206 412943 8422549 6731723 286915323 13947418 0 5658367 19174573 1 2 3 12 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0 -Mode: 755 +8192 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/board_id +Path: fixtures/proc/sys/vm/block_dump Lines: 1 -SM_1141000001000 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/fw_ver +Path: fixtures/proc/sys/vm/compact_unevictable_allowed Lines: 1 -2.31.5050 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/hca_type +Path: fixtures/proc/sys/vm/dirty_background_bytes Lines: 1 -MT4099 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0/ports -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters -Mode: 755 -# 
ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/excessive_buffer_overrun_errors +Path: fixtures/proc/sys/vm/dirty_background_ratio Lines: 1 -0 +10 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_downed +Path: fixtures/proc/sys/vm/dirty_bytes Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_error_recovery +Path: fixtures/proc/sys/vm/dirty_expire_centisecs Lines: 1 -0 +3000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/local_link_integrity_errors +Path: fixtures/proc/sys/vm/dirty_ratio Lines: 1 -0 +20 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_constraint_errors +Path: fixtures/proc/sys/vm/dirty_writeback_centisecs Lines: 1 -0 +500 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_data +Path: fixtures/proc/sys/vm/dirtytime_expire_seconds Lines: 1 -2221223609 +43200 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_errors +Path: fixtures/proc/sys/vm/drop_caches Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_packets +Path: fixtures/proc/sys/vm/extfrag_threshold Lines: 1 -87169372 +500 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_remote_physical_errors +Path: fixtures/proc/sys/vm/hugetlb_shm_group Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_switch_relay_errors +Path: fixtures/proc/sys/vm/laptop_mode Lines: 1 -0 +5 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_constraint_errors +Path: fixtures/proc/sys/vm/legacy_va_layout Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_data +Path: fixtures/proc/sys/vm/lowmem_reserve_ratio Lines: 1 -26509113295 +256 256 32 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_discards +Path: fixtures/proc/sys/vm/max_map_count Lines: 1 -0 +65530 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_packets +Path: fixtures/proc/sys/vm/memory_failure_early_kill Lines: 1 -85734114 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_wait +Path: fixtures/proc/sys/vm/memory_failure_recovery Lines: 1 -3599 +1 Mode: 644 # ttar 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/symbol_error +Path: fixtures/proc/sys/vm/min_free_kbytes Lines: 1 -0 +67584 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/phys_state +Path: fixtures/proc/sys/vm/min_slab_ratio Lines: 1 -5: LinkUp +5 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/rate +Path: fixtures/proc/sys/vm/min_unmapped_ratio Lines: 1 -40 Gb/sec (4X QDR) +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/state +Path: fixtures/proc/sys/vm/mmap_min_addr Lines: 1 -4: ACTIVE +65536 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/excessive_buffer_overrun_errors +Path: fixtures/proc/sys/vm/nr_hugepages Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_downed +Path: fixtures/proc/sys/vm/nr_hugepages_mempolicy Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_error_recovery +Path: fixtures/proc/sys/vm/nr_overcommit_hugepages Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/local_link_integrity_errors +Path: fixtures/proc/sys/vm/numa_stat Lines: 1 -0 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_constraint_errors +Path: fixtures/proc/sys/vm/numa_zonelist_order Lines: 1 -0 +Node Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_data +Path: fixtures/proc/sys/vm/oom_dump_tasks Lines: 1 -2460436784 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_errors +Path: fixtures/proc/sys/vm/oom_kill_allocating_task Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_packets +Path: fixtures/proc/sys/vm/overcommit_kbytes Lines: 1 -89332064 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_remote_physical_errors +Path: fixtures/proc/sys/vm/overcommit_memory Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_switch_relay_errors +Path: fixtures/proc/sys/vm/overcommit_ratio Lines: 1 -0 +50 Mode: 644 # ttar - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_constraint_errors +Path: fixtures/proc/sys/vm/page-cluster Lines: 1 -0 +3 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_data +Path: fixtures/proc/sys/vm/panic_on_oom Lines: 1 -26540356890 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_discards +Path: fixtures/proc/sys/vm/percpu_pagelist_fraction Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_packets +Path: fixtures/proc/sys/vm/stat_interval Lines: 1 -88622850 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_wait +Path: fixtures/proc/sys/vm/swappiness Lines: 1 -3846 +60 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/symbol_error +Path: fixtures/proc/sys/vm/user_reserve_kbytes Lines: 1 -0 +131072 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/phys_state +Path: fixtures/proc/sys/vm/vfs_cache_pressure Lines: 1 -5: LinkUp +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/watermark_boost_factor +Lines: 1 +15000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/watermark_scale_factor +Lines: 1 +10 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/zone_reclaim_mode +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/zoneinfo +Lines: 262 +Node 0, zone DMA + per-node stats + nr_inactive_anon 230981 + nr_active_anon 547580 + nr_inactive_file 316904 + nr_active_file 346282 + nr_unevictable 115467 + nr_slab_reclaimable 131220 + nr_slab_unreclaimable 47320 + nr_isolated_anon 0 + nr_isolated_file 0 + workingset_nodes 11627 + workingset_refault 466886 + workingset_activate 276925 + workingset_restore 84055 + workingset_nodereclaim 487 + nr_anon_pages 795576 + nr_mapped 215483 + nr_file_pages 761874 + nr_dirty 908 + nr_writeback 0 + nr_writeback_temp 0 + nr_shmem 224925 + nr_shmem_hugepages 0 + nr_shmem_pmdmapped 0 + nr_anon_transparent_hugepages 0 + nr_unstable 0 + nr_vmscan_write 12950 + nr_vmscan_immediate_reclaim 3033 + nr_dirtied 8007423 + nr_written 7752121 + nr_kernel_misc_reclaimable 0 + pages free 3952 + min 33 + low 41 + high 49 + spanned 4095 + present 3975 + managed 3956 + protection: (0, 2877, 7826, 7826, 7826) + nr_free_pages 3952 + nr_zone_inactive_anon 0 + nr_zone_active_anon 0 + nr_zone_inactive_file 0 + nr_zone_active_file 0 + nr_zone_unevictable 0 + nr_zone_write_pending 0 + nr_mlock 0 + nr_page_table_pages 0 + nr_kernel_stack 0 + nr_bounce 0 + nr_zspages 0 + nr_free_cma 0 + numa_hit 1 + numa_miss 0 + numa_foreign 0 + numa_interleave 0 + numa_local 1 + numa_other 0 + pagesets + cpu: 0 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 1 + count: 0 + high: 0 + batch: 1 + vm 
stats threshold: 8 + cpu: 2 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 3 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 4 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 5 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 6 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 7 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + node_unreclaimable: 0 + start_pfn: 1 +Node 0, zone DMA32 + pages free 204252 + min 19510 + low 21059 + high 22608 + spanned 1044480 + present 759231 + managed 742806 + protection: (0, 0, 4949, 4949, 4949) + nr_free_pages 204252 + nr_zone_inactive_anon 118558 + nr_zone_active_anon 106598 + nr_zone_inactive_file 75475 + nr_zone_active_file 70293 + nr_zone_unevictable 66195 + nr_zone_write_pending 64 + nr_mlock 4 + nr_page_table_pages 1756 + nr_kernel_stack 2208 + nr_bounce 0 + nr_zspages 0 + nr_free_cma 0 + numa_hit 113952967 + numa_miss 0 + numa_foreign 0 + numa_interleave 0 + numa_local 113952967 + numa_other 0 + pagesets + cpu: 0 + count: 345 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 1 + count: 356 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 2 + count: 325 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 3 + count: 346 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 4 + count: 321 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 5 + count: 316 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 6 + count: 373 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 7 + count: 339 + high: 378 + batch: 63 + vm stats threshold: 48 + node_unreclaimable: 0 + start_pfn: 4096 +Node 0, zone Normal + pages free 18553 + min 11176 + low 13842 + high 16508 + spanned 1308160 + present 1308160 + managed 1268711 + protection: (0, 0, 0, 0, 0) + nr_free_pages 18553 + nr_zone_inactive_anon 112423 + nr_zone_active_anon 440982 + nr_zone_inactive_file 241429 + nr_zone_active_file 275989 + nr_zone_unevictable 49272 + nr_zone_write_pending 844 + nr_mlock 154 + nr_page_table_pages 9750 + nr_kernel_stack 15136 + nr_bounce 0 + nr_zspages 0 + nr_free_cma 0 + numa_hit 162718019 + numa_miss 0 + numa_foreign 0 + numa_interleave 26812 + numa_local 162718019 + numa_other 0 + pagesets + cpu: 0 + count: 316 + high: 378 + batch: 63 + vm stats threshold: 56 + cpu: 1 + count: 366 + high: 378 + batch: 63 + vm stats threshold: 56 + cpu: 2 + count: 60 + high: 378 + batch: 63 + vm stats threshold: 56 + cpu: 3 + count: 256 + high: 378 + batch: 63 + vm stats threshold: 56 + cpu: 4 + count: 253 + high: 378 + batch: 63 + vm stats threshold: 56 + cpu: 5 + count: 159 + high: 378 + batch: 63 + vm stats threshold: 56 + cpu: 6 + count: 311 + high: 378 + batch: 63 + vm stats threshold: 56 + cpu: 7 + count: 264 + high: 378 + batch: 63 + vm stats threshold: 56 + node_unreclaimable: 0 + start_pfn: 1048576 +Node 0, zone Movable + pages free 0 + min 0 + low 0 + high 0 + spanned 0 + present 0 + managed 0 + protection: (0, 0, 0, 0, 0) +Node 0, zone Device + pages free 0 + min 0 + low 0 + high 0 + spanned 0 + present 0 + managed 0 + protection: (0, 0, 0, 0, 0) +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/block +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/block/dm-0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - +Path: fixtures/sys/block/dm-0/stat +Lines: 1 +6447303 0 710266738 1529043 953216 0 31201176 4557464 0 796160 6088971 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/block/sda +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/stat +Lines: 1 +9652963 396792 759304206 412943 8422549 6731723 286915323 13947418 0 5658367 19174573 1 2 3 12 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/board_id +Lines: 1 +SM_1141000001000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/fw_ver +Lines: 1 +2.31.5050 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/hca_type +Lines: 1 +MT4099 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/excessive_buffer_overrun_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_downed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_error_recovery +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/local_link_integrity_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_constraint_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_data +Lines: 1 +2221223609 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_packets +Lines: 1 +87169372 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_remote_physical_errors +Lines: 1 +0 +Mode: 644 +# ttar - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_switch_relay_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_constraint_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_data +Lines: 1 +26509113295 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_discards +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_packets +Lines: 1 +85734114 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_wait +Lines: 1 +3599 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/symbol_error +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/phys_state +Lines: 1 +5: LinkUp +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/rate +Lines: 1 +40 Gb/sec (4X QDR) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/state +Lines: 1 +4: ACTIVE +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/excessive_buffer_overrun_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_downed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_error_recovery +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/local_link_integrity_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_constraint_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_data +Lines: 1 +2460436784 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_packets +Lines: 1 +89332064 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_remote_physical_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_switch_relay_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_constraint_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_data +Lines: 1 +26540356890 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_discards +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_packets +Lines: 1 +88622850 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_wait +Lines: 1 +3846 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/symbol_error +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/phys_state +Lines: 1 +5: LinkUp Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/rate @@ -921,6 +2748,9 @@ Lines: 1 0x20 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/device +SymlinkTo: ../../../devices/pci0000:00/0000:00:1f.6/ +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/sys/class/net/eth0/dormant Lines: 1 1 @@ -1005,146 +2835,179 @@ Mode: 644 Directory: fixtures/sys/class/power_supply Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/power_supply/AC +Path: fixtures/sys/class/power_supply/AC +SymlinkTo: ../../devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0 +SymlinkTo: ../../devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/powercap Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/AC/online -Lines: 1 -0 -Mode: 444 +Directory: fixtures/sys/class/powercap/intel-rapl +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/AC/type +Path: fixtures/sys/class/powercap/intel-rapl/enabled Lines: 1 -Mains -Mode: 444 +1 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/class/power_supply/AC/uevent -Lines: 2 -POWER_SUPPLY_NAME=AC -POWER_SUPPLY_ONLINE=0 +Path: fixtures/sys/class/powercap/intel-rapl/uevent +Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/power_supply/BAT0 +Directory: fixtures/sys/class/powercap/intel-rapl:0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/alarm +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_max_power_uw Lines: 1 -2503000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/capacity -Lines: 1 -98 +95000000 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/capacity_level +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_name Lines: 1 -Normal +long_term Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/charge_start_threshold +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_power_limit_uw Lines: 1 -95 +4090000000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/charge_stop_threshold +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_time_window_us Lines: 1 -100 +999424 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/cycle_count +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_max_power_uw Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/energy_full +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_name Lines: 1 -50060000 +short_term Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/energy_full_design +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_power_limit_uw Lines: 1 -47520000 -Mode: 444 +4090000000 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/energy_now +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_time_window_us Lines: 1 -49450000 -Mode: 444 +2440 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/manufacturer +Path: fixtures/sys/class/powercap/intel-rapl:0/enabled Lines: 1 -LGC -Mode: 444 +1 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/model_name +Path: fixtures/sys/class/powercap/intel-rapl:0/energy_uj Lines: 1 -LNV-45N1 +240422366267 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/power_now +Path: fixtures/sys/class/powercap/intel-rapl:0/max_energy_range_uj Lines: 1 -4830000 +262143328850 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/present +Path: fixtures/sys/class/powercap/intel-rapl:0/name Lines: 1 -1 +package-0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/class/power_supply/BAT0/serial_number -Lines: 1 -38109 +Path: fixtures/sys/class/powercap/intel-rapl:0/uevent +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/powercap/intel-rapl:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_max_power_uw +Lines: 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/status +Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_name Lines: 1 -Discharging +long_term Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/technology +Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_power_limit_uw Lines: 1 -Li-ion -Mode: 444 +0 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/type +Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_time_window_us Lines: 1 -Battery -Mode: 444 +976 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/uevent -Lines: 16 -POWER_SUPPLY_NAME=BAT0 -POWER_SUPPLY_STATUS=Discharging -POWER_SUPPLY_PRESENT=1 -POWER_SUPPLY_TECHNOLOGY=Li-ion -POWER_SUPPLY_CYCLE_COUNT=0 -POWER_SUPPLY_VOLTAGE_MIN_DESIGN=10800000 -POWER_SUPPLY_VOLTAGE_NOW=12229000 -POWER_SUPPLY_POWER_NOW=4830000 -POWER_SUPPLY_ENERGY_FULL_DESIGN=47520000 -POWER_SUPPLY_ENERGY_FULL=50060000 -POWER_SUPPLY_ENERGY_NOW=49450000 -POWER_SUPPLY_CAPACITY=98 -POWER_SUPPLY_CAPACITY_LEVEL=Normal -POWER_SUPPLY_MODEL_NAME=LNV-45N1 -POWER_SUPPLY_MANUFACTURER=LGC -POWER_SUPPLY_SERIAL_NUMBER=38109 +Path: fixtures/sys/class/powercap/intel-rapl:0:0/enabled +Lines: 1 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/voltage_min_design +Path: fixtures/sys/class/powercap/intel-rapl:0:0/energy_uj Lines: 1 -10800000 +118821284256 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/voltage_now +Path: fixtures/sys/class/powercap/intel-rapl:0:0/max_energy_range_uj Lines: 1 -12229000 +262143328850 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/name +Lines: 1 +core Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/uevent +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/sys/class/thermal Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/thermal/cooling_device0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/cooling_device0/cur_state +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/cooling_device0/max_state +Lines: 1 +50 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/cooling_device0/type +Lines: 1 +Processor +Mode: 644 +# ttar - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/thermal/cooling_device1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/cooling_device1/cur_state +Lines: 1 +-1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/cooling_device1/max_state +Lines: 1 +27 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/cooling_device1/type +Lines: 1 +intel_powerclamp +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/sys/class/thermal/thermal_zone0 Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -1194,242 +3057,1192 @@ Mode: 664 Directory: fixtures/sys/devices Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00 +Directory: fixtures/sys/devices/LNXSYSTM:00 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0 +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4 +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3 +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0 +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0 +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache -Mode: 755 +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/device +SymlinkTo: ../../../ACPI0003:00 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/dirty_data +Path: 
fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/online Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/bypassed +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/async Lines: 1 -0 +disabled Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_hits +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/autosuspend_delay_ms +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/control Lines: 1 -0 +auto Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_misses +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_active_kids Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hit_ratio +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_active_time Lines: 1 -100 -Mode: 644 +0 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hits +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_enabled Lines: 1 -289 -Mode: 644 +disabled +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_miss_collisions +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_status Lines: 1 -0 -Mode: 644 +unsupported +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_misses +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_suspended_time Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_readaheads +Path: 
fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_usage Lines: 1 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute -Mode: 755 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/bypassed +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup Lines: 1 -0 +enabled Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_hits +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_abort_count Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_misses +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_active Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hit_ratio +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_active_count Lines: 1 -0 -Mode: 644 +1 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hits +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_count Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_miss_collisions +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_expire_count Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_misses +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_last_time_ms Lines: 1 -0 -Mode: 644 +10598 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_readaheads +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_max_time_ms +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_prevent_sleep_time_ms Lines: 1 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_total_time_ms +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/subsystem +SymlinkTo: ../../../../../../../../../class/power_supply +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/type +Lines: 1 +Mains +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/uevent +Lines: 2 +POWER_SUPPLY_NAME=AC +POWER_SUPPLY_ONLINE=0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/bypassed +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/alarm Lines: 1 -0 +2369000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_hits +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/capacity Lines: 1 -0 -Mode: 644 +98 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_misses +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/capacity_level Lines: 1 -0 -Mode: 644 +Normal +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hit_ratio +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/charge_start_threshold Lines: 1 -0 +95 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hits +Path: 
fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/charge_stop_threshold Lines: 1 -0 +100 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_miss_collisions +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/cycle_count Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_misses +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/device +SymlinkTo: ../../../PNP0C0A:00 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_full Lines: 1 -0 -Mode: 644 +50060000 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_readaheads +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_full_design Lines: 1 -0 -Mode: 644 +47520000 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total -Mode: 755 +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_now +Lines: 1 +49450000 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/bypassed +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/manufacturer Lines: 1 -0 -Mode: 644 +LGC +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_hits +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/model_name Lines: 1 -0 -Mode: 644 +LNV-45N1 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_misses +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/async Lines: 1 -0 +disabled Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hit_ratio -Lines: 1 -100 +Path: 
fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/autosuspend_delay_ms +Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hits +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/control Lines: 1 -546 +auto Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_miss_collisions +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_active_kids Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_misses +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_active_time Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_readaheads +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_enabled Lines: 1 -0 -Mode: 644 +disabled +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5 +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_status +Lines: 1 +unsupported +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_suspended_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_usage +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power_now +Lines: 1 +4830000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/present +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/serial_number +Lines: 1 +38109 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/status +Lines: 1 +Discharging +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/subsystem +SymlinkTo: ../../../../../../../../../class/power_supply +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/technology +Lines: 1 +Li-ion +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/type +Lines: 1 +Battery +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/uevent +Lines: 16 +POWER_SUPPLY_NAME=BAT0 +POWER_SUPPLY_STATUS=Discharging +POWER_SUPPLY_PRESENT=1 +POWER_SUPPLY_TECHNOLOGY=Li-ion +POWER_SUPPLY_CYCLE_COUNT=0 +POWER_SUPPLY_VOLTAGE_MIN_DESIGN=10800000 +POWER_SUPPLY_VOLTAGE_NOW=11750000 +POWER_SUPPLY_POWER_NOW=5064000 +POWER_SUPPLY_ENERGY_FULL_DESIGN=47520000 +POWER_SUPPLY_ENERGY_FULL=47390000 +POWER_SUPPLY_ENERGY_NOW=40730000 +POWER_SUPPLY_CAPACITY=85 +POWER_SUPPLY_CAPACITY_LEVEL=Normal +POWER_SUPPLY_MODEL_NAME=LNV-45N1 +POWER_SUPPLY_MANUFACTURER=LGC +POWER_SUPPLY_SERIAL_NUMBER=38109 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/voltage_min_design +Lines: 1 +10800000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/voltage_now +Lines: 1 +12229000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/dirty_data +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hits +Lines: 1 +289 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hits 
+Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hits +Lines: 1 +546 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/io_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/metadata_written +Lines: 1 +512 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/priority_stats +Lines: 5 +Unused: 99% +Metadata: 0% +Average: 10473 +Sectors per Q: 64 +Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/written +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:1f.6 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/ari_enabled +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/broken_parity_status +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/class +Lines: 1 +0x020000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/consistent_dma_mask_bits +Lines: 1 +64 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/d3cold_allowed +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/device +Lines: 1 +0x15d7 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/dma_mask_bits +Lines: 1 +64 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/driver_override +Lines: 1 +(null) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/irq +Lines: 1 +140 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/local_cpulist +Lines: 1 +0-7 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/local_cpus +Lines: 1 +ff +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/modalias +Lines: 1 +pci:v00008086d000015D7sv000017AAsd0000225Abc02sc00i00 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/msi_bus +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/numa_node +Lines: 1 +-1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/resource +Lines: 13 +0x00000000ec200000 0x00000000ec21ffff 0x0000000000040200 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 
+0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/revision +Lines: 1 +0x21 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/subsystem_device +Lines: 1 +0x225a +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/subsystem_vendor +Lines: 1 +0x17aa +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/uevent +Lines: 6 +DRIVER=e1000e +PCI_CLASS=20000 +PCI_ID=8086:15D7 +PCI_SUBSYS_ID=17AA:225A +PCI_SLOT_NAME=0000:00:1f.6 +MODALIAS=pci:v00008086d000015D7sv000017AAsd0000225Abc02sc00i00 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/vendor +Lines: 1 +0x8086 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/rbd +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/rbd/0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/rbd/0/name +Lines: 1 +demo +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/rbd/0/pool +Lines: 1 +iscsi-images +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/rbd/1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/rbd/1/name +Lines: 1 +wrong +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/rbd/1/pool +Lines: 1 +wrong-images +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/clocksource +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/clocksource/clocksource0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/clocksource/clocksource0/available_clocksource +Lines: 1 +tsc hpet acpi_pm +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/clocksource/clocksource0/current_clocksource +Lines: 1 +tsc +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/cpufreq +SymlinkTo: ../cpufreq/policy0 +# ttar - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count +Lines: 1 +10084 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle/package_throttle_count +Lines: 1 +34818 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu0/topology +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_id +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_siblings +Lines: 1 +ff +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_siblings_list +Lines: 1 +0-7 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/topology/physical_package_id +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/topology/thread_siblings +Lines: 1 +11 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/topology/thread_siblings_list +Lines: 1 +0,4 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu1 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu1/cpufreq +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq +Lines: 1 +1200195 +Mode: 400 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_max_freq +Lines: 1 +3300000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_min_freq +Lines: 1 +1200000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_transition_latency +Lines: 1 +4294967295 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/related_cpus +Lines: 1 +1 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_governors +Lines: 1 +performance powersave +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_driver +Lines: 1 +intel_pstate +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_governor +Lines: 1 +powersave +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq +Lines: 1 +3300000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq +Lines: 1 +1200000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_setspeed +Lines: 1 + +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle/core_throttle_count +Lines: 1 +523 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle/package_throttle_count +Lines: 1 +34818 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu1/topology +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_id +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_siblings +Lines: 1 +ff +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_siblings_list +Lines: 1 +0-7 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/topology/physical_package_id +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/topology/thread_siblings +Lines: 1 +22 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/topology/thread_siblings_list +Lines: 1 +1,5 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpufreq +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpufreq/policy0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/affected_cpus +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_max_freq +Lines: 1 +2400000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_min_freq +Lines: 1 +800000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_transition_latency +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/related_cpus +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_available_governors +Lines: 1 +performance 
powersave +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_cur_freq +Lines: 1 +1219917 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_driver +Lines: 1 +intel_pstate +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_governor +Lines: 1 +powersave +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_max_freq +Lines: 1 +2400000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_min_freq +Lines: 1 +800000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_setspeed +Lines: 1 + +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpufreq/policy1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/average_key_size +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0 +Mode: 777 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/dirty_data +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hits +Lines: 1 +289 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4 -Mode: 755 +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hit_ratio +Lines: 1 +0 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0 -Mode: 755 +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hits +Lines: 1 +0 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0 -Mode: 755 +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_miss_collisions +Lines: 1 +0 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block -Mode: 755 +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_misses +Lines: 1 +0 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc -Mode: 755 +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_readaheads +Lines: 1 +0 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/io_errors +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/bypassed Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/metadata_written +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hits +Lines: 1 +546 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/btree_cache_size +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
+Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0 +Mode: 777 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/io_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/metadata_written Lines: 1 512 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/priority_stats +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/priority_stats Lines: 5 Unused: 99% Metadata: 0% @@ -1438,638 +4251,800 @@ Sectors per Q: 64 Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/written +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/written +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache_available_percent +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/congested +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/active_journal_entries +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_nodes +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_read_average_duration_us +Lines: 1 +1305 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/cache_read_races +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/root_usage_percent +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hits +Lines: 1 +289 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_readaheads Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/rbd -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/rbd/0 +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/rbd/0/name +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/bypassed Lines: 1 -demo +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/rbd/0/pool +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_hits Lines: 1 -iscsi-images +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/rbd/1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/rbd/1/name +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_misses Lines: 1 -wrong +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/rbd/1/pool +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hit_ratio Lines: 1 -wrong-images +100 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/clocksource -Mode: 775 +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hits +Lines: 1 +546 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/clocksource/clocksource0 -Mode: 775 +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_miss_collisions +Lines: 1 +0 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/clocksource/clocksource0/available_clocksource +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_misses Lines: 1 -tsc hpet acpi_pm -Mode: 444 +0 +Mode: 644 # ttar - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/clocksource/clocksource0/current_clocksource +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_readaheads Lines: 1 -tsc +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu -Mode: 775 +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/tree_depth +Lines: 1 +0 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu0 -Mode: 775 +Directory: fixtures/sys/fs/btrfs +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/cpufreq -SymlinkTo: ../cpufreq/policy0 +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu1 -Mode: 775 +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu1/cpufreq -Mode: 775 +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_may_use Lines: 1 -1200195 -Mode: 400 +0 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_max_freq +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_pinned Lines: 1 -3300000 -Mode: 664 +0 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_min_freq +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_readonly Lines: 1 -1200000 -Mode: 664 +0 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_transition_latency +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_reserved Lines: 1 -4294967295 -Mode: 664 +0 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/related_cpus +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_used Lines: 1 -1 -Mode: 664 +808189952 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_governors +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/disk_total Lines: 1 -performance powersave -Mode: 664 +2147483648 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_driver +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/disk_used Lines: 1 -intel_pstate -Mode: 664 +808189952 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_governor +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/flags Lines: 1 -powersave -Mode: 664 +1 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq -Lines: 1 -3300000 -Mode: 664 +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0 +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0/total_bytes Lines: 1 -1200000 -Mode: 664 +2147483648 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_setspeed +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0/used_bytes Lines: 1 - -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpufreq -Mode: 775 +808189952 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpufreq/policy0 -Mode: 775 +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/total_bytes +Lines: 1 +2147483648 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/affected_cpus +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/total_bytes_pinned Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_max_freq +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/global_rsv_reserved Lines: 1 -2400000 +16777216 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_min_freq +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/global_rsv_size Lines: 1 -800000 +16777216 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_transition_latency +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_may_use Lines: 1 -0 +16777216 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/related_cpus +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_pinned Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_available_governors +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_readonly Lines: 1 -performance powersave +131072 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_cur_freq +Path: 
fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_reserved Lines: 1 -1219917 +0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_driver +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_used Lines: 1 -intel_pstate +933888 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_governor -Lines: 1 -powersave -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_max_freq +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/disk_total Lines: 1 -2400000 -Mode: 644 +2147483648 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_min_freq +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/disk_used Lines: 1 -800000 -Mode: 644 +1867776 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_setspeed +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/flags Lines: 1 - -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpufreq/policy1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs -Mode: 755 +4 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74 -Mode: 755 +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1/total_bytes +Lines: 1 +1073741824 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/average_key_size +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1/used_bytes Lines: 1 -0 -Mode: 644 +933888 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0 -Mode: 777 +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes +Lines: 1 +1073741824 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/dirty_data +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes_pinned Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/bypassed +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_may_use Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_hits +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_pinned Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_misses +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_readonly Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hit_ratio +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_reserved Lines: 1 -100 -Mode: 644 +0 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hits +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_used Lines: 1 -289 -Mode: 644 +16384 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_miss_collisions +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_total Lines: 1 -0 -Mode: 644 +16777216 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_misses +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_used Lines: 1 -0 -Mode: 644 +32768 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_readaheads +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/flags Lines: 1 -0 -Mode: 644 +2 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/bypassed +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/total_bytes Lines: 1 -0 -Mode: 644 +8388608 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_hits +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/used_bytes Lines: 1 -0 -Mode: 644 +16384 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_misses +Path: 
fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes Lines: 1 -0 -Mode: 644 +8388608 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hit_ratio +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes_pinned Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hits +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/clone_alignment Lines: 1 -0 -Mode: 644 +4096 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_miss_collisions -Lines: 1 -0 -Mode: 644 +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_misses +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25/size Lines: 1 -0 -Mode: 644 +20971520 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_readaheads +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26/size Lines: 1 -0 -Mode: 644 +20971520 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/bypassed +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/big_metadata +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/extended_iref Lines: 1 -0 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_hits +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/mixed_backref Lines: 1 -0 -Mode: 644 +1 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_misses +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/skinny_metadata Lines: 1 -0 -Mode: 644 +1 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hit_ratio +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/label Lines: 1 -0 +fixture Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hits +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/metadata_uuid Lines: 1 -0 -Mode: 644 +0abb23a9-579b-43e6-ad30-227ef47fcb9d +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_miss_collisions +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/nodesize Lines: 1 -0 -Mode: 644 +16384 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_misses +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/quota_override Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_readaheads +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/sectorsize Lines: 1 -0 -Mode: 644 +4096 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/bypassed -Lines: 1 -0 -Mode: 644 +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_hits +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_may_use Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_misses +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_pinned Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hit_ratio +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_readonly Lines: 1 -100 -Mode: 644 +0 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hits +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_reserved Lines: 1 -546 -Mode: 644 +0 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_miss_collisions 
+Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_used Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_misses +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_total Lines: 1 -0 -Mode: 644 +644087808 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_readaheads +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_used Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/btree_cache_size +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/flags Lines: 1 -0 -Mode: 644 +1 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0 -Mode: 777 +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5 +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/io_errors +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/total_bytes Lines: 1 -0 -Mode: 644 +644087808 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/metadata_written +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/used_bytes Lines: 1 -512 -Mode: 644 +0 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/priority_stats -Lines: 5 -Unused: 99% -Metadata: 0% -Average: 10473 -Sectors per Q: 64 -Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] -Mode: 644 +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes +Lines: 1 +644087808 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/written +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes_pinned Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache_available_percent +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_reserved Lines: 1 -100 -Mode: 644 +16777216 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/congested +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_size Lines: 1 -0 -Mode: 644 +16777216 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal +Directory: 
fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/active_journal_entries +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_may_use Lines: 1 -1 -Mode: 644 +16777216 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_nodes +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_pinned Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_read_average_duration_us +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_readonly Lines: 1 -1305 -Mode: 644 +262144 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/cache_read_races +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_reserved Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/root_usage_percent +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_used Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day -Mode: 755 +114688 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/bypassed +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_total Lines: 1 -0 -Mode: 644 +429391872 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_hits +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_used Lines: 1 -0 -Mode: 644 +114688 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_misses +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/flags Lines: 1 -0 -Mode: 644 +4 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hit_ratio -Lines: 1 -100 -Mode: 644 +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6 +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hits +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/total_bytes Lines: 1 -289 -Mode: 644 +429391872 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_miss_collisions 
+Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/used_bytes Lines: 1 -0 -Mode: 644 +114688 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_misses +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes Lines: 1 -0 -Mode: 644 +429391872 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_readaheads +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes_pinned Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/bypassed +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_may_use Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_hits +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_pinned Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_misses +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_readonly Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hit_ratio +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_reserved Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hits +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_used Lines: 1 -0 -Mode: 644 +16384 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_miss_collisions +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_total Lines: 1 -0 -Mode: 644 +16777216 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_misses +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_used Lines: 1 -0 -Mode: 644 +16384 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_readaheads +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/flags Lines: 1 -0 -Mode: 644 +2 +Mode: 444 # ttar - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/bypassed +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/total_bytes Lines: 1 -0 -Mode: 644 +16777216 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_hits +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/used_bytes Lines: 1 -0 -Mode: 644 +16384 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_misses +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes Lines: 1 -0 -Mode: 644 +16777216 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hit_ratio +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes_pinned Lines: 1 0 -Mode: 644 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hits +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/clone_alignment Lines: 1 -0 -Mode: 644 +4096 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_miss_collisions -Lines: 1 -0 -Mode: 644 +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_misses -Lines: 1 -0 -Mode: 644 +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop22 +SymlinkTo: ../../../../devices/virtual/block/loop22 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_readaheads -Lines: 1 -0 -Mode: 644 +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop23 +SymlinkTo: ../../../../devices/virtual/block/loop23 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop24 +SymlinkTo: ../../../../devices/virtual/block/loop24 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop25 +SymlinkTo: ../../../../devices/virtual/block/loop25 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/bypassed +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/big_metadata Lines: 1 -0 -Mode: 644 +1 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_hits +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/extended_iref Lines: 1 -0 +1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_misses +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/mixed_backref Lines: 1 -0 -Mode: 644 +1 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hit_ratio +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/raid56 Lines: 1 -100 -Mode: 644 +1 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hits +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/skinny_metadata Lines: 1 -546 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/label +Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_miss_collisions +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/metadata_uuid Lines: 1 -0 -Mode: 644 +7f07c59f-6136-449c-ab87-e1cf2328731b +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_misses +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/nodesize Lines: 1 -0 -Mode: 644 +16384 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_readaheads +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/quota_override Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/tree_depth +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/sectorsize Lines: 1 -0 -Mode: 644 +4096 +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/sys/fs/xfs Mode: 755 diff --git a/vendor/github.com/prometheus/procfs/go.mod b/vendor/github.com/prometheus/procfs/go.mod index b2f8cca93..0e04e5d1f 100644 --- a/vendor/github.com/prometheus/procfs/go.mod +++ b/vendor/github.com/prometheus/procfs/go.mod @@ -1,6 +1,8 @@ module github.com/prometheus/procfs +go 1.12 + require ( - github.com/google/go-cmp v0.3.0 - golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 + github.com/google/go-cmp v0.3.1 + golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e ) diff --git a/vendor/github.com/prometheus/procfs/go.sum b/vendor/github.com/prometheus/procfs/go.sum index db54133d7..33b824b01 100644 --- a/vendor/github.com/prometheus/procfs/go.sum +++ 
b/vendor/github.com/prometheus/procfs/go.sum @@ -1,4 +1,4 @@ -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go index 7ddfd6b6e..565e89e42 100644 --- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go +++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go @@ -26,7 +26,7 @@ const ( // DefaultSysMountPoint is the common mount point of the sys filesystem. DefaultSysMountPoint = "/sys" - // DefaultConfigfsMountPoint is the commont mount point of the configfs + // DefaultConfigfsMountPoint is the common mount point of the configfs DefaultConfigfsMountPoint = "/sys/kernel/config" ) diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go new file mode 100644 index 000000000..755591d9a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -0,0 +1,88 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "io/ioutil" + "strconv" + "strings" +) + +// ParseUint32s parses a slice of strings into a slice of uint32s. +func ParseUint32s(ss []string) ([]uint32, error) { + us := make([]uint32, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return nil, err + } + + us = append(us, uint32(u)) + } + + return us, nil +} + +// ParseUint64s parses a slice of strings into a slice of uint64s. +func ParseUint64s(ss []string) ([]uint64, error) { + us := make([]uint64, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + us = append(us, u) + } + + return us, nil +} + +// ParsePInt64s parses a slice of strings into a slice of int64 pointers. +func ParsePInt64s(ss []string) ([]*int64, error) { + us := make([]*int64, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return nil, err + } + + us = append(us, &u) + } + + return us, nil +} + +// ReadUintFromFile reads a file and attempts to parse a uint64 from it. 
+func ReadUintFromFile(path string) (uint64, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) +} + +// ParseBool parses a string into a boolean pointer. +func ParseBool(b string) *bool { + var truth bool + switch b { + case "enabled": + truth = true + case "disabled": + truth = false + default: + return nil + } + return &truth +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/vendor/github.com/prometheus/procfs/internal/util/readfile.go new file mode 100644 index 000000000..8051161b2 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/readfile.go @@ -0,0 +1,38 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "io" + "io/ioutil" + "os" +) + +// ReadFileNoStat uses ioutil.ReadAll to read contents of entire file. +// This is similar to ioutil.ReadFile but without the call to os.Stat, because +// many files in /proc and /sys report incorrect file sizes (either 0 or 4096). +// Reads a max file size of 512kB. For files larger than this, a scanner +// should be used. +func ReadFileNoStat(filename string) ([]byte, error) { + const maxBufferSize = 1024 * 512 + + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer f.Close() + + reader := io.LimitReader(f, maxBufferSize) + return ioutil.ReadAll(reader) +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go new file mode 100644 index 000000000..c07de0b6c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go @@ -0,0 +1,48 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux,!appengine + +package util + +import ( + "bytes" + "os" + "syscall" +) + +// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly. +// https://github.com/prometheus/node_exporter/pull/728/files +// +// Note that this function will not read files larger than 128 bytes. +func SysReadFile(file string) (string, error) { + f, err := os.Open(file) + if err != nil { + return "", err + } + defer f.Close() + + // On some machines, hwmon drivers are broken and return EAGAIN. This causes + // Go's ioutil.ReadFile implementation to poll forever. 
+ // + // Since we either want to read data or bail immediately, do the simplest + // possible read using syscall directly. + const sysFileBufferSize = 128 + b := make([]byte, sysFileBufferSize) + n, err := syscall.Read(int(f.Fd()), b) + if err != nil { + return "", err + } + + return string(bytes.TrimSpace(b[:n])), nil +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go new file mode 100644 index 000000000..bd55b4537 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go @@ -0,0 +1,26 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux,appengine !linux + +package util + +import ( + "fmt" +) + +// SysReadFile is here implemented as a noop for builds that do not support +// the read syscall. For example Windows, or Linux on Google App Engine. +func SysReadFile(file string) (string, error) { + return "", fmt.Errorf("not supported on this platform") +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go new file mode 100644 index 000000000..fe2355d3c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go @@ -0,0 +1,91 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "strconv" +) + +// TODO(mdlayher): util packages are an anti-pattern and this should be moved +// somewhere else that is more focused in the future. + +// A ValueParser enables parsing a single string into a variety of data types +// in a concise and safe way. The Err method must be invoked after invoking +// any other methods to ensure a value was successfully parsed. +type ValueParser struct { + v string + err error +} + +// NewValueParser creates a ValueParser using the input string. +func NewValueParser(v string) *ValueParser { + return &ValueParser{v: v} +} + +// Int interprets the underlying value as an int and returns that value. +func (vp *ValueParser) Int() int { return int(vp.int64()) } + +// PInt64 interprets the underlying value as an int64 and returns a pointer to +// that value. +func (vp *ValueParser) PInt64() *int64 { + if vp.err != nil { + return nil + } + + v := vp.int64() + return &v +} + +// int64 interprets the underlying value as an int64 and returns that value. +// TODO: export if/when necessary. 
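For orientation (not part of this diff): the ValueParser contract documented above requires calling Err only after the typed accessors, which is easy to get backwards. Below is a minimal, illustrative sketch of the intended call pattern, written as a Go example test that would have to live inside the internal util package itself, since internal/ packages cannot be imported from outside the procfs module. It assumes only the NewValueParser, PInt64 and Err names shown in this hunk.

package util

import "fmt"

// ExampleValueParser shows the intended call order: typed accessor first,
// then Err, before trusting the returned value.
func ExampleValueParser() {
	vp := NewValueParser("0x1f")
	n := vp.PInt64() // base is inferred from the "0x" prefix, per the code above
	if err := vp.Err(); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(*n)
	// Output: 31
}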
+func (vp *ValueParser) int64() int64 { + if vp.err != nil { + return 0 + } + + // A base value of zero makes ParseInt infer the correct base using the + // string's prefix, if any. + const base = 0 + v, err := strconv.ParseInt(vp.v, base, 64) + if err != nil { + vp.err = err + return 0 + } + + return v +} + +// PUInt64 interprets the underlying value as an uint64 and returns a pointer to +// that value. +func (vp *ValueParser) PUInt64() *uint64 { + if vp.err != nil { + return nil + } + + // A base value of zero makes ParseInt infer the correct base using the + // string's prefix, if any. + const base = 0 + v, err := strconv.ParseUint(vp.v, base, 64) + if err != nil { + vp.err = err + return nil + } + + return &v +} + +// Err returns the last error, if any, encountered by the ValueParser. +func (vp *ValueParser) Err() error { + return vp.err +} diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go index 2d6cb8d1c..89e447746 100644 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -15,6 +15,7 @@ package procfs import ( "bufio" + "bytes" "encoding/hex" "errors" "fmt" @@ -24,6 +25,8 @@ import ( "os" "strconv" "strings" + + "github.com/prometheus/procfs/internal/util" ) // IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`. @@ -64,17 +67,16 @@ type IPVSBackendStatus struct { // IPVSStats reads the IPVS statistics from the specified `proc` filesystem. func (fs FS) IPVSStats() (IPVSStats, error) { - file, err := os.Open(fs.proc.Path("net/ip_vs_stats")) + data, err := util.ReadFileNoStat(fs.proc.Path("net/ip_vs_stats")) if err != nil { return IPVSStats{}, err } - defer file.Close() - return parseIPVSStats(file) + return parseIPVSStats(bytes.NewReader(data)) } // parseIPVSStats performs the actual parsing of `ip_vs_stats`. -func parseIPVSStats(file io.Reader) (IPVSStats, error) { +func parseIPVSStats(r io.Reader) (IPVSStats, error) { var ( statContent []byte statLines []string @@ -82,7 +84,7 @@ func parseIPVSStats(file io.Reader) (IPVSStats, error) { stats IPVSStats ) - statContent, err := ioutil.ReadAll(file) + statContent, err := ioutil.ReadAll(r) if err != nil { return IPVSStats{}, err } diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go new file mode 100644 index 000000000..50dab4bcd --- /dev/null +++ b/vendor/github.com/prometheus/procfs/meminfo.go @@ -0,0 +1,277 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Meminfo represents memory statistics. +type Meminfo struct { + // Total usable ram (i.e. 
physical ram minus a few reserved + // bits and the kernel binary code) + MemTotal uint64 + // The sum of LowFree+HighFree + MemFree uint64 + // An estimate of how much memory is available for starting + // new applications, without swapping. Calculated from + // MemFree, SReclaimable, the size of the file LRU lists, and + // the low watermarks in each zone. The estimate takes into + // account that the system needs some page cache to function + // well, and that not all reclaimable slab will be + // reclaimable, due to items being in use. The impact of those + // factors will vary from system to system. + MemAvailable uint64 + // Relatively temporary storage for raw disk blocks shouldn't + // get tremendously large (20MB or so) + Buffers uint64 + Cached uint64 + // Memory that once was swapped out, is swapped back in but + // still also is in the swapfile (if memory is needed it + // doesn't need to be swapped out AGAIN because it is already + // in the swapfile. This saves I/O) + SwapCached uint64 + // Memory that has been used more recently and usually not + // reclaimed unless absolutely necessary. + Active uint64 + // Memory which has been less recently used. It is more + // eligible to be reclaimed for other purposes + Inactive uint64 + ActiveAnon uint64 + InactiveAnon uint64 + ActiveFile uint64 + InactiveFile uint64 + Unevictable uint64 + Mlocked uint64 + // total amount of swap space available + SwapTotal uint64 + // Memory which has been evicted from RAM, and is temporarily + // on the disk + SwapFree uint64 + // Memory which is waiting to get written back to the disk + Dirty uint64 + // Memory which is actively being written back to the disk + Writeback uint64 + // Non-file backed pages mapped into userspace page tables + AnonPages uint64 + // files which have been mapped, such as libraries + Mapped uint64 + Shmem uint64 + // in-kernel data structures cache + Slab uint64 + // Part of Slab, that might be reclaimed, such as caches + SReclaimable uint64 + // Part of Slab, that cannot be reclaimed on memory pressure + SUnreclaim uint64 + KernelStack uint64 + // amount of memory dedicated to the lowest level of page + // tables. + PageTables uint64 + // NFS pages sent to the server, but not yet committed to + // stable storage + NFSUnstable uint64 + // Memory used for block device "bounce buffers" + Bounce uint64 + // Memory used by FUSE for temporary writeback buffers + WritebackTmp uint64 + // Based on the overcommit ratio ('vm.overcommit_ratio'), + // this is the total amount of memory currently available to + // be allocated on the system. This limit is only adhered to + // if strict overcommit accounting is enabled (mode 2 in + // 'vm.overcommit_memory'). + // The CommitLimit is calculated with the following formula: + // CommitLimit = ([total RAM pages] - [total huge TLB pages]) * + // overcommit_ratio / 100 + [total swap pages] + // For example, on a system with 1G of physical RAM and 7G + // of swap with a `vm.overcommit_ratio` of 30 it would + // yield a CommitLimit of 7.3G. + // For more details, see the memory overcommit documentation + // in vm/overcommit-accounting. + CommitLimit uint64 + // The amount of memory presently allocated on the system. + // The committed memory is a sum of all of the memory which + // has been allocated by processes, even if it has not been + // "used" by them as of yet. A process which malloc()'s 1G + // of memory, but only touches 300M of it will show up as + // using 1G. 
This 1G is memory which has been "committed" to + // by the VM and can be used at any time by the allocating + // application. With strict overcommit enabled on the system + // (mode 2 in 'vm.overcommit_memory'),allocations which would + // exceed the CommitLimit (detailed above) will not be permitted. + // This is useful if one needs to guarantee that processes will + // not fail due to lack of memory once that memory has been + // successfully allocated. + CommittedAS uint64 + // total size of vmalloc memory area + VmallocTotal uint64 + // amount of vmalloc area which is used + VmallocUsed uint64 + // largest contiguous block of vmalloc area which is free + VmallocChunk uint64 + HardwareCorrupted uint64 + AnonHugePages uint64 + ShmemHugePages uint64 + ShmemPmdMapped uint64 + CmaTotal uint64 + CmaFree uint64 + HugePagesTotal uint64 + HugePagesFree uint64 + HugePagesRsvd uint64 + HugePagesSurp uint64 + Hugepagesize uint64 + DirectMap4k uint64 + DirectMap2M uint64 + DirectMap1G uint64 +} + +// Meminfo returns an information about current kernel/system memory statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +func (fs FS) Meminfo() (Meminfo, error) { + b, err := util.ReadFileNoStat(fs.proc.Path("meminfo")) + if err != nil { + return Meminfo{}, err + } + + m, err := parseMemInfo(bytes.NewReader(b)) + if err != nil { + return Meminfo{}, fmt.Errorf("failed to parse meminfo: %v", err) + } + + return *m, nil +} + +func parseMemInfo(r io.Reader) (*Meminfo, error) { + var m Meminfo + s := bufio.NewScanner(r) + for s.Scan() { + // Each line has at least a name and value; we ignore the unit. + fields := strings.Fields(s.Text()) + if len(fields) < 2 { + return nil, fmt.Errorf("malformed meminfo line: %q", s.Text()) + } + + v, err := strconv.ParseUint(fields[1], 0, 64) + if err != nil { + return nil, err + } + + switch fields[0] { + case "MemTotal:": + m.MemTotal = v + case "MemFree:": + m.MemFree = v + case "MemAvailable:": + m.MemAvailable = v + case "Buffers:": + m.Buffers = v + case "Cached:": + m.Cached = v + case "SwapCached:": + m.SwapCached = v + case "Active:": + m.Active = v + case "Inactive:": + m.Inactive = v + case "Active(anon):": + m.ActiveAnon = v + case "Inactive(anon):": + m.InactiveAnon = v + case "Active(file):": + m.ActiveFile = v + case "Inactive(file):": + m.InactiveFile = v + case "Unevictable:": + m.Unevictable = v + case "Mlocked:": + m.Mlocked = v + case "SwapTotal:": + m.SwapTotal = v + case "SwapFree:": + m.SwapFree = v + case "Dirty:": + m.Dirty = v + case "Writeback:": + m.Writeback = v + case "AnonPages:": + m.AnonPages = v + case "Mapped:": + m.Mapped = v + case "Shmem:": + m.Shmem = v + case "Slab:": + m.Slab = v + case "SReclaimable:": + m.SReclaimable = v + case "SUnreclaim:": + m.SUnreclaim = v + case "KernelStack:": + m.KernelStack = v + case "PageTables:": + m.PageTables = v + case "NFS_Unstable:": + m.NFSUnstable = v + case "Bounce:": + m.Bounce = v + case "WritebackTmp:": + m.WritebackTmp = v + case "CommitLimit:": + m.CommitLimit = v + case "Committed_AS:": + m.CommittedAS = v + case "VmallocTotal:": + m.VmallocTotal = v + case "VmallocUsed:": + m.VmallocUsed = v + case "VmallocChunk:": + m.VmallocChunk = v + case "HardwareCorrupted:": + m.HardwareCorrupted = v + case "AnonHugePages:": + m.AnonHugePages = v + case "ShmemHugePages:": + m.ShmemHugePages = v + case "ShmemPmdMapped:": + m.ShmemPmdMapped = v + case "CmaTotal:": + m.CmaTotal = v + case "CmaFree:": + m.CmaFree = v + case "HugePages_Total:": + m.HugePagesTotal = v + case 
"HugePages_Free:": + m.HugePagesFree = v + case "HugePages_Rsvd:": + m.HugePagesRsvd = v + case "HugePages_Surp:": + m.HugePagesSurp = v + case "Hugepagesize:": + m.Hugepagesize = v + case "DirectMap4k:": + m.DirectMap4k = v + case "DirectMap2M:": + m.DirectMap2M = v + case "DirectMap1G:": + m.DirectMap1G = v + } + } + + return &m, nil +} diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go index 61fa61887..bb01bb5a2 100644 --- a/vendor/github.com/prometheus/procfs/mountinfo.go +++ b/vendor/github.com/prometheus/procfs/mountinfo.go @@ -15,19 +15,13 @@ package procfs import ( "bufio" + "bytes" "fmt" - "io" - "os" "strconv" "strings" -) -var validOptionalFields = map[string]bool{ - "shared": true, - "master": true, - "propagate_from": true, - "unbindable": true, -} + "github.com/prometheus/procfs/internal/util" +) // A MountInfo is a type that describes the details, options // for each mount, parsed from /proc/self/mountinfo. @@ -58,18 +52,10 @@ type MountInfo struct { SuperOptions map[string]string } -// Returns part of the mountinfo line, if it exists, else an empty string. -func getStringSliceElement(parts []string, idx int, defaultValue string) string { - if idx >= len(parts) { - return defaultValue - } - return parts[idx] -} - // Reads each line of the mountinfo file, and returns a list of formatted MountInfo structs. -func parseMountInfo(r io.Reader) ([]*MountInfo, error) { +func parseMountInfo(info []byte) ([]*MountInfo, error) { mounts := []*MountInfo{} - scanner := bufio.NewScanner(r) + scanner := bufio.NewScanner(bytes.NewReader(info)) for scanner.Scan() { mountString := scanner.Text() parsedMounts, err := parseMountInfoString(mountString) @@ -89,57 +75,75 @@ func parseMountInfo(r io.Reader) ([]*MountInfo, error) { func parseMountInfoString(mountString string) (*MountInfo, error) { var err error - // OptionalFields can be zero, hence these checks to ensure we do not populate the wrong values in the wrong spots - separatorIndex := strings.Index(mountString, "-") - if separatorIndex == -1 { - return nil, fmt.Errorf("no separator found in mountinfo string: %s", mountString) + mountInfo := strings.Split(mountString, " ") + mountInfoLength := len(mountInfo) + if mountInfoLength < 11 { + return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString) } - beforeFields := strings.Fields(mountString[:separatorIndex]) - afterFields := strings.Fields(mountString[separatorIndex+1:]) - if (len(beforeFields) + len(afterFields)) < 7 { - return nil, fmt.Errorf("too few fields") + + if mountInfo[mountInfoLength-4] != "-" { + return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4]) } mount := &MountInfo{ - MajorMinorVer: getStringSliceElement(beforeFields, 2, ""), - Root: getStringSliceElement(beforeFields, 3, ""), - MountPoint: getStringSliceElement(beforeFields, 4, ""), - Options: mountOptionsParser(getStringSliceElement(beforeFields, 5, "")), + MajorMinorVer: mountInfo[2], + Root: mountInfo[3], + MountPoint: mountInfo[4], + Options: mountOptionsParser(mountInfo[5]), OptionalFields: nil, - FSType: getStringSliceElement(afterFields, 0, ""), - Source: getStringSliceElement(afterFields, 1, ""), - SuperOptions: mountOptionsParser(getStringSliceElement(afterFields, 2, "")), + FSType: mountInfo[mountInfoLength-3], + Source: mountInfo[mountInfoLength-2], + SuperOptions: mountOptionsParser(mountInfo[mountInfoLength-1]), } - mount.MountId, err = 
strconv.Atoi(getStringSliceElement(beforeFields, 0, "")) + mount.MountId, err = strconv.Atoi(mountInfo[0]) if err != nil { return nil, fmt.Errorf("failed to parse mount ID") } - mount.ParentId, err = strconv.Atoi(getStringSliceElement(beforeFields, 1, "")) + mount.ParentId, err = strconv.Atoi(mountInfo[1]) if err != nil { return nil, fmt.Errorf("failed to parse parent ID") } // Has optional fields, which is a space separated list of values. // Example: shared:2 master:7 - if len(beforeFields) > 6 { - mount.OptionalFields = make(map[string]string) - optionalFields := beforeFields[6:] - for _, field := range optionalFields { - optionSplit := strings.Split(field, ":") - target, value := optionSplit[0], "" - if len(optionSplit) == 2 { - value = optionSplit[1] - } - // Checks if the 'keys' in the optional fields in the mountinfo line are acceptable. - // Allowed 'keys' are shared, master, propagate_from, unbindable. - if _, ok := validOptionalFields[target]; ok { - mount.OptionalFields[target] = value - } + if mountInfo[6] != "" { + mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4]) + if err != nil { + return nil, err } } return mount, nil } +// mountOptionsIsValidField checks a string against a valid list of optional fields keys. +func mountOptionsIsValidField(s string) bool { + switch s { + case + "shared", + "master", + "propagate_from", + "unbindable": + return true + } + return false +} + +// mountOptionsParseOptionalFields parses a list of optional fields strings into a double map of strings. +func mountOptionsParseOptionalFields(o []string) (map[string]string, error) { + optionalFields := make(map[string]string) + for _, field := range o { + optionSplit := strings.SplitN(field, ":", 2) + value := "" + if len(optionSplit) == 2 { + value = optionSplit[1] + } + if mountOptionsIsValidField(optionSplit[0]) { + optionalFields[optionSplit[0]] = value + } + } + return optionalFields, nil +} + // Parses the mount options, superblock options. func mountOptionsParser(mountOptions string) map[string]string { opts := make(map[string]string) @@ -159,20 +163,18 @@ func mountOptionsParser(mountOptions string) map[string]string { // Retrieves mountinfo information from `/proc/self/mountinfo`. func GetMounts() ([]*MountInfo, error) { - f, err := os.Open("/proc/self/mountinfo") + data, err := util.ReadFileNoStat("/proc/self/mountinfo") if err != nil { return nil, err } - defer f.Close() - return parseMountInfo(f) + return parseMountInfo(data) } // Retrieves mountinfo information from a processes' `/proc//mountinfo`. 
func GetProcMounts(pid int) ([]*MountInfo, error) { - f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) + data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid)) if err != nil { return nil, err } - defer f.Close() - return parseMountInfo(f) + return parseMountInfo(data) } diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go index a0b7a0119..47a710bef 100644 --- a/vendor/github.com/prometheus/procfs/net_dev.go +++ b/vendor/github.com/prometheus/procfs/net_dev.go @@ -183,7 +183,6 @@ func (netDev NetDev) Total() NetDevLine { names = append(names, ifc.Name) total.RxBytes += ifc.RxBytes total.RxPackets += ifc.RxPackets - total.RxPackets += ifc.RxPackets total.RxErrors += ifc.RxErrors total.RxDropped += ifc.RxDropped total.RxFIFO += ifc.RxFIFO diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go new file mode 100644 index 000000000..f91ef5523 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_sockstat.go @@ -0,0 +1,163 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// A NetSockstat contains the output of /proc/net/sockstat{,6} for IPv4 or IPv6, +// respectively. +type NetSockstat struct { + // Used is non-nil for IPv4 sockstat results, but nil for IPv6. + Used *int + Protocols []NetSockstatProtocol +} + +// A NetSockstatProtocol contains statistics about a given socket protocol. +// Pointer fields indicate that the value may or may not be present on any +// given protocol. +type NetSockstatProtocol struct { + Protocol string + InUse int + Orphan *int + TW *int + Alloc *int + Mem *int + Memory *int +} + +// NetSockstat retrieves IPv4 socket statistics. +func (fs FS) NetSockstat() (*NetSockstat, error) { + return readSockstat(fs.proc.Path("net", "sockstat")) +} + +// NetSockstat6 retrieves IPv6 socket statistics. +// +// If IPv6 is disabled on this kernel, the returned error can be checked with +// os.IsNotExist. +func (fs FS) NetSockstat6() (*NetSockstat, error) { + return readSockstat(fs.proc.Path("net", "sockstat6")) +} + +// readSockstat opens and parses a NetSockstat from the input file. +func readSockstat(name string) (*NetSockstat, error) { + // This file is small and can be read with one syscall. + b, err := util.ReadFileNoStat(name) + if err != nil { + // Do not wrap this error so the caller can detect os.IsNotExist and + // similar conditions. + return nil, err + } + + stat, err := parseSockstat(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("failed to read sockstats from %q: %v", name, err) + } + + return stat, nil +} + +// parseSockstat reads the contents of a sockstat file and parses a NetSockstat. 
+func parseSockstat(r io.Reader) (*NetSockstat, error) { + var stat NetSockstat + s := bufio.NewScanner(r) + for s.Scan() { + // Expect a minimum of a protocol and one key/value pair. + fields := strings.Split(s.Text(), " ") + if len(fields) < 3 { + return nil, fmt.Errorf("malformed sockstat line: %q", s.Text()) + } + + // The remaining fields are key/value pairs. + kvs, err := parseSockstatKVs(fields[1:]) + if err != nil { + return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %v", s.Text(), err) + } + + // The first field is the protocol. We must trim its colon suffix. + proto := strings.TrimSuffix(fields[0], ":") + switch proto { + case "sockets": + // Special case: IPv4 has a sockets "used" key/value pair that we + // embed at the top level of the structure. + used := kvs["used"] + stat.Used = &used + default: + // Parse all other lines as individual protocols. + nsp := parseSockstatProtocol(kvs) + nsp.Protocol = proto + stat.Protocols = append(stat.Protocols, nsp) + } + } + + if err := s.Err(); err != nil { + return nil, err + } + + return &stat, nil +} + +// parseSockstatKVs parses a string slice into a map of key/value pairs. +func parseSockstatKVs(kvs []string) (map[string]int, error) { + if len(kvs)%2 != 0 { + return nil, errors.New("odd number of fields in key/value pairs") + } + + // Iterate two values at a time to gather key/value pairs. + out := make(map[string]int, len(kvs)/2) + for i := 0; i < len(kvs); i += 2 { + vp := util.NewValueParser(kvs[i+1]) + out[kvs[i]] = vp.Int() + + if err := vp.Err(); err != nil { + return nil, err + } + } + + return out, nil +} + +// parseSockstatProtocol parses a NetSockstatProtocol from the input kvs map. +func parseSockstatProtocol(kvs map[string]int) NetSockstatProtocol { + var nsp NetSockstatProtocol + for k, v := range kvs { + // Capture the range variable to ensure we get unique pointers for + // each of the optional fields. + v := v + switch k { + case "inuse": + nsp.InUse = v + case "orphan": + nsp.Orphan = &v + case "tw": + nsp.TW = &v + case "alloc": + nsp.Alloc = &v + case "mem": + nsp.Mem = &v + case "memory": + nsp.Memory = &v + } + } + + return nsp +} diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go new file mode 100644 index 000000000..6fcad20af --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -0,0 +1,91 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "io/ioutil" + "strconv" + "strings" +) + +// For the proc file format details, +// see https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162 +// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810. 
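For orientation (not part of this diff): a minimal, hedged usage sketch of the NetSockstat API vendored above. It assumes the pre-existing procfs.NewFS constructor and procfs.DefaultMountPoint constant from the vendored package; only NetSockstat, its Used pointer and the Protocols slice are taken from the hunks in this diff.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// NewFS and DefaultMountPoint are assumed from the existing procfs API.
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}

	// IPv4 socket statistics from /proc/net/sockstat, as parsed above.
	stat, err := fs.NetSockstat()
	if err != nil {
		log.Fatal(err)
	}
	if stat.Used != nil {
		fmt.Println("sockets used:", *stat.Used)
	}
	for _, p := range stat.Protocols {
		fmt.Printf("%s: inuse=%d\n", p.Protocol, p.InUse)
	}
}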
+ +// SoftnetEntry contains a single row of data from /proc/net/softnet_stat +type SoftnetEntry struct { + // Number of processed packets + Processed uint + // Number of dropped packets + Dropped uint + // Number of times processing packets ran out of quota + TimeSqueezed uint +} + +// GatherSoftnetStats reads /proc/net/softnet_stat, parse the relevant columns, +// and then return a slice of SoftnetEntry's. +func (fs FS) GatherSoftnetStats() ([]SoftnetEntry, error) { + data, err := ioutil.ReadFile(fs.proc.Path("net/softnet_stat")) + if err != nil { + return nil, fmt.Errorf("error reading softnet %s: %s", fs.proc.Path("net/softnet_stat"), err) + } + + return parseSoftnetEntries(data) +} + +func parseSoftnetEntries(data []byte) ([]SoftnetEntry, error) { + lines := strings.Split(string(data), "\n") + entries := make([]SoftnetEntry, 0) + var err error + const ( + expectedColumns = 11 + ) + for _, line := range lines { + columns := strings.Fields(line) + width := len(columns) + if width == 0 { + continue + } + if width != expectedColumns { + return []SoftnetEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedColumns) + } + var entry SoftnetEntry + if entry, err = parseSoftnetEntry(columns); err != nil { + return []SoftnetEntry{}, err + } + entries = append(entries, entry) + } + + return entries, nil +} + +func parseSoftnetEntry(columns []string) (SoftnetEntry, error) { + var err error + var processed, dropped, timeSqueezed uint64 + if processed, err = strconv.ParseUint(columns[0], 16, 32); err != nil { + return SoftnetEntry{}, fmt.Errorf("Unable to parse column 0: %s", err) + } + if dropped, err = strconv.ParseUint(columns[1], 16, 32); err != nil { + return SoftnetEntry{}, fmt.Errorf("Unable to parse column 1: %s", err) + } + if timeSqueezed, err = strconv.ParseUint(columns[2], 16, 32); err != nil { + return SoftnetEntry{}, fmt.Errorf("Unable to parse column 2: %s", err) + } + return SoftnetEntry{ + Processed: uint(processed), + Dropped: uint(dropped), + TimeSqueezed: uint(timeSqueezed), + }, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go index 240340a83..93bd58f80 100644 --- a/vendor/github.com/prometheus/procfs/net_unix.go +++ b/vendor/github.com/prometheus/procfs/net_unix.go @@ -207,10 +207,6 @@ func (u NetUnix) parseUsers(hexStr string) (uint64, error) { return strconv.ParseUint(hexStr, 16, 32) } -func (u NetUnix) parseProtocol(hexStr string) (uint64, error) { - return strconv.ParseUint(hexStr, 16, 32) -} - func (u NetUnix) parseType(hexStr string) (NetUnixType, error) { typ, err := strconv.ParseUint(hexStr, 16, 16) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index 41c148d06..330e472c7 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -22,6 +22,7 @@ import ( "strings" "github.com/prometheus/procfs/internal/fs" + "github.com/prometheus/procfs/internal/util" ) // Proc provides information about a running process. @@ -121,13 +122,7 @@ func (fs FS) AllProcs() (Procs, error) { // CmdLine returns the command line of a process. 
func (p Proc) CmdLine() ([]string, error) { - f, err := os.Open(p.path("cmdline")) - if err != nil { - return nil, err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) + data, err := util.ReadFileNoStat(p.path("cmdline")) if err != nil { return nil, err } @@ -141,13 +136,7 @@ func (p Proc) CmdLine() ([]string, error) { // Comm returns the command name of a process. func (p Proc) Comm() (string, error) { - f, err := os.Open(p.path("comm")) - if err != nil { - return "", err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) + data, err := util.ReadFileNoStat(p.path("comm")) if err != nil { return "", err } @@ -252,13 +241,11 @@ func (p Proc) MountStats() ([]*Mount, error) { // It supplies information missing in `/proc/self/mounts` and // fixes various other problems with that file too. func (p Proc) MountInfo() ([]*MountInfo, error) { - f, err := os.Open(p.path("mountinfo")) + data, err := util.ReadFileNoStat(p.path("mountinfo")) if err != nil { return nil, err } - defer f.Close() - - return parseMountInfo(f) + return parseMountInfo(data) } func (p Proc) fileDescriptors() ([]string, error) { @@ -279,3 +266,33 @@ func (p Proc) fileDescriptors() ([]string, error) { func (p Proc) path(pa ...string) string { return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) } + +// FileDescriptorsInfo retrieves information about all file descriptors of +// the process. +func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + var fdinfos ProcFDInfos + + for _, n := range names { + fdinfo, err := p.FDInfo(n) + if err != nil { + continue + } + fdinfos = append(fdinfos, *fdinfo) + } + + return fdinfos, nil +} + +// Schedstat returns task scheduling information for the process. +func (p Proc) Schedstat() (ProcSchedstat, error) { + contents, err := ioutil.ReadFile(p.path("schedstat")) + if err != nil { + return ProcSchedstat{}, err + } + return parseProcSchedstat(string(contents)) +} diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go index 7172bb586..6134b3580 100644 --- a/vendor/github.com/prometheus/procfs/proc_environ.go +++ b/vendor/github.com/prometheus/procfs/proc_environ.go @@ -14,22 +14,16 @@ package procfs import ( - "io/ioutil" - "os" "strings" + + "github.com/prometheus/procfs/internal/util" ) // Environ reads process environments from /proc//environ func (p Proc) Environ() ([]string, error) { environments := make([]string, 0) - f, err := os.Open(p.path("environ")) - if err != nil { - return environments, err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) + data, err := util.ReadFileNoStat(p.path("environ")) if err != nil { return environments, err } diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go new file mode 100644 index 000000000..4e7597f86 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -0,0 +1,125 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "regexp" + + "github.com/prometheus/procfs/internal/util" +) + +// Regexp variables +var ( + rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) + rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) + rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`) + rInotify = regexp.MustCompile(`^inotify`) +) + +// ProcFDInfo contains represents file descriptor information. +type ProcFDInfo struct { + // File descriptor + FD string + // File offset + Pos string + // File access mode and status flags + Flags string + // Mount point ID + MntID string + // List of inotify lines (structed) in the fdinfo file (kernel 3.8+ only) + InotifyInfos []InotifyInfo +} + +// FDInfo constructor. On kernels older than 3.8, InotifyInfos will always be empty. +func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { + data, err := util.ReadFileNoStat(p.path("fdinfo", fd)) + if err != nil { + return nil, err + } + + var text, pos, flags, mntid string + var inotify []InotifyInfo + + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + text = scanner.Text() + if rPos.MatchString(text) { + pos = rPos.FindStringSubmatch(text)[1] + } else if rFlags.MatchString(text) { + flags = rFlags.FindStringSubmatch(text)[1] + } else if rMntID.MatchString(text) { + mntid = rMntID.FindStringSubmatch(text)[1] + } else if rInotify.MatchString(text) { + newInotify, err := parseInotifyInfo(text) + if err != nil { + return nil, err + } + inotify = append(inotify, *newInotify) + } + } + + i := &ProcFDInfo{ + FD: fd, + Pos: pos, + Flags: flags, + MntID: mntid, + InotifyInfos: inotify, + } + + return i, nil +} + +// InotifyInfo represents a single inotify line in the fdinfo file. +type InotifyInfo struct { + // Watch descriptor number + WD string + // Inode number + Ino string + // Device ID + Sdev string + // Mask of events being monitored + Mask string +} + +// InotifyInfo constructor. Only available on kernel 3.8+. +func parseInotifyInfo(line string) (*InotifyInfo, error) { + r := regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)\s+mask:([0-9a-f]+)`) + m := r.FindStringSubmatch(line) + i := &InotifyInfo{ + WD: m[1], + Ino: m[2], + Sdev: m[3], + Mask: m[4], + } + return i, nil +} + +// ProcFDInfos represents a list of ProcFDInfo structs. +type ProcFDInfos []ProcFDInfo + +func (p ProcFDInfos) Len() int { return len(p) } +func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD } + +// InotifyWatchLen returns the total number of inotify watches +func (p ProcFDInfos) InotifyWatchLen() (int, error) { + length := 0 + for _, f := range p { + length += len(f.InotifyInfos) + } + + return length, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go index 0ff89b1ce..776f34971 100644 --- a/vendor/github.com/prometheus/procfs/proc_io.go +++ b/vendor/github.com/prometheus/procfs/proc_io.go @@ -15,8 +15,8 @@ package procfs import ( "fmt" - "io/ioutil" - "os" + + "github.com/prometheus/procfs/internal/util" ) // ProcIO models the content of /proc//io. 
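For orientation (not part of this diff): a short usage sketch of the file-descriptor helpers added above — Proc.FileDescriptorsInfo from the proc.go hunk and ProcFDInfos.InotifyWatchLen from proc_fdinfo.go. It assumes the pre-existing procfs.Self helper for obtaining a Proc for the current process.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Self reads /proc/self; it is part of the pre-existing procfs API.
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}

	// FileDescriptorsInfo walks every fd of the process and parses its fdinfo file.
	fdinfos, err := p.FileDescriptorsInfo()
	if err != nil {
		log.Fatal(err)
	}

	// InotifyWatchLen sums the inotify lines across all fdinfo entries.
	watches, err := fdinfos.InotifyWatchLen()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d fds, %d inotify watches\n", len(fdinfos), watches)
}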
@@ -43,13 +43,7 @@ type ProcIO struct { func (p Proc) IO() (ProcIO, error) { pio := ProcIO{} - f, err := os.Open(p.path("io")) - if err != nil { - return pio, err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) + data, err := util.ReadFileNoStat(p.path("io")) if err != nil { return pio, err } diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go index 46fe26626..0d7bee54c 100644 --- a/vendor/github.com/prometheus/procfs/proc_psi.go +++ b/vendor/github.com/prometheus/procfs/proc_psi.go @@ -24,11 +24,13 @@ package procfs // > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134 import ( + "bufio" + "bytes" "fmt" "io" - "io/ioutil" - "os" "strings" + + "github.com/prometheus/procfs/internal/util" ) const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d" @@ -55,24 +57,21 @@ type PSIStats struct { // resource from /proc/pressure/. At time of writing this can be // either "cpu", "memory" or "io". func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { - file, err := os.Open(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) + data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) if err != nil { return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %s", resource) } - defer file.Close() - return parsePSIStats(resource, file) + return parsePSIStats(resource, bytes.NewReader(data)) } // parsePSIStats parses the specified file for pressure stall information -func parsePSIStats(resource string, file io.Reader) (PSIStats, error) { +func parsePSIStats(resource string, r io.Reader) (PSIStats, error) { psiStats := PSIStats{} - stats, err := ioutil.ReadAll(file) - if err != nil { - return psiStats, fmt.Errorf("psi_stats: unable to read data for %s", resource) - } - for _, l := range strings.Split(string(stats), "\n") { + scanner := bufio.NewScanner(r) + for scanner.Scan() { + l := scanner.Text() prefix := strings.Split(l, " ")[0] switch prefix { case "some": diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index dbde1fa0d..4517d2e9d 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -16,10 +16,10 @@ package procfs import ( "bytes" "fmt" - "io/ioutil" "os" "github.com/prometheus/procfs/internal/fs" + "github.com/prometheus/procfs/internal/util" ) // Originally, this USER_HZ value was dynamically retrieved via a sysconf call @@ -113,13 +113,7 @@ func (p Proc) NewStat() (ProcStat, error) { // Stat returns the current status information of the process. func (p Proc) Stat() (ProcStat, error) { - f, err := os.Open(p.path("stat")) - if err != nil { - return ProcStat{}, err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) + data, err := util.ReadFileNoStat(p.path("stat")) if err != nil { return ProcStat{}, err } diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index 6b4b61f71..e30c2b88f 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -15,13 +15,13 @@ package procfs import ( "bytes" - "io/ioutil" - "os" "strconv" "strings" + + "github.com/prometheus/procfs/internal/util" ) -// ProcStat provides status information about the process, +// ProcStatus provides status information about the process, // read from /proc/[pid]/stat. type ProcStatus struct { // The process ID. 
@@ -29,6 +29,9 @@ type ProcStatus struct { // The process name. Name string + // Thread group ID. + TGID int + // Peak virtual memory size. VmPeak uint64 // Virtual memory size. @@ -72,13 +75,7 @@ type ProcStatus struct { // NewStatus returns the current status information of the process. func (p Proc) NewStatus() (ProcStatus, error) { - f, err := os.Open(p.path("status")) - if err != nil { - return ProcStatus{}, err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) + data, err := util.ReadFileNoStat(p.path("status")) if err != nil { return ProcStatus{}, err } @@ -113,6 +110,8 @@ func (p Proc) NewStatus() (ProcStatus, error) { func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) { switch k { + case "Tgid": + s.TGID = int(vUint) case "Name": s.Name = vString case "VmPeak": diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go new file mode 100644 index 000000000..a4c4089ac --- /dev/null +++ b/vendor/github.com/prometheus/procfs/schedstat.go @@ -0,0 +1,118 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "errors" + "os" + "regexp" + "strconv" +) + +var ( + cpuLineRE = regexp.MustCompile(`cpu(\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)`) + procLineRE = regexp.MustCompile(`(\d+) (\d+) (\d+)`) +) + +// Schedstat contains scheduler statistics from /proc/schedstat +// +// See +// https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt +// for a detailed description of what these numbers mean. +// +// Note the current kernel documentation claims some of the time units are in +// jiffies when they are actually in nanoseconds since 2.6.23 with the +// introduction of CFS. A fix to the documentation is pending. 
See +// https://lore.kernel.org/patchwork/project/lkml/list/?series=403473 +type Schedstat struct { + CPUs []*SchedstatCPU +} + +// SchedstatCPU contains the values from one "cpu" line +type SchedstatCPU struct { + CPUNum string + + RunningNanoseconds uint64 + WaitingNanoseconds uint64 + RunTimeslices uint64 +} + +// ProcSchedstat contains the values from /proc//schedstat +type ProcSchedstat struct { + RunningNanoseconds uint64 + WaitingNanoseconds uint64 + RunTimeslices uint64 +} + +// Schedstat reads data from /proc/schedstat +func (fs FS) Schedstat() (*Schedstat, error) { + file, err := os.Open(fs.proc.Path("schedstat")) + if err != nil { + return nil, err + } + defer file.Close() + + stats := &Schedstat{} + scanner := bufio.NewScanner(file) + + for scanner.Scan() { + match := cpuLineRE.FindStringSubmatch(scanner.Text()) + if match != nil { + cpu := &SchedstatCPU{} + cpu.CPUNum = match[1] + + cpu.RunningNanoseconds, err = strconv.ParseUint(match[8], 10, 64) + if err != nil { + continue + } + + cpu.WaitingNanoseconds, err = strconv.ParseUint(match[9], 10, 64) + if err != nil { + continue + } + + cpu.RunTimeslices, err = strconv.ParseUint(match[10], 10, 64) + if err != nil { + continue + } + + stats.CPUs = append(stats.CPUs, cpu) + } + } + + return stats, nil +} + +func parseProcSchedstat(contents string) (stats ProcSchedstat, err error) { + match := procLineRE.FindStringSubmatch(contents) + + if match != nil { + stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64) + if err != nil { + return + } + + stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64) + if err != nil { + return + } + + stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64) + return + } + + err = errors.New("could not parse schedstat") + return +} diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index 6661ee03a..b2a6fc994 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -15,13 +15,14 @@ package procfs import ( "bufio" + "bytes" "fmt" "io" - "os" "strconv" "strings" "github.com/prometheus/procfs/internal/fs" + "github.com/prometheus/procfs/internal/util" ) // CPUStat shows how much time the cpu spend in various stages. @@ -164,16 +165,15 @@ func (fs FS) NewStat() (Stat, error) { // Stat returns information about current cpu/process statistics. // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt func (fs FS) Stat() (Stat, error) { - - f, err := os.Open(fs.proc.Path("stat")) + fileName := fs.proc.Path("stat") + data, err := util.ReadFileNoStat(fileName) if err != nil { return Stat{}, err } - defer f.Close() stat := Stat{} - scanner := bufio.NewScanner(f) + scanner := bufio.NewScanner(bytes.NewReader(data)) for scanner.Scan() { line := scanner.Text() parts := strings.Fields(scanner.Text()) @@ -237,7 +237,7 @@ func (fs FS) Stat() (Stat, error) { } if err := scanner.Err(); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err) + return Stat{}, fmt.Errorf("couldn't parse %s: %s", fileName, err) } return stat, nil diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go new file mode 100644 index 000000000..cb1389141 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/vm.go @@ -0,0 +1,210 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package procfs + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// The VM interface is described at +// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +// Each setting is exposed as a single file. +// Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array +// and numa_zonelist_order (deprecated) which is a string +type VM struct { + AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes + BlockDump *int64 // /proc/sys/vm/block_dump + CompactUnevictableAllowed *int64 // /proc/sys/vm/compact_unevictable_allowed + DirtyBackgroundBytes *int64 // /proc/sys/vm/dirty_background_bytes + DirtyBackgroundRatio *int64 // /proc/sys/vm/dirty_background_ratio + DirtyBytes *int64 // /proc/sys/vm/dirty_bytes + DirtyExpireCentisecs *int64 // /proc/sys/vm/dirty_expire_centisecs + DirtyRatio *int64 // /proc/sys/vm/dirty_ratio + DirtytimeExpireSeconds *int64 // /proc/sys/vm/dirtytime_expire_seconds + DirtyWritebackCentisecs *int64 // /proc/sys/vm/dirty_writeback_centisecs + DropCaches *int64 // /proc/sys/vm/drop_caches + ExtfragThreshold *int64 // /proc/sys/vm/extfrag_threshold + HugetlbShmGroup *int64 // /proc/sys/vm/hugetlb_shm_group + LaptopMode *int64 // /proc/sys/vm/laptop_mode + LegacyVaLayout *int64 // /proc/sys/vm/legacy_va_layout + LowmemReserveRatio []*int64 // /proc/sys/vm/lowmem_reserve_ratio + MaxMapCount *int64 // /proc/sys/vm/max_map_count + MemoryFailureEarlyKill *int64 // /proc/sys/vm/memory_failure_early_kill + MemoryFailureRecovery *int64 // /proc/sys/vm/memory_failure_recovery + MinFreeKbytes *int64 // /proc/sys/vm/min_free_kbytes + MinSlabRatio *int64 // /proc/sys/vm/min_slab_ratio + MinUnmappedRatio *int64 // /proc/sys/vm/min_unmapped_ratio + MmapMinAddr *int64 // /proc/sys/vm/mmap_min_addr + NrHugepages *int64 // /proc/sys/vm/nr_hugepages + NrHugepagesMempolicy *int64 // /proc/sys/vm/nr_hugepages_mempolicy + NrOvercommitHugepages *int64 // /proc/sys/vm/nr_overcommit_hugepages + NumaStat *int64 // /proc/sys/vm/numa_stat + NumaZonelistOrder string // /proc/sys/vm/numa_zonelist_order + OomDumpTasks *int64 // /proc/sys/vm/oom_dump_tasks + OomKillAllocatingTask *int64 // /proc/sys/vm/oom_kill_allocating_task + OvercommitKbytes *int64 // /proc/sys/vm/overcommit_kbytes + OvercommitMemory *int64 // /proc/sys/vm/overcommit_memory + OvercommitRatio *int64 // /proc/sys/vm/overcommit_ratio + PageCluster *int64 // /proc/sys/vm/page-cluster + PanicOnOom *int64 // /proc/sys/vm/panic_on_oom + PercpuPagelistFraction *int64 // /proc/sys/vm/percpu_pagelist_fraction + StatInterval *int64 // /proc/sys/vm/stat_interval + Swappiness *int64 // /proc/sys/vm/swappiness + UserReserveKbytes *int64 // /proc/sys/vm/user_reserve_kbytes + VfsCachePressure *int64 // /proc/sys/vm/vfs_cache_pressure + WatermarkBoostFactor *int64 // /proc/sys/vm/watermark_boost_factor + WatermarkScaleFactor *int64 // /proc/sys/vm/watermark_scale_factor + ZoneReclaimMode *int64 // /proc/sys/vm/zone_reclaim_mode +} + +// VM reads the VM statistics from the 
specified `proc` filesystem. +func (fs FS) VM() (*VM, error) { + path := fs.proc.Path("sys/vm") + file, err := os.Stat(path) + if err != nil { + return nil, err + } + if !file.Mode().IsDir() { + return nil, fmt.Errorf("%s is not a directory", path) + } + + files, err := ioutil.ReadDir(path) + if err != nil { + return nil, err + } + + var vm VM + for _, f := range files { + if f.IsDir() { + continue + } + + name := filepath.Join(path, f.Name()) + // ignore errors on read, as there are some write only + // in /proc/sys/vm + value, err := util.SysReadFile(name) + if err != nil { + continue + } + vp := util.NewValueParser(value) + + switch f.Name() { + case "admin_reserve_kbytes": + vm.AdminReserveKbytes = vp.PInt64() + case "block_dump": + vm.BlockDump = vp.PInt64() + case "compact_unevictable_allowed": + vm.CompactUnevictableAllowed = vp.PInt64() + case "dirty_background_bytes": + vm.DirtyBackgroundBytes = vp.PInt64() + case "dirty_background_ratio": + vm.DirtyBackgroundRatio = vp.PInt64() + case "dirty_bytes": + vm.DirtyBytes = vp.PInt64() + case "dirty_expire_centisecs": + vm.DirtyExpireCentisecs = vp.PInt64() + case "dirty_ratio": + vm.DirtyRatio = vp.PInt64() + case "dirtytime_expire_seconds": + vm.DirtytimeExpireSeconds = vp.PInt64() + case "dirty_writeback_centisecs": + vm.DirtyWritebackCentisecs = vp.PInt64() + case "drop_caches": + vm.DropCaches = vp.PInt64() + case "extfrag_threshold": + vm.ExtfragThreshold = vp.PInt64() + case "hugetlb_shm_group": + vm.HugetlbShmGroup = vp.PInt64() + case "laptop_mode": + vm.LaptopMode = vp.PInt64() + case "legacy_va_layout": + vm.LegacyVaLayout = vp.PInt64() + case "lowmem_reserve_ratio": + stringSlice := strings.Fields(value) + pint64Slice := make([]*int64, 0, len(stringSlice)) + for _, value := range stringSlice { + vp := util.NewValueParser(value) + pint64Slice = append(pint64Slice, vp.PInt64()) + } + vm.LowmemReserveRatio = pint64Slice + case "max_map_count": + vm.MaxMapCount = vp.PInt64() + case "memory_failure_early_kill": + vm.MemoryFailureEarlyKill = vp.PInt64() + case "memory_failure_recovery": + vm.MemoryFailureRecovery = vp.PInt64() + case "min_free_kbytes": + vm.MinFreeKbytes = vp.PInt64() + case "min_slab_ratio": + vm.MinSlabRatio = vp.PInt64() + case "min_unmapped_ratio": + vm.MinUnmappedRatio = vp.PInt64() + case "mmap_min_addr": + vm.MmapMinAddr = vp.PInt64() + case "nr_hugepages": + vm.NrHugepages = vp.PInt64() + case "nr_hugepages_mempolicy": + vm.NrHugepagesMempolicy = vp.PInt64() + case "nr_overcommit_hugepages": + vm.NrOvercommitHugepages = vp.PInt64() + case "numa_stat": + vm.NumaStat = vp.PInt64() + case "numa_zonelist_order": + vm.NumaZonelistOrder = value + case "oom_dump_tasks": + vm.OomDumpTasks = vp.PInt64() + case "oom_kill_allocating_task": + vm.OomKillAllocatingTask = vp.PInt64() + case "overcommit_kbytes": + vm.OvercommitKbytes = vp.PInt64() + case "overcommit_memory": + vm.OvercommitMemory = vp.PInt64() + case "overcommit_ratio": + vm.OvercommitRatio = vp.PInt64() + case "page-cluster": + vm.PageCluster = vp.PInt64() + case "panic_on_oom": + vm.PanicOnOom = vp.PInt64() + case "percpu_pagelist_fraction": + vm.PercpuPagelistFraction = vp.PInt64() + case "stat_interval": + vm.StatInterval = vp.PInt64() + case "swappiness": + vm.Swappiness = vp.PInt64() + case "user_reserve_kbytes": + vm.UserReserveKbytes = vp.PInt64() + case "vfs_cache_pressure": + vm.VfsCachePressure = vp.PInt64() + case "watermark_boost_factor": + vm.WatermarkBoostFactor = vp.PInt64() + case "watermark_scale_factor": + vm.WatermarkScaleFactor = 
vp.PInt64() + case "zone_reclaim_mode": + vm.ZoneReclaimMode = vp.PInt64() + } + if err := vp.Err(); err != nil { + return nil, err + } + } + + return &vm, nil +} diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go new file mode 100644 index 000000000..e941503d5 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/zoneinfo.go @@ -0,0 +1,196 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package procfs + +import ( + "bytes" + "fmt" + "io/ioutil" + "regexp" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Zoneinfo holds info parsed from /proc/zoneinfo. +type Zoneinfo struct { + Node string + Zone string + NrFreePages *int64 + Min *int64 + Low *int64 + High *int64 + Scanned *int64 + Spanned *int64 + Present *int64 + Managed *int64 + NrActiveAnon *int64 + NrInactiveAnon *int64 + NrIsolatedAnon *int64 + NrAnonPages *int64 + NrAnonTransparentHugepages *int64 + NrActiveFile *int64 + NrInactiveFile *int64 + NrIsolatedFile *int64 + NrFilePages *int64 + NrSlabReclaimable *int64 + NrSlabUnreclaimable *int64 + NrMlockStack *int64 + NrKernelStack *int64 + NrMapped *int64 + NrDirty *int64 + NrWriteback *int64 + NrUnevictable *int64 + NrShmem *int64 + NrDirtied *int64 + NrWritten *int64 + NumaHit *int64 + NumaMiss *int64 + NumaForeign *int64 + NumaInterleave *int64 + NumaLocal *int64 + NumaOther *int64 + Protection []*int64 +} + +var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`) + +// Zoneinfo parses an zoneinfo-file (/proc/zoneinfo) and returns a slice of +// structs containing the relevant info. 
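The new vm.go above reads every tunable under /proc/sys/vm into a single struct; the fields are pointers so that write-only or missing entries simply remain nil rather than failing the whole read. A short usage sketch, again assuming the standard procfs import path; the two fields shown are arbitrary examples:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}

	vm, err := fs.VM()
	if err != nil {
		log.Fatal(err)
	}
	// Nil checks are required: not every kernel exposes every file,
	// and some entries under /proc/sys/vm are write-only.
	if vm.Swappiness != nil {
		fmt.Println("vm.swappiness =", *vm.Swappiness)
	}
	if vm.OvercommitMemory != nil {
		fmt.Println("vm.overcommit_memory =", *vm.OvercommitMemory)
	}
}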
More information available here: +// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +func (fs FS) Zoneinfo() ([]Zoneinfo, error) { + data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo")) + if err != nil { + return nil, fmt.Errorf("error reading zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err) + } + zoneinfo, err := parseZoneinfo(data) + if err != nil { + return nil, fmt.Errorf("error parsing zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err) + } + return zoneinfo, nil +} + +func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) { + + zoneinfo := []Zoneinfo{} + + zoneinfoBlocks := bytes.Split(zoneinfoData, []byte("\nNode")) + for _, block := range zoneinfoBlocks { + var zoneinfoElement Zoneinfo + lines := strings.Split(string(block), "\n") + for _, line := range lines { + + if nodeZone := nodeZoneRE.FindStringSubmatch(line); nodeZone != nil { + zoneinfoElement.Node = nodeZone[1] + zoneinfoElement.Zone = nodeZone[2] + continue + } + if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") { + zoneinfoElement.Zone = "" + continue + } + parts := strings.Fields(strings.TrimSpace(line)) + if len(parts) < 2 { + continue + } + vp := util.NewValueParser(parts[1]) + switch parts[0] { + case "nr_free_pages": + zoneinfoElement.NrFreePages = vp.PInt64() + case "min": + zoneinfoElement.Min = vp.PInt64() + case "low": + zoneinfoElement.Low = vp.PInt64() + case "high": + zoneinfoElement.High = vp.PInt64() + case "scanned": + zoneinfoElement.Scanned = vp.PInt64() + case "spanned": + zoneinfoElement.Spanned = vp.PInt64() + case "present": + zoneinfoElement.Present = vp.PInt64() + case "managed": + zoneinfoElement.Managed = vp.PInt64() + case "nr_active_anon": + zoneinfoElement.NrActiveAnon = vp.PInt64() + case "nr_inactive_anon": + zoneinfoElement.NrInactiveAnon = vp.PInt64() + case "nr_isolated_anon": + zoneinfoElement.NrIsolatedAnon = vp.PInt64() + case "nr_anon_pages": + zoneinfoElement.NrAnonPages = vp.PInt64() + case "nr_anon_transparent_hugepages": + zoneinfoElement.NrAnonTransparentHugepages = vp.PInt64() + case "nr_active_file": + zoneinfoElement.NrActiveFile = vp.PInt64() + case "nr_inactive_file": + zoneinfoElement.NrInactiveFile = vp.PInt64() + case "nr_isolated_file": + zoneinfoElement.NrIsolatedFile = vp.PInt64() + case "nr_file_pages": + zoneinfoElement.NrFilePages = vp.PInt64() + case "nr_slab_reclaimable": + zoneinfoElement.NrSlabReclaimable = vp.PInt64() + case "nr_slab_unreclaimable": + zoneinfoElement.NrSlabUnreclaimable = vp.PInt64() + case "nr_mlock_stack": + zoneinfoElement.NrMlockStack = vp.PInt64() + case "nr_kernel_stack": + zoneinfoElement.NrKernelStack = vp.PInt64() + case "nr_mapped": + zoneinfoElement.NrMapped = vp.PInt64() + case "nr_dirty": + zoneinfoElement.NrDirty = vp.PInt64() + case "nr_writeback": + zoneinfoElement.NrWriteback = vp.PInt64() + case "nr_unevictable": + zoneinfoElement.NrUnevictable = vp.PInt64() + case "nr_shmem": + zoneinfoElement.NrShmem = vp.PInt64() + case "nr_dirtied": + zoneinfoElement.NrDirtied = vp.PInt64() + case "nr_written": + zoneinfoElement.NrWritten = vp.PInt64() + case "numa_hit": + zoneinfoElement.NumaHit = vp.PInt64() + case "numa_miss": + zoneinfoElement.NumaMiss = vp.PInt64() + case "numa_foreign": + zoneinfoElement.NumaForeign = vp.PInt64() + case "numa_interleave": + zoneinfoElement.NumaInterleave = vp.PInt64() + case "numa_local": + zoneinfoElement.NumaLocal = vp.PInt64() + case "numa_other": + zoneinfoElement.NumaOther = vp.PInt64() + case "protection:": + protectionParts := strings.Split(line, ":") + 
protectionValues := strings.Replace(protectionParts[1], "(", "", 1) + protectionValues = strings.Replace(protectionValues, ")", "", 1) + protectionValues = strings.TrimSpace(protectionValues) + protectionStringMap := strings.Split(protectionValues, ", ") + val, err := util.ParsePInt64s(protectionStringMap) + if err == nil { + zoneinfoElement.Protection = val + } + } + + } + + zoneinfo = append(zoneinfo, zoneinfoElement) + } + return zoneinfo, nil +} diff --git a/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml index f8a63b308..00d04cb9b 100644 --- a/vendor/github.com/spf13/pflag/.travis.yml +++ b/vendor/github.com/spf13/pflag/.travis.yml @@ -3,8 +3,9 @@ sudo: false language: go go: - - 1.7.3 - - 1.8.1 + - 1.9.x + - 1.10.x + - 1.11.x - tip matrix: @@ -12,7 +13,7 @@ matrix: - go: tip install: - - go get github.com/golang/lint/golint + - go get golang.org/x/lint/golint - export PATH=$GOPATH/bin:$PATH - go install ./... diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md index b052414d1..7eacc5bdb 100644 --- a/vendor/github.com/spf13/pflag/README.md +++ b/vendor/github.com/spf13/pflag/README.md @@ -86,8 +86,8 @@ fmt.Println("ip has value ", *ip) fmt.Println("flagvar has value ", flagvar) ``` -There are helpers function to get values later if you have the FlagSet but -it was difficult to keep up with all of the flag pointers in your code. +There are helper functions available to get the value stored in a Flag if you have a FlagSet but find +it difficult to keep up with all of the pointers in your code. If you have a pflag.FlagSet with a flag called 'flagname' of type int you can use GetInt() to get the int value. But notice that 'flagname' must exist and it must be an int. GetString("flagname") will fail. diff --git a/vendor/github.com/spf13/pflag/bool_slice.go b/vendor/github.com/spf13/pflag/bool_slice.go index 5af02f1a7..3731370d6 100644 --- a/vendor/github.com/spf13/pflag/bool_slice.go +++ b/vendor/github.com/spf13/pflag/bool_slice.go @@ -71,6 +71,44 @@ func (s *boolSliceValue) String() string { return "[" + out + "]" } +func (s *boolSliceValue) fromString(val string) (bool, error) { + return strconv.ParseBool(val) +} + +func (s *boolSliceValue) toString(val bool) string { + return strconv.FormatBool(val) +} + +func (s *boolSliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *boolSliceValue) Replace(val []string) error { + out := make([]bool, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *boolSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + func boolSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") // Empty string would cause a slice with one (empty) entry diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go index aa126e44d..a0b2679f7 100644 --- a/vendor/github.com/spf13/pflag/count.go +++ b/vendor/github.com/spf13/pflag/count.go @@ -46,7 +46,7 @@ func (f *FlagSet) GetCount(name string) (int, error) { // CountVar defines a count flag with specified name, default value, and usage string. // The argument p points to an int variable in which to store the value of the flag. 
-// A count flag will add 1 to its value evey time it is found on the command line +// A count flag will add 1 to its value every time it is found on the command line func (f *FlagSet) CountVar(p *int, name string, usage string) { f.CountVarP(p, name, "", usage) } @@ -69,7 +69,7 @@ func CountVarP(p *int, name, shorthand string, usage string) { // Count defines a count flag with specified name, default value, and usage string. // The return value is the address of an int variable that stores the value of the flag. -// A count flag will add 1 to its value evey time it is found on the command line +// A count flag will add 1 to its value every time it is found on the command line func (f *FlagSet) Count(name string, usage string) *int { p := new(int) f.CountVarP(p, name, "", usage) diff --git a/vendor/github.com/spf13/pflag/duration_slice.go b/vendor/github.com/spf13/pflag/duration_slice.go index 52c6b6dc1..badadda53 100644 --- a/vendor/github.com/spf13/pflag/duration_slice.go +++ b/vendor/github.com/spf13/pflag/duration_slice.go @@ -51,6 +51,44 @@ func (s *durationSliceValue) String() string { return "[" + strings.Join(out, ",") + "]" } +func (s *durationSliceValue) fromString(val string) (time.Duration, error) { + return time.ParseDuration(val) +} + +func (s *durationSliceValue) toString(val time.Duration) string { + return fmt.Sprintf("%s", val) +} + +func (s *durationSliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *durationSliceValue) Replace(val []string) error { + out := make([]time.Duration, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *durationSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + func durationSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") // Empty string would cause a slice with one (empty) entry diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go index 9beeda8ec..24a5036e9 100644 --- a/vendor/github.com/spf13/pflag/flag.go +++ b/vendor/github.com/spf13/pflag/flag.go @@ -57,9 +57,9 @@ that give one-letter shorthands for flags. You can use these by appending var ip = flag.IntP("flagname", "f", 1234, "help message") var flagvar bool func init() { - flag.BoolVarP("boolname", "b", true, "help message") + flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") } - flag.VarP(&flagVar, "varname", "v", 1234, "help message") + flag.VarP(&flagval, "varname", "v", "help message") Shorthand letters can be used with single dashes on the command line. Boolean shorthand flags can be combined with other shorthand flags. @@ -190,6 +190,18 @@ type Value interface { Type() string } +// SliceValue is a secondary interface to all flags which hold a list +// of values. This allows full control over the value of list flags, +// and avoids complicated marshalling and unmarshalling to csv. +type SliceValue interface { + // Append adds the specified value to the end of the flag value list. + Append(string) error + // Replace will fully overwrite any data currently in the flag value list. + Replace([]string) error + // GetSlice returns the flag value list as an array of strings. + GetSlice() []string +} + // sortFlags returns the flags as a slice in lexicographical sorted order. 
func sortFlags(flags map[NormalizedName]*Flag) []*Flag { list := make(sort.StringSlice, len(flags)) diff --git a/vendor/github.com/spf13/pflag/float32_slice.go b/vendor/github.com/spf13/pflag/float32_slice.go new file mode 100644 index 000000000..caa352741 --- /dev/null +++ b/vendor/github.com/spf13/pflag/float32_slice.go @@ -0,0 +1,174 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- float32Slice Value +type float32SliceValue struct { + value *[]float32 + changed bool +} + +func newFloat32SliceValue(val []float32, p *[]float32) *float32SliceValue { + isv := new(float32SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *float32SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]float32, len(ss)) + for i, d := range ss { + var err error + var temp64 float64 + temp64, err = strconv.ParseFloat(d, 32) + if err != nil { + return err + } + out[i] = float32(temp64) + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + s.changed = true + return nil +} + +func (s *float32SliceValue) Type() string { + return "float32Slice" +} + +func (s *float32SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%f", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *float32SliceValue) fromString(val string) (float32, error) { + t64, err := strconv.ParseFloat(val, 32) + if err != nil { + return 0, err + } + return float32(t64), nil +} + +func (s *float32SliceValue) toString(val float32) string { + return fmt.Sprintf("%f", val) +} + +func (s *float32SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *float32SliceValue) Replace(val []string) error { + out := make([]float32, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *float32SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func float32SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []float32{}, nil + } + ss := strings.Split(val, ",") + out := make([]float32, len(ss)) + for i, d := range ss { + var err error + var temp64 float64 + temp64, err = strconv.ParseFloat(d, 32) + if err != nil { + return nil, err + } + out[i] = float32(temp64) + + } + return out, nil +} + +// GetFloat32Slice return the []float32 value of a flag with the given name +func (f *FlagSet) GetFloat32Slice(name string) ([]float32, error) { + val, err := f.getFlagType(name, "float32Slice", float32SliceConv) + if err != nil { + return []float32{}, err + } + return val.([]float32), nil +} + +// Float32SliceVar defines a float32Slice flag with specified name, default value, and usage string. +// The argument p points to a []float32 variable in which to store the value of the flag. +func (f *FlagSet) Float32SliceVar(p *[]float32, name string, value []float32, usage string) { + f.VarP(newFloat32SliceValue(value, p), name, "", usage) +} + +// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash. 
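Besides the new float32 slice flag defined in this file, the pflag update introduces the SliceValue interface shown earlier, so list-valued flags can be appended to or replaced without round-tripping through their comma-separated string form. A brief sketch; the flag name, default, and sample arguments are invented for illustration:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	weights := fs.Float32Slice("weights", []float32{1}, "list of weights")

	// Repeated occurrences of a slice flag accumulate.
	if err := fs.Parse([]string{"--weights=0.5,0.25", "--weights=0.125"}); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(*weights) // [0.5 0.25 0.125]

	// The same flag can also be manipulated generically via SliceValue.
	if sv, ok := fs.Lookup("weights").Value.(pflag.SliceValue); ok {
		if err := sv.Append("2"); err == nil {
			fmt.Println(sv.GetSlice()) // values formatted back as strings
		}
	}
}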
+func (f *FlagSet) Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) { + f.VarP(newFloat32SliceValue(value, p), name, shorthand, usage) +} + +// Float32SliceVar defines a float32[] flag with specified name, default value, and usage string. +// The argument p points to a float32[] variable in which to store the value of the flag. +func Float32SliceVar(p *[]float32, name string, value []float32, usage string) { + CommandLine.VarP(newFloat32SliceValue(value, p), name, "", usage) +} + +// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) { + CommandLine.VarP(newFloat32SliceValue(value, p), name, shorthand, usage) +} + +// Float32Slice defines a []float32 flag with specified name, default value, and usage string. +// The return value is the address of a []float32 variable that stores the value of the flag. +func (f *FlagSet) Float32Slice(name string, value []float32, usage string) *[]float32 { + p := []float32{} + f.Float32SliceVarP(&p, name, "", value, usage) + return &p +} + +// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 { + p := []float32{} + f.Float32SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Float32Slice defines a []float32 flag with specified name, default value, and usage string. +// The return value is the address of a []float32 variable that stores the value of the flag. +func Float32Slice(name string, value []float32, usage string) *[]float32 { + return CommandLine.Float32SliceP(name, "", value, usage) +} + +// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash. +func Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 { + return CommandLine.Float32SliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/float64_slice.go b/vendor/github.com/spf13/pflag/float64_slice.go new file mode 100644 index 000000000..85bf3073d --- /dev/null +++ b/vendor/github.com/spf13/pflag/float64_slice.go @@ -0,0 +1,166 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- float64Slice Value +type float64SliceValue struct { + value *[]float64 + changed bool +} + +func newFloat64SliceValue(val []float64, p *[]float64) *float64SliceValue { + isv := new(float64SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *float64SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]float64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseFloat(d, 64) + if err != nil { + return err + } + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) 
+ } + s.changed = true + return nil +} + +func (s *float64SliceValue) Type() string { + return "float64Slice" +} + +func (s *float64SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%f", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *float64SliceValue) fromString(val string) (float64, error) { + return strconv.ParseFloat(val, 64) +} + +func (s *float64SliceValue) toString(val float64) string { + return fmt.Sprintf("%f", val) +} + +func (s *float64SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *float64SliceValue) Replace(val []string) error { + out := make([]float64, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *float64SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func float64SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []float64{}, nil + } + ss := strings.Split(val, ",") + out := make([]float64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseFloat(d, 64) + if err != nil { + return nil, err + } + + } + return out, nil +} + +// GetFloat64Slice return the []float64 value of a flag with the given name +func (f *FlagSet) GetFloat64Slice(name string) ([]float64, error) { + val, err := f.getFlagType(name, "float64Slice", float64SliceConv) + if err != nil { + return []float64{}, err + } + return val.([]float64), nil +} + +// Float64SliceVar defines a float64Slice flag with specified name, default value, and usage string. +// The argument p points to a []float64 variable in which to store the value of the flag. +func (f *FlagSet) Float64SliceVar(p *[]float64, name string, value []float64, usage string) { + f.VarP(newFloat64SliceValue(value, p), name, "", usage) +} + +// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) { + f.VarP(newFloat64SliceValue(value, p), name, shorthand, usage) +} + +// Float64SliceVar defines a float64[] flag with specified name, default value, and usage string. +// The argument p points to a float64[] variable in which to store the value of the flag. +func Float64SliceVar(p *[]float64, name string, value []float64, usage string) { + CommandLine.VarP(newFloat64SliceValue(value, p), name, "", usage) +} + +// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) { + CommandLine.VarP(newFloat64SliceValue(value, p), name, shorthand, usage) +} + +// Float64Slice defines a []float64 flag with specified name, default value, and usage string. +// The return value is the address of a []float64 variable that stores the value of the flag. 
+func (f *FlagSet) Float64Slice(name string, value []float64, usage string) *[]float64 { + p := []float64{} + f.Float64SliceVarP(&p, name, "", value, usage) + return &p +} + +// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 { + p := []float64{} + f.Float64SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Float64Slice defines a []float64 flag with specified name, default value, and usage string. +// The return value is the address of a []float64 variable that stores the value of the flag. +func Float64Slice(name string, value []float64, usage string) *[]float64 { + return CommandLine.Float64SliceP(name, "", value, usage) +} + +// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash. +func Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 { + return CommandLine.Float64SliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/go.mod b/vendor/github.com/spf13/pflag/go.mod new file mode 100644 index 000000000..b2287eec1 --- /dev/null +++ b/vendor/github.com/spf13/pflag/go.mod @@ -0,0 +1,3 @@ +module github.com/spf13/pflag + +go 1.12 diff --git a/vendor/github.com/spf13/pflag/go.sum b/vendor/github.com/spf13/pflag/go.sum new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/spf13/pflag/int32_slice.go b/vendor/github.com/spf13/pflag/int32_slice.go new file mode 100644 index 000000000..ff128ff06 --- /dev/null +++ b/vendor/github.com/spf13/pflag/int32_slice.go @@ -0,0 +1,174 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- int32Slice Value +type int32SliceValue struct { + value *[]int32 + changed bool +} + +func newInt32SliceValue(val []int32, p *[]int32) *int32SliceValue { + isv := new(int32SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *int32SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]int32, len(ss)) + for i, d := range ss { + var err error + var temp64 int64 + temp64, err = strconv.ParseInt(d, 0, 32) + if err != nil { + return err + } + out[i] = int32(temp64) + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) 
+ } + s.changed = true + return nil +} + +func (s *int32SliceValue) Type() string { + return "int32Slice" +} + +func (s *int32SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%d", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *int32SliceValue) fromString(val string) (int32, error) { + t64, err := strconv.ParseInt(val, 0, 32) + if err != nil { + return 0, err + } + return int32(t64), nil +} + +func (s *int32SliceValue) toString(val int32) string { + return fmt.Sprintf("%d", val) +} + +func (s *int32SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *int32SliceValue) Replace(val []string) error { + out := make([]int32, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *int32SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func int32SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []int32{}, nil + } + ss := strings.Split(val, ",") + out := make([]int32, len(ss)) + for i, d := range ss { + var err error + var temp64 int64 + temp64, err = strconv.ParseInt(d, 0, 32) + if err != nil { + return nil, err + } + out[i] = int32(temp64) + + } + return out, nil +} + +// GetInt32Slice return the []int32 value of a flag with the given name +func (f *FlagSet) GetInt32Slice(name string) ([]int32, error) { + val, err := f.getFlagType(name, "int32Slice", int32SliceConv) + if err != nil { + return []int32{}, err + } + return val.([]int32), nil +} + +// Int32SliceVar defines a int32Slice flag with specified name, default value, and usage string. +// The argument p points to a []int32 variable in which to store the value of the flag. +func (f *FlagSet) Int32SliceVar(p *[]int32, name string, value []int32, usage string) { + f.VarP(newInt32SliceValue(value, p), name, "", usage) +} + +// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) { + f.VarP(newInt32SliceValue(value, p), name, shorthand, usage) +} + +// Int32SliceVar defines a int32[] flag with specified name, default value, and usage string. +// The argument p points to a int32[] variable in which to store the value of the flag. +func Int32SliceVar(p *[]int32, name string, value []int32, usage string) { + CommandLine.VarP(newInt32SliceValue(value, p), name, "", usage) +} + +// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) { + CommandLine.VarP(newInt32SliceValue(value, p), name, shorthand, usage) +} + +// Int32Slice defines a []int32 flag with specified name, default value, and usage string. +// The return value is the address of a []int32 variable that stores the value of the flag. 
+func (f *FlagSet) Int32Slice(name string, value []int32, usage string) *[]int32 { + p := []int32{} + f.Int32SliceVarP(&p, name, "", value, usage) + return &p +} + +// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 { + p := []int32{} + f.Int32SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Int32Slice defines a []int32 flag with specified name, default value, and usage string. +// The return value is the address of a []int32 variable that stores the value of the flag. +func Int32Slice(name string, value []int32, usage string) *[]int32 { + return CommandLine.Int32SliceP(name, "", value, usage) +} + +// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash. +func Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 { + return CommandLine.Int32SliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int64_slice.go b/vendor/github.com/spf13/pflag/int64_slice.go new file mode 100644 index 000000000..25464638f --- /dev/null +++ b/vendor/github.com/spf13/pflag/int64_slice.go @@ -0,0 +1,166 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- int64Slice Value +type int64SliceValue struct { + value *[]int64 + changed bool +} + +func newInt64SliceValue(val []int64, p *[]int64) *int64SliceValue { + isv := new(int64SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *int64SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]int64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseInt(d, 0, 64) + if err != nil { + return err + } + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) 
+ } + s.changed = true + return nil +} + +func (s *int64SliceValue) Type() string { + return "int64Slice" +} + +func (s *int64SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%d", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *int64SliceValue) fromString(val string) (int64, error) { + return strconv.ParseInt(val, 0, 64) +} + +func (s *int64SliceValue) toString(val int64) string { + return fmt.Sprintf("%d", val) +} + +func (s *int64SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *int64SliceValue) Replace(val []string) error { + out := make([]int64, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *int64SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func int64SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []int64{}, nil + } + ss := strings.Split(val, ",") + out := make([]int64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseInt(d, 0, 64) + if err != nil { + return nil, err + } + + } + return out, nil +} + +// GetInt64Slice return the []int64 value of a flag with the given name +func (f *FlagSet) GetInt64Slice(name string) ([]int64, error) { + val, err := f.getFlagType(name, "int64Slice", int64SliceConv) + if err != nil { + return []int64{}, err + } + return val.([]int64), nil +} + +// Int64SliceVar defines a int64Slice flag with specified name, default value, and usage string. +// The argument p points to a []int64 variable in which to store the value of the flag. +func (f *FlagSet) Int64SliceVar(p *[]int64, name string, value []int64, usage string) { + f.VarP(newInt64SliceValue(value, p), name, "", usage) +} + +// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) { + f.VarP(newInt64SliceValue(value, p), name, shorthand, usage) +} + +// Int64SliceVar defines a int64[] flag with specified name, default value, and usage string. +// The argument p points to a int64[] variable in which to store the value of the flag. +func Int64SliceVar(p *[]int64, name string, value []int64, usage string) { + CommandLine.VarP(newInt64SliceValue(value, p), name, "", usage) +} + +// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) { + CommandLine.VarP(newInt64SliceValue(value, p), name, shorthand, usage) +} + +// Int64Slice defines a []int64 flag with specified name, default value, and usage string. +// The return value is the address of a []int64 variable that stores the value of the flag. +func (f *FlagSet) Int64Slice(name string, value []int64, usage string) *[]int64 { + p := []int64{} + f.Int64SliceVarP(&p, name, "", value, usage) + return &p +} + +// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 { + p := []int64{} + f.Int64SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Int64Slice defines a []int64 flag with specified name, default value, and usage string. +// The return value is the address of a []int64 variable that stores the value of the flag. +func Int64Slice(name string, value []int64, usage string) *[]int64 { + return CommandLine.Int64SliceP(name, "", value, usage) +} + +// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash. +func Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 { + return CommandLine.Int64SliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int_slice.go b/vendor/github.com/spf13/pflag/int_slice.go index 1e7c9edde..e71c39d91 100644 --- a/vendor/github.com/spf13/pflag/int_slice.go +++ b/vendor/github.com/spf13/pflag/int_slice.go @@ -51,6 +51,36 @@ func (s *intSliceValue) String() string { return "[" + strings.Join(out, ",") + "]" } +func (s *intSliceValue) Append(val string) error { + i, err := strconv.Atoi(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *intSliceValue) Replace(val []string) error { + out := make([]int, len(val)) + for i, d := range val { + var err error + out[i], err = strconv.Atoi(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *intSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = strconv.Itoa(d) + } + return out +} + func intSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") // Empty string would cause a slice with one (empty) entry diff --git a/vendor/github.com/spf13/pflag/ip_slice.go b/vendor/github.com/spf13/pflag/ip_slice.go index 7dd196fe3..775faae4f 100644 --- a/vendor/github.com/spf13/pflag/ip_slice.go +++ b/vendor/github.com/spf13/pflag/ip_slice.go @@ -72,9 +72,47 @@ func (s *ipSliceValue) String() string { return "[" + out + "]" } +func (s *ipSliceValue) fromString(val string) (net.IP, error) { + return net.ParseIP(strings.TrimSpace(val)), nil +} + +func (s *ipSliceValue) toString(val net.IP) string { + return val.String() +} + +func (s *ipSliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *ipSliceValue) Replace(val []string) error { + out := make([]net.IP, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *ipSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + func ipSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") - // Emtpy string would cause a slice with one (empty) entry + // Empty string would cause a slice with one (empty) entry if len(val) == 0 { return []net.IP{}, nil } diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go index fa7bc6018..4894af818 100644 --- a/vendor/github.com/spf13/pflag/string_array.go +++ b/vendor/github.com/spf13/pflag/string_array.go @@ -23,6 +23,32 @@ func (s *stringArrayValue) Set(val string) error { return nil } +func (s *stringArrayValue) Append(val string) error { + *s.value = append(*s.value, val) + 
return nil +} + +func (s *stringArrayValue) Replace(val []string) error { + out := make([]string, len(val)) + for i, d := range val { + var err error + out[i] = d + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *stringArrayValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = d + } + return out +} + func (s *stringArrayValue) Type() string { return "stringArray" } diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go index 0cd3ccc08..3cb2e69db 100644 --- a/vendor/github.com/spf13/pflag/string_slice.go +++ b/vendor/github.com/spf13/pflag/string_slice.go @@ -62,6 +62,20 @@ func (s *stringSliceValue) String() string { return "[" + str + "]" } +func (s *stringSliceValue) Append(val string) error { + *s.value = append(*s.value, val) + return nil +} + +func (s *stringSliceValue) Replace(val []string) error { + *s.value = val + return nil +} + +func (s *stringSliceValue) GetSlice() []string { + return *s.value +} + func stringSliceConv(sval string) (interface{}, error) { sval = sval[1 : len(sval)-1] // An empty string would cause a slice with one (empty) string @@ -84,7 +98,7 @@ func (f *FlagSet) GetStringSlice(name string) ([]string, error) { // The argument p points to a []string variable in which to store the value of the flag. // Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. // For example: -// --ss="v1,v2" -ss="v3" +// --ss="v1,v2" --ss="v3" // will result in // []string{"v1", "v2", "v3"} func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) { @@ -100,7 +114,7 @@ func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []s // The argument p points to a []string variable in which to store the value of the flag. // Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. // For example: -// --ss="v1,v2" -ss="v3" +// --ss="v1,v2" --ss="v3" // will result in // []string{"v1", "v2", "v3"} func StringSliceVar(p *[]string, name string, value []string, usage string) { @@ -116,7 +130,7 @@ func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage // The return value is the address of a []string variable that stores the value of the flag. // Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. // For example: -// --ss="v1,v2" -ss="v3" +// --ss="v1,v2" --ss="v3" // will result in // []string{"v1", "v2", "v3"} func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string { @@ -136,7 +150,7 @@ func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage str // The return value is the address of a []string variable that stores the value of the flag. // Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. 
// For example: -// --ss="v1,v2" -ss="v3" +// --ss="v1,v2" --ss="v3" // will result in // []string{"v1", "v2", "v3"} func StringSlice(name string, value []string, usage string) *[]string { diff --git a/vendor/github.com/spf13/pflag/string_to_int64.go b/vendor/github.com/spf13/pflag/string_to_int64.go new file mode 100644 index 000000000..a807a04a0 --- /dev/null +++ b/vendor/github.com/spf13/pflag/string_to_int64.go @@ -0,0 +1,149 @@ +package pflag + +import ( + "bytes" + "fmt" + "strconv" + "strings" +) + +// -- stringToInt64 Value +type stringToInt64Value struct { + value *map[string]int64 + changed bool +} + +func newStringToInt64Value(val map[string]int64, p *map[string]int64) *stringToInt64Value { + ssv := new(stringToInt64Value) + ssv.value = p + *ssv.value = val + return ssv +} + +// Format: a=1,b=2 +func (s *stringToInt64Value) Set(val string) error { + ss := strings.Split(val, ",") + out := make(map[string]int64, len(ss)) + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return fmt.Errorf("%s must be formatted as key=value", pair) + } + var err error + out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64) + if err != nil { + return err + } + } + if !s.changed { + *s.value = out + } else { + for k, v := range out { + (*s.value)[k] = v + } + } + s.changed = true + return nil +} + +func (s *stringToInt64Value) Type() string { + return "stringToInt64" +} + +func (s *stringToInt64Value) String() string { + var buf bytes.Buffer + i := 0 + for k, v := range *s.value { + if i > 0 { + buf.WriteRune(',') + } + buf.WriteString(k) + buf.WriteRune('=') + buf.WriteString(strconv.FormatInt(v, 10)) + i++ + } + return "[" + buf.String() + "]" +} + +func stringToInt64Conv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // An empty string would cause an empty map + if len(val) == 0 { + return map[string]int64{}, nil + } + ss := strings.Split(val, ",") + out := make(map[string]int64, len(ss)) + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return nil, fmt.Errorf("%s must be formatted as key=value", pair) + } + var err error + out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64) + if err != nil { + return nil, err + } + } + return out, nil +} + +// GetStringToInt64 return the map[string]int64 value of a flag with the given name +func (f *FlagSet) GetStringToInt64(name string) (map[string]int64, error) { + val, err := f.getFlagType(name, "stringToInt64", stringToInt64Conv) + if err != nil { + return map[string]int64{}, err + } + return val.(map[string]int64), nil +} + +// StringToInt64Var defines a string flag with specified name, default value, and usage string. +// The argument p point64s to a map[string]int64 variable in which to store the values of the multiple flags. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) { + f.VarP(newStringToInt64Value(value, p), name, "", usage) +} + +// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) { + f.VarP(newStringToInt64Value(value, p), name, shorthand, usage) +} + +// StringToInt64Var defines a string flag with specified name, default value, and usage string. +// The argument p point64s to a map[string]int64 variable in which to store the value of the flag. 
+// The value of each argument will not try to be separated by comma +func StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) { + CommandLine.VarP(newStringToInt64Value(value, p), name, "", usage) +} + +// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash. +func StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) { + CommandLine.VarP(newStringToInt64Value(value, p), name, shorthand, usage) +} + +// StringToInt64 defines a string flag with specified name, default value, and usage string. +// The return value is the address of a map[string]int64 variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 { + p := map[string]int64{} + f.StringToInt64VarP(&p, name, "", value, usage) + return &p +} + +// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 { + p := map[string]int64{} + f.StringToInt64VarP(&p, name, shorthand, value, usage) + return &p +} + +// StringToInt64 defines a string flag with specified name, default value, and usage string. +// The return value is the address of a map[string]int64 variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 { + return CommandLine.StringToInt64P(name, "", value, usage) +} + +// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash. 
+func StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 { + return CommandLine.StringToInt64P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/uint_slice.go b/vendor/github.com/spf13/pflag/uint_slice.go index edd94c600..5fa924835 100644 --- a/vendor/github.com/spf13/pflag/uint_slice.go +++ b/vendor/github.com/spf13/pflag/uint_slice.go @@ -50,6 +50,48 @@ func (s *uintSliceValue) String() string { return "[" + strings.Join(out, ",") + "]" } +func (s *uintSliceValue) fromString(val string) (uint, error) { + t, err := strconv.ParseUint(val, 10, 0) + if err != nil { + return 0, err + } + return uint(t), nil +} + +func (s *uintSliceValue) toString(val uint) string { + return fmt.Sprintf("%d", val) +} + +func (s *uintSliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *uintSliceValue) Replace(val []string) error { + out := make([]uint, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *uintSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + func uintSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") // Empty string would cause a slice with one (empty) entry diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore index 0a4504f11..c3fa25389 100644 --- a/vendor/go.uber.org/atomic/.gitignore +++ b/vendor/go.uber.org/atomic/.gitignore @@ -1,6 +1,7 @@ +/bin .DS_Store /vendor -/cover +cover.html cover.out lint.log diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml index 0f3769e5f..4e73268b6 100644 --- a/vendor/go.uber.org/atomic/.travis.yml +++ b/vendor/go.uber.org/atomic/.travis.yml @@ -2,26 +2,26 @@ sudo: false language: go go_import_path: go.uber.org/atomic -go: - - 1.11.x - - 1.12.x +env: + global: + - GO111MODULE=on matrix: include: - go: 1.12.x - env: NO_TEST=yes LINT=yes + - go: 1.13.x + env: LINT=1 cache: directories: - vendor -install: - - make install_ci +before_install: + - go version script: - - test -n "$NO_TEST" || make test_ci - - test -n "$NO_TEST" || scripts/test-ubergo.sh - - test -z "$LINT" || make install_lint lint + - test -z "$LINT" || make lint + - make cover after_success: - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md new file mode 100644 index 000000000..6db846f10 --- /dev/null +++ b/vendor/go.uber.org/atomic/CHANGELOG.md @@ -0,0 +1,59 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [1.5.1] - 2019-11-19 +- Fix bug where `Bool.CAS` and `Bool.Toggle` do work correctly together + causing `CAS` to fail even though the old value matches. + +## [1.5.0] - 2019-10-29 +### Changed +- With Go modules, only the `go.uber.org/atomic` import path is supported now. + If you need to use the old import path, please add a `replace` directive to + your `go.mod`. + +## [1.4.0] - 2019-05-01 +### Added + - Add `atomic.Error` type for atomic operations on `error` values. 
+ +## [1.3.2] - 2018-05-02 +### Added +- Add `atomic.Duration` type for atomic operations on `time.Duration` values. + +## [1.3.1] - 2017-11-14 +### Fixed +- Revert optimization for `atomic.String.Store("")` which caused data races. + +## [1.3.0] - 2017-11-13 +### Added +- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools. + +### Changed +- Optimize `atomic.String.Store("")` by avoiding an allocation. + +## [1.2.0] - 2017-04-12 +### Added +- Shadow `atomic.Value` from `sync/atomic`. + +## [1.1.0] - 2017-03-10 +### Added +- Add atomic `Float64` type. + +### Changed +- Support new `go.uber.org/atomic` import path. + +## [1.0.0] - 2016-07-18 + +- Initial release. + +[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1 +[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0 +[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0 +[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2 +[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1 +[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0 +[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0 +[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0 +[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0 diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile index 1ef263075..39af0fb63 100644 --- a/vendor/go.uber.org/atomic/Makefile +++ b/vendor/go.uber.org/atomic/Makefile @@ -1,51 +1,35 @@ -# Many Go tools take file globs or directories as arguments instead of packages. -PACKAGE_FILES ?= *.go +# Directory to place `go install`ed binaries into. +export GOBIN ?= $(shell pwd)/bin -# For pre go1.6 -export GO15VENDOREXPERIMENT=1 +GOLINT = $(GOBIN)/golint +GO_FILES ?= *.go .PHONY: build build: - go build -i ./... - - -.PHONY: install -install: - glide --version || go get github.com/Masterminds/glide - glide install - + go build ./... .PHONY: test test: - go test -cover -race ./... + go test -race ./... +.PHONY: gofmt +gofmt: + $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX)) + gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true + @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" && cat $(FMT_LOG) && false) -.PHONY: install_ci -install_ci: install - go get github.com/wadey/gocovmerge - go get github.com/mattn/goveralls - go get golang.org/x/tools/cmd/cover - -.PHONY: install_lint -install_lint: - go get golang.org/x/lint/golint +$(GOLINT): + go install golang.org/x/lint/golint +.PHONY: golint +golint: $(GOLINT) + $(GOLINT) ./... .PHONY: lint -lint: - @rm -rf lint.log - @echo "Checking formatting..." - @gofmt -d -s $(PACKAGE_FILES) 2>&1 | tee lint.log - @echo "Checking vet..." - @go vet ./... 2>&1 | tee -a lint.log;) - @echo "Checking lint..." - @golint $$(go list ./...) 2>&1 | tee -a lint.log - @echo "Checking for unresolved FIXMEs..." - @git grep -i fixme | grep -v -e vendor -e Makefile | tee -a lint.log - @[ ! -s lint.log ] - - -.PHONY: test_ci -test_ci: install_ci build - ./scripts/cover.sh $(shell go list $(PACKAGES)) +lint: gofmt golint + +.PHONY: cover +cover: + go test -coverprofile=cover.out -coverpkg ./... -v ./... + go tool cover -html=cover.out -o cover.html diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md index 62eb8e576..ade0c20f1 100644 --- a/vendor/go.uber.org/atomic/README.md +++ b/vendor/go.uber.org/atomic/README.md @@ -3,9 +3,34 @@ Simple wrappers for primitive types to enforce atomic access. 
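The v1.5.1 changelog entry above concerns the interplay of Bool.CAS and Bool.Toggle; the corrected Toggle implementation appears in the atomic.go hunk further down. A minimal sketch of that API, assuming the vendored go.uber.org/atomic version (illustration only, not part of the vendored files):

```go
// Illustrative sketch only; assumes the vendored go.uber.org/atomic version.
package main

import (
	"fmt"

	"go.uber.org/atomic"
)

func main() {
	b := atomic.NewBool(false)

	prev := b.Toggle() // returns the previous value (false); b now holds true

	// With the CAS-loop Toggle from this update, the stored value stays a
	// plain 0/1, so a CAS whose old value matches succeeds as expected.
	swapped := b.CAS(true, false)

	fmt.Println(prev, swapped, b.Load()) // false true false
}
```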
## Installation -`go get -u go.uber.org/atomic` + +```shell +$ go get -u go.uber.org/atomic@v1 +``` + +### Legacy Import Path + +As of v1.5.0, the import path `go.uber.org/atomic` is the only supported way +of using this package. If you are using Go modules, this package will fail to +compile with the legacy import path path `github.com/uber-go/atomic`. + +We recommend migrating your code to the new import path but if you're unable +to do so, or if your dependencies are still using the old import path, you +will have to add a `replace` directive to your `go.mod` file downgrading the +legacy import path to an older version. + +``` +replace github.com/uber-go/atomic => github.com/uber-go/atomic v1.4.0 +``` + +You can do so automatically by running the following command. + +```shell +$ go mod edit -replace github.com/uber-go/atomic=github.com/uber-go/atomic@v1.4.0 +``` ## Usage + The standard library's `sync/atomic` is powerful, but it's easy to forget which variables must be accessed atomically. `go.uber.org/atomic` preserves all the functionality of the standard library, but wraps the primitive types to @@ -21,9 +46,11 @@ atom.CAS(40, 11) See the [documentation][doc] for a complete API specification. ## Development Status + Stable. -___ +--- + Released under the [MIT License](LICENSE.txt). [doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg diff --git a/vendor/go.uber.org/atomic/atomic.go b/vendor/go.uber.org/atomic/atomic.go index 1db6849fc..ad5fa0980 100644 --- a/vendor/go.uber.org/atomic/atomic.go +++ b/vendor/go.uber.org/atomic/atomic.go @@ -250,11 +250,16 @@ func (b *Bool) Swap(new bool) bool { // Toggle atomically negates the Boolean and returns the previous value. func (b *Bool) Toggle() bool { - return truthy(atomic.AddUint32(&b.v, 1) - 1) + for { + old := b.Load() + if b.CAS(old, !old) { + return old + } + } } func truthy(n uint32) bool { - return n&1 == 1 + return n == 1 } func boolToInt(b bool) uint32 { diff --git a/vendor/go.uber.org/atomic/glide.lock b/vendor/go.uber.org/atomic/glide.lock deleted file mode 100644 index 3c72c5997..000000000 --- a/vendor/go.uber.org/atomic/glide.lock +++ /dev/null @@ -1,17 +0,0 @@ -hash: f14d51408e3e0e4f73b34e4039484c78059cd7fc5f4996fdd73db20dc8d24f53 -updated: 2016-10-27T00:10:51.16960137-07:00 -imports: [] -testImports: -- name: github.com/davecgh/go-spew - version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d - subpackages: - - spew -- name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d - subpackages: - - difflib -- name: github.com/stretchr/testify - version: d77da356e56a7428ad25149ca77381849a6a5232 - subpackages: - - assert - - require diff --git a/vendor/go.uber.org/atomic/glide.yaml b/vendor/go.uber.org/atomic/glide.yaml deleted file mode 100644 index 4cf608ec0..000000000 --- a/vendor/go.uber.org/atomic/glide.yaml +++ /dev/null @@ -1,6 +0,0 @@ -package: go.uber.org/atomic -testImport: -- package: github.com/stretchr/testify - subpackages: - - assert - - require diff --git a/vendor/go.uber.org/atomic/go.mod b/vendor/go.uber.org/atomic/go.mod new file mode 100644 index 000000000..a935daebb --- /dev/null +++ b/vendor/go.uber.org/atomic/go.mod @@ -0,0 +1,10 @@ +module go.uber.org/atomic + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/stretchr/testify v1.3.0 + golang.org/x/lint v0.0.0-20190930215403-16217165b5de + golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c // indirect +) + +go 1.13 diff --git a/vendor/go.uber.org/atomic/go.sum b/vendor/go.uber.org/atomic/go.sum 
new file mode 100644 index 000000000..51b2b62af --- /dev/null +++ b/vendor/go.uber.org/atomic/go.sum @@ -0,0 +1,22 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c h1:IGkKhmfzcztjm6gYkykvu/NiS8kaqbCWAEWWAyf8J5U= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/go.uber.org/atomic/tools.go b/vendor/go.uber.org/atomic/tools.go new file mode 100644 index 000000000..654f5b2fe --- /dev/null +++ b/vendor/go.uber.org/atomic/tools.go @@ -0,0 +1,28 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// +build tools + +package atomic + +import ( + // Tools used during development. + _ "golang.org/x/lint/golint" +) diff --git a/vendor/go.uber.org/multierr/.gitignore b/vendor/go.uber.org/multierr/.gitignore index 61ead8666..b9a05e3da 100644 --- a/vendor/go.uber.org/multierr/.gitignore +++ b/vendor/go.uber.org/multierr/.gitignore @@ -1 +1,4 @@ /vendor +cover.html +cover.out +/bin diff --git a/vendor/go.uber.org/multierr/.travis.yml b/vendor/go.uber.org/multierr/.travis.yml index 5ffa8fed4..786c917a3 100644 --- a/vendor/go.uber.org/multierr/.travis.yml +++ b/vendor/go.uber.org/multierr/.travis.yml @@ -5,11 +5,12 @@ go_import_path: go.uber.org/multierr env: global: - GO15VENDOREXPERIMENT=1 + - GO111MODULE=on go: - - 1.7 - - 1.8 - - tip + - 1.11.x + - 1.12.x + - 1.13.x cache: directories: @@ -18,16 +19,11 @@ cache: before_install: - go version -install: -- | - set -e - make install_ci - script: - | set -e make lint - make test_ci + make cover after_success: - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md index 898445d06..abb355a06 100644 --- a/vendor/go.uber.org/multierr/CHANGELOG.md +++ b/vendor/go.uber.org/multierr/CHANGELOG.md @@ -1,6 +1,26 @@ Releases ======== +v1.4.0 (2019-11-04) +=================== + +- Add `AppendInto` function to more ergonomically build errors inside a + loop. + + +v1.3.0 (2019-10-29) +=================== + +- Switch to Go modules. + + +v1.2.0 (2019-09-26) +=================== + +- Support extracting and matching against wrapped errors with `errors.As` + and `errors.Is`. + + v1.1.0 (2017-06-30) =================== diff --git a/vendor/go.uber.org/multierr/Makefile b/vendor/go.uber.org/multierr/Makefile index a7437d061..416018237 100644 --- a/vendor/go.uber.org/multierr/Makefile +++ b/vendor/go.uber.org/multierr/Makefile @@ -1,23 +1,17 @@ -export GO15VENDOREXPERIMENT=1 - -PACKAGES := $(shell glide nv) +# Directory to put `go install`ed binaries in. +export GOBIN ?= $(shell pwd)/bin GO_FILES := $(shell \ find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ -o -name '*.go' -print | cut -b3-) -.PHONY: install -install: - glide --version || go get github.com/Masterminds/glide - glide install - .PHONY: build build: - go build -i $(PACKAGES) + go build ./... .PHONY: test test: - go test -cover -race $(PACKAGES) + go test -race ./... .PHONY: gofmt gofmt: @@ -25,50 +19,24 @@ gofmt: @gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" | cat - $(FMT_LOG) && false) -.PHONY: govet -govet: - $(eval VET_LOG := $(shell mktemp -t govet.XXXXX)) - @go vet $(PACKAGES) 2>&1 \ - | grep -v '^exit status' > $(VET_LOG) || true - @[ ! -s "$(VET_LOG)" ] || (echo "govet failed:" | cat - $(VET_LOG) && false) - .PHONY: golint golint: - @go get github.com/golang/lint/golint - $(eval LINT_LOG := $(shell mktemp -t golint.XXXXX)) - @cat /dev/null > $(LINT_LOG) - @$(foreach pkg, $(PACKAGES), golint $(pkg) >> $(LINT_LOG) || true;) - @[ ! -s "$(LINT_LOG)" ] || (echo "golint failed:" | cat - $(LINT_LOG) && false) + @go install golang.org/x/lint/golint + @$(GOBIN)/golint ./... 
.PHONY: staticcheck staticcheck: - @go get honnef.co/go/tools/cmd/staticcheck - $(eval STATICCHECK_LOG := $(shell mktemp -t staticcheck.XXXXX)) - @staticcheck $(PACKAGES) 2>&1 > $(STATICCHECK_LOG) || true - @[ ! -s "$(STATICCHECK_LOG)" ] || (echo "staticcheck failed:" | cat - $(STATICCHECK_LOG) && false) + @go install honnef.co/go/tools/cmd/staticcheck + @$(GOBIN)/staticcheck ./... .PHONY: lint -lint: gofmt govet golint staticcheck +lint: gofmt golint staticcheck .PHONY: cover cover: - ./scripts/cover.sh $(shell go list $(PACKAGES)) + go test -coverprofile=cover.out -coverpkg=./... -v ./... go tool cover -html=cover.out -o cover.html update-license: - @go get go.uber.org/tools/update-license - @update-license \ - $(shell go list -json $(PACKAGES) | \ - jq -r '.Dir + "/" + (.GoFiles | .[])') - -############################################################################## - -.PHONY: install_ci -install_ci: install - go get github.com/wadey/gocovmerge - go get github.com/mattn/goveralls - go get golang.org/x/tools/cmd/cover - -.PHONY: test_ci -test_ci: install_ci - ./scripts/cover.sh $(shell go list $(PACKAGES)) + @go install go.uber.org/tools/update-license + @$(GOBIN)/update-license $(GO_FILES) diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md index 065088f64..751bd65e5 100644 --- a/vendor/go.uber.org/multierr/README.md +++ b/vendor/go.uber.org/multierr/README.md @@ -17,7 +17,7 @@ Released under the [MIT License]. [MIT License]: LICENSE.txt [doc-img]: https://godoc.org/go.uber.org/multierr?status.svg [doc]: https://godoc.org/go.uber.org/multierr -[ci-img]: https://travis-ci.org/uber-go/multierr.svg?branch=master +[ci-img]: https://travis-ci.com/uber-go/multierr.svg?branch=master [cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg -[ci]: https://travis-ci.org/uber-go/multierr +[ci]: https://travis-ci.com/uber-go/multierr [cov]: https://codecov.io/gh/uber-go/multierr diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go index de6ce4736..0ee6fe8bc 100644 --- a/vendor/go.uber.org/multierr/error.go +++ b/vendor/go.uber.org/multierr/error.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Uber Technologies, Inc. +// Copyright (c) 2019 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -33,7 +33,7 @@ // If only two errors are being combined, the Append function may be used // instead. // -// err = multierr.Combine(reader.Close(), writer.Close()) +// err = multierr.Append(reader.Close(), writer.Close()) // // This makes it possible to record resource cleanup failures from deferred // blocks with the help of named return values. @@ -99,8 +99,6 @@ var ( // Separator for single-line error messages. _singlelineSeparator = []byte("; ") - _newline = []byte("\n") - // Prefix for multi-line messages _multilinePrefix = []byte("the following errors occurred:") @@ -399,3 +397,53 @@ func Append(left error, right error) error { errors := [2]error{left, right} return fromSlice(errors[0:]) } + +// AppendInto appends an error into the destination of an error pointer and +// returns whether the error being appended was non-nil. 
+// +// var err error +// multierr.AppendInto(&err, r.Close()) +// multierr.AppendInto(&err, w.Close()) +// +// The above is equivalent to, +// +// err := multierr.Append(r.Close(), w.Close()) +// +// As AppendInto reports whether the provided error was non-nil, it may be +// used to build a multierr error in a loop more ergonomically. For example: +// +// var err error +// for line := range lines { +// var item Item +// if multierr.AppendInto(&err, parse(line, &item)) { +// continue +// } +// items = append(items, item) +// } +// +// Compare this with a verison that relies solely on Append: +// +// var err error +// for line := range lines { +// var item Item +// if parseErr := parse(line, &item); parseErr != nil { +// err = multierr.Append(err, parseErr) +// continue +// } +// items = append(items, item) +// } +func AppendInto(into *error, err error) (errored bool) { + if into == nil { + // We panic if 'into' is nil. This is not documented above + // because suggesting that the pointer must be non-nil may + // confuse users into thinking that the error that it points + // to must be non-nil. + panic("misuse of multierr.AppendInto: into pointer must not be nil") + } + + if err == nil { + return false + } + *into = Append(*into, err) + return true +} diff --git a/vendor/go.uber.org/multierr/glide.lock b/vendor/go.uber.org/multierr/glide.lock deleted file mode 100644 index f9ea94c33..000000000 --- a/vendor/go.uber.org/multierr/glide.lock +++ /dev/null @@ -1,19 +0,0 @@ -hash: b53b5e9a84b9cb3cc4b2d0499e23da2feca1eec318ce9bb717ecf35bf24bf221 -updated: 2017-04-10T13:34:45.671678062-07:00 -imports: -- name: go.uber.org/atomic - version: 3b8db5e93c4c02efbc313e17b2e796b0914a01fb -testImports: -- name: github.com/davecgh/go-spew - version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9 - subpackages: - - spew -- name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d - subpackages: - - difflib -- name: github.com/stretchr/testify - version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0 - subpackages: - - assert - - require diff --git a/vendor/go.uber.org/multierr/go.mod b/vendor/go.uber.org/multierr/go.mod new file mode 100644 index 000000000..5463fac72 --- /dev/null +++ b/vendor/go.uber.org/multierr/go.mod @@ -0,0 +1,12 @@ +module go.uber.org/multierr + +go 1.12 + +require ( + github.com/stretchr/testify v1.3.0 + go.uber.org/atomic v1.5.0 + go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee + golang.org/x/lint v0.0.0-20190930215403-16217165b5de + golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 // indirect + honnef.co/go/tools v0.0.1-2019.2.3 +) diff --git a/vendor/go.uber.org/multierr/go.sum b/vendor/go.uber.org/multierr/go.sum new file mode 100644 index 000000000..b460913d2 --- /dev/null +++ b/vendor/go.uber.org/multierr/go.sum @@ -0,0 +1,45 @@ +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod 
h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +go.uber.org/atomic v1.5.0 h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c h1:IGkKhmfzcztjm6gYkykvu/NiS8kaqbCWAEWWAyf8J5U= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools 
v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/vendor/go.uber.org/multierr/go113.go b/vendor/go.uber.org/multierr/go113.go new file mode 100644 index 000000000..264b0eac0 --- /dev/null +++ b/vendor/go.uber.org/multierr/go113.go @@ -0,0 +1,52 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// +build go1.13 + +package multierr + +import "errors" + +// As attempts to find the first error in the error list that matches the type +// of the value that target points to. +// +// This function allows errors.As to traverse the values stored on the +// multierr error. +func (merr *multiError) As(target interface{}) bool { + for _, err := range merr.Errors() { + if errors.As(err, target) { + return true + } + } + return false +} + +// Is attempts to match the provided error against errors in the error list. +// +// This function allows errors.Is to traverse the values stored on the +// multierr error. +func (merr *multiError) Is(target error) bool { + for _, err := range merr.Errors() { + if errors.Is(err, target) { + return true + } + } + return false +} diff --git a/vendor/go.uber.org/multierr/tools.go b/vendor/go.uber.org/multierr/tools.go new file mode 100644 index 000000000..df93f0723 --- /dev/null +++ b/vendor/go.uber.org/multierr/tools.go @@ -0,0 +1,30 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +// +build tools + +package multierr + +import ( + // Tools we use during development. + _ "go.uber.org/tools/update-license" + _ "golang.org/x/lint/golint" + _ "honnef.co/go/tools/cmd/staticcheck" +) diff --git a/vendor/go.uber.org/tools/LICENSE b/vendor/go.uber.org/tools/LICENSE new file mode 100644 index 000000000..858e02475 --- /dev/null +++ b/vendor/go.uber.org/tools/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/tools/update-license/.gitignore b/vendor/go.uber.org/tools/update-license/.gitignore new file mode 100644 index 000000000..b167772c8 --- /dev/null +++ b/vendor/go.uber.org/tools/update-license/.gitignore @@ -0,0 +1 @@ +update-license diff --git a/vendor/go.uber.org/tools/update-license/README.md b/vendor/go.uber.org/tools/update-license/README.md new file mode 100644 index 000000000..5887df1dd --- /dev/null +++ b/vendor/go.uber.org/tools/update-license/README.md @@ -0,0 +1,24 @@ +# update-license + +This is a small tool that updates the license header for Uber's open source Golang files. + +## Installation + +``` +go get go.uber.org/tools/update-license +``` + +## Usage + +``` +update-license go_files... +``` + +## Further Work + +* Support more licenses by name (MIT, Apache 2.0, etc), file path, url (http GET) +* Support custom owner (not just "Uber Technologies, Inc.") +* Support more languages than go (cover go, java, js, py to start, along with LICENSE, LICENSE.txt) +* Talk about removing custom logic for header comments (ie `@generated`, `Code generated by`), it probably makes more sense just to put the license at the top +* Better detection support for existing licenses so they can be removed +* Verbose, dry run support diff --git a/vendor/go.uber.org/tools/update-license/licenses.go b/vendor/go.uber.org/tools/update-license/licenses.go new file mode 100644 index 000000000..76957c21e --- /dev/null +++ b/vendor/go.uber.org/tools/update-license/licenses.go @@ -0,0 +1,56 @@ +// Copyright (c) 2019 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package main + +var licenseTemplates = map[string]string{ + "Apache-2.0": `// Copyright {{.Year}} {{.Owner}} +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.`, + "MIT": `// Copyright (c) {{.Year}} {{.Owner}} +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE.`, +} diff --git a/vendor/go.uber.org/tools/update-license/main.go b/vendor/go.uber.org/tools/update-license/main.go new file mode 100644 index 000000000..269fd9b47 --- /dev/null +++ b/vendor/go.uber.org/tools/update-license/main.go @@ -0,0 +1,228 @@ +// Copyright (c) 2019 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package main + +import ( + "bytes" + "flag" + "fmt" + "html/template" + "io/ioutil" + "log" + "os" + "path/filepath" + "sort" + "strings" + "time" +) + +const ( + // how many lines to check for an existing copyright + // this logic is not great and we should probably do something else + // but this was copied from the python script + copyrightLineLimit = 5 + headerPrefix = "// Copyright" +) + +var ( + flagDryRun = flag.Bool("dry", false, "Do not edit files and just print out what files would be edited") + flagOwner = flag.String("owner", "Uber Technologies, Inc.", "Copyright owner") + flagLicense = flag.String( + "license", + "MIT", + fmt.Sprintf( + "Type of license to use [%s]", + strings.Join(validLicenses(), ", "), + ), + ) + + lineSkipPrefixes = []string{ + "// Code generated by", + "// @generated", + } +) + +func main() { + log.SetFlags(0) + log.SetOutput(os.Stdout) + log.SetPrefix("") + if err := do(); err != nil { + log.Fatal(err) + } +} + +func do() error { + flag.Parse() + + if len(flag.Args()) < 1 { + return fmt.Errorf("usage: %s GO_FILES", os.Args[0]) + } + + return updateFiles( + flag.Args(), + time.Now().UTC().Year(), + *flagLicense, + *flagOwner, + *flagDryRun, + ) +} + +func fullLicense(ts string, year int, owner string) string { + var buf bytes.Buffer + t, err := template.New("").Parse(ts) + if err != nil { + log.Panic("failed to parse license template", err) + } + + data := struct { + Year int + Owner string + }{year, owner} + if err := t.Execute(&buf, data); err != nil { + log.Panic("failed to execture license template", err) + } + + return strings.TrimSpace(buf.String()) +} + +// validLicenses grabs all the license templates from the folder +func validLicenses() []string { + res := make([]string, 0, len(licenseTemplates)) + + for k := range licenseTemplates { + res = append(res, k) + } + + sort.Strings(res) + return res +} + +func updateFiles( + filePaths []string, + year int, + license string, + owner string, + dryRun bool, +) error { + if err := checkFilePaths(filePaths); err != nil { + return err + } + for _, filePath := range filePaths { + if err := updateFile(filePath, year, license, owner, dryRun); err != nil { + return err + } + } + return nil +} + +func checkFilePaths(filePaths []string) error { + for _, filePath := range filePaths { + if filepath.Ext(filePath) != ".go" { + return fmt.Errorf("%s is not a go file", filePath) + } + } + return nil +} + +func updateFile( + filePath string, + 
year int, + license string, + owner string, + dryRun bool, +) error { + data, err := ioutil.ReadFile(filePath) + if err != nil { + return err + } + newData := updateData(data, year, license, owner) + if !bytes.Equal(data, newData) { + if dryRun { + log.Print(filePath) + return nil + } + // we could do something more complicated so that we do not + // need to pass 0644 as the file mode, but in this case it should + // never actually be used to create a file since we know the file + // already exists, and it's easier to use the ReadFile/WriteFile + // logic as it is right now, and since this is just a generation + // program, this should be acceptable + return ioutil.WriteFile(filePath, newData, 0644) + } + return nil +} + +func updateData( + data []byte, + year int, + license string, + owner string, +) []byte { + licenseText := fullLicense(string(licenseTemplates[license]), year, owner) + + return []byte( + strings.Join( + updateLines(strings.Split(string(data), "\n"), licenseText), + "\n", + ), + ) +} + +// a value in the returned slice may contain newlines itself +func updateLines(lines []string, license string) []string { + for i, line := range lines { + if i >= copyrightLineLimit { + break + } + if strings.HasPrefix(line, headerPrefix) { + // assume that the new license text always starts with the copyright + // string. Pretty safe to assume, right? RIGHT? + lines[i] = strings.Split(license, "\n")[0] + return lines + } + } + return addToLines(lines, license) +} + +// a value in the returned slice may contain newlines itself +func addToLines(lines []string, license string) []string { + i := 0 + for len(lines) > i && lineContainsSkipPrefix(lines[i]) { + i++ + // skip comments under the generated line too + for strings.HasPrefix(lines[i], "//") { + i++ + } + } + if i == 0 { + return append([]string{license, ""}, lines...) + } + return append(lines[0:i], append([]string{"", license}, lines[i:]...)...) +} + +func lineContainsSkipPrefix(line string) bool { + for _, skipPrefix := range lineSkipPrefixes { + if strings.HasPrefix(line, skipPrefix) { + return true + } + } + return false +} diff --git a/vendor/go.uber.org/zap/.gitignore b/vendor/go.uber.org/zap/.gitignore index 08fbde6ce..da9d9d00b 100644 --- a/vendor/go.uber.org/zap/.gitignore +++ b/vendor/go.uber.org/zap/.gitignore @@ -26,3 +26,7 @@ _testmain.go *.pprof *.out *.log + +/bin +cover.out +cover.html diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl index c6440db8e..3154a1e64 100644 --- a/vendor/go.uber.org/zap/.readme.tmpl +++ b/vendor/go.uber.org/zap/.readme.tmpl @@ -100,9 +100,10 @@ pinned in zap's [glide.lock][] file. 
[↩](#anchor-versions) [doc-img]: https://godoc.org/go.uber.org/zap?status.svg [doc]: https://godoc.org/go.uber.org/zap -[ci-img]: https://travis-ci.org/uber-go/zap.svg?branch=master -[ci]: https://travis-ci.org/uber-go/zap +[ci-img]: https://travis-ci.com/uber-go/zap.svg?branch=master +[ci]: https://travis-ci.com/uber-go/zap [cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg [cov]: https://codecov.io/gh/uber-go/zap [benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks [glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock + diff --git a/vendor/go.uber.org/zap/.travis.yml b/vendor/go.uber.org/zap/.travis.yml index ada5ebdcc..647b4ee43 100644 --- a/vendor/go.uber.org/zap/.travis.yml +++ b/vendor/go.uber.org/zap/.travis.yml @@ -1,21 +1,23 @@ language: go sudo: false -go: - - 1.11.x - - 1.12.x + go_import_path: go.uber.org/zap env: global: - TEST_TIMEOUT_SCALE=10 -cache: - directories: - - vendor -install: - - make dependencies + - GO111MODULE=on + +matrix: + include: + - go: 1.12.x + - go: 1.13.x + env: LINT=1 + script: - - make lint + - test -z "$LINT" || make lint - make test - make bench + after_success: - make cover - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md index 28d10677e..bebdb748d 100644 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -1,5 +1,26 @@ # Changelog +## 1.13.0 (13 Nov 2019) + +Enhancements: +* [#758][]: Add `Intp`, `Stringp`, and other similar `*p` field constructors + to log pointers to primitives with support for `nil` values. + +Thanks to @jbizzle for their contributions to this release. + +## 1.12.0 (29 Oct 2019) + +Enhancements: +* [#751][]: Migrate to Go modules. + +## 1.11.0 (21 Oct 2019) + +Enhancements: +* [#725][]: Add `zapcore.OmitKey` to omit keys in an `EncoderConfig`. +* [#736][]: Add `RFC3339` and `RFC3339Nano` time encoders. + +Thanks to @juicemia, @uhthomas for their contributions to this release. + ## 1.10.0 (29 Apr 2019) Bugfixes: @@ -325,3 +346,7 @@ upgrade to the upcoming stable release. [#610]: https://github.com/uber-go/zap/pull/610 [#675]: https://github.com/uber-go/zap/pull/675 [#704]: https://github.com/uber-go/zap/pull/704 +[#725]: https://github.com/uber-go/zap/pull/725 +[#736]: https://github.com/uber-go/zap/pull/736 +[#751]: https://github.com/uber-go/zap/pull/751 +[#758]: https://github.com/uber-go/zap/pull/758 diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile index 073e9aa91..21e436c45 100644 --- a/vendor/go.uber.org/zap/Makefile +++ b/vendor/go.uber.org/zap/Makefile @@ -1,74 +1,55 @@ -export GO15VENDOREXPERIMENT=1 +export GOBIN ?= $(shell pwd)/bin +GOLINT = $(GOBIN)/golint BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem -PKGS ?= $(shell glide novendor) -# Many Go tools take file globs or directories as arguments instead of packages. -PKG_FILES ?= *.go zapcore benchmarks buffer zapgrpc zaptest zaptest/observer internal/bufferpool internal/exit internal/color internal/ztest -# The linting tools evolve with each Go version, so run them only on the latest -# stable release. -GO_VERSION := $(shell go version | cut -d " " -f 3) -GO_MINOR_VERSION := $(word 2,$(subst ., ,$(GO_VERSION))) -LINTABLE_MINOR_VERSIONS := 12 -ifneq ($(filter $(LINTABLE_MINOR_VERSIONS),$(GO_MINOR_VERSION)),) -SHOULD_LINT := true -endif +# Directories containing independent Go modules. +# +# We track coverage only for the main module. 
+MODULE_DIRS = . ./benchmarks +# Many Go tools take file globs or directories as arguments instead of packages. +GO_FILES := $(shell \ + find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ + -o -name '*.go' -print | cut -b3-) .PHONY: all all: lint test -.PHONY: dependencies -dependencies: - @echo "Installing Glide and locked dependencies..." - glide --version || go get -u -f github.com/Masterminds/glide - glide install - @echo "Installing test dependencies..." - go install ./vendor/github.com/axw/gocov/gocov - go install ./vendor/github.com/mattn/goveralls -ifdef SHOULD_LINT - @echo "Installing golint..." - go install ./vendor/github.com/golang/lint/golint -else - @echo "Not installing golint, since we don't expect to lint on" $(GO_VERSION) -endif - -# Disable printf-like invocation checking due to testify.assert.Error() -VET_RULES := -printf=false - .PHONY: lint -lint: -ifdef SHOULD_LINT +lint: $(GOLINT) @rm -rf lint.log @echo "Checking formatting..." - @gofmt -d -s $(PKG_FILES) 2>&1 | tee lint.log - @echo "Installing test dependencies for vet..." - @go test -i $(PKGS) + @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log @echo "Checking vet..." - @go vet $(VET_RULES) $(PKGS) 2>&1 | tee -a lint.log + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go vet ./... 2>&1) &&) true | tee -a lint.log @echo "Checking lint..." - @$(foreach dir,$(PKGS),golint $(dir) 2>&1 | tee -a lint.log;) + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(GOLINT) ./... 2>&1) &&) true | tee -a lint.log @echo "Checking for unresolved FIXMEs..." - @git grep -i fixme | grep -v -e vendor -e Makefile | tee -a lint.log + @git grep -i fixme | grep -v -e Makefile | tee -a lint.log @echo "Checking for license headers..." - @./check_license.sh | tee -a lint.log + @./checklicense.sh | tee -a lint.log @[ ! -s lint.log ] -else - @echo "Skipping linters on" $(GO_VERSION) -endif + +$(GOLINT): + go install golang.org/x/lint/golint .PHONY: test test: - go test -race $(PKGS) + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go test -race ./...) &&) true .PHONY: cover cover: - ./scripts/cover.sh $(PKGS) + go test -race -coverprofile=cover.out -coverpkg=./... ./... + go tool cover -html=cover.out -o cover.html .PHONY: bench BENCH ?= . bench: - @$(foreach pkg,$(PKGS),go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS) $(pkg);) + @$(foreach dir,$(MODULE_DIRS), ( \ + cd $(dir) && \ + go list ./... 
| xargs -n1 go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS) \ + ) &&) true .PHONY: updatereadme updatereadme: diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md index f4fd1cb44..bcea28a19 100644 --- a/vendor/go.uber.org/zap/README.md +++ b/vendor/go.uber.org/zap/README.md @@ -64,43 +64,40 @@ id="anchor-versions">[1](#footnote-versions) Log a message and 10 fields: -| Package | Time | Objects Allocated | -| :--- | :---: | :---: | -| :zap: zap | 3131 ns/op | 5 allocs/op | -| :zap: zap (sugared) | 4173 ns/op | 21 allocs/op | -| zerolog | 16154 ns/op | 90 allocs/op | -| lion | 16341 ns/op | 111 allocs/op | -| go-kit | 17049 ns/op | 126 allocs/op | -| logrus | 23662 ns/op | 142 allocs/op | -| log15 | 36351 ns/op | 149 allocs/op | -| apex/log | 42530 ns/op | 126 allocs/op | +| Package | Time | Time % to zap | Objects Allocated | +| :------ | :--: | :-----------: | :---------------: | +| :zap: zap | 862 ns/op | +0% | 5 allocs/op +| :zap: zap (sugared) | 1250 ns/op | +45% | 11 allocs/op +| zerolog | 4021 ns/op | +366% | 76 allocs/op +| go-kit | 4542 ns/op | +427% | 105 allocs/op +| apex/log | 26785 ns/op | +3007% | 115 allocs/op +| logrus | 29501 ns/op | +3322% | 125 allocs/op +| log15 | 29906 ns/op | +3369% | 122 allocs/op Log a message with a logger that already has 10 fields of context: -| Package | Time | Objects Allocated | -| :--- | :---: | :---: | -| :zap: zap | 380 ns/op | 0 allocs/op | -| :zap: zap (sugared) | 564 ns/op | 2 allocs/op | -| zerolog | 321 ns/op | 0 allocs/op | -| lion | 7092 ns/op | 39 allocs/op | -| go-kit | 20226 ns/op | 115 allocs/op | -| logrus | 22312 ns/op | 130 allocs/op | -| log15 | 28788 ns/op | 79 allocs/op | -| apex/log | 42063 ns/op | 115 allocs/op | +| Package | Time | Time % to zap | Objects Allocated | +| :------ | :--: | :-----------: | :---------------: | +| :zap: zap | 126 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 187 ns/op | +48% | 2 allocs/op +| zerolog | 88 ns/op | -30% | 0 allocs/op +| go-kit | 5087 ns/op | +3937% | 103 allocs/op +| log15 | 18548 ns/op | +14621% | 73 allocs/op +| apex/log | 26012 ns/op | +20544% | 104 allocs/op +| logrus | 27236 ns/op | +21516% | 113 allocs/op Log a static string, without any context or `printf`-style templating: -| Package | Time | Objects Allocated | -| :--- | :---: | :---: | -| :zap: zap | 361 ns/op | 0 allocs/op | -| :zap: zap (sugared) | 534 ns/op | 2 allocs/op | -| zerolog | 323 ns/op | 0 allocs/op | -| standard library | 575 ns/op | 2 allocs/op | -| go-kit | 922 ns/op | 13 allocs/op | -| lion | 1413 ns/op | 10 allocs/op | -| logrus | 2291 ns/op | 27 allocs/op | -| apex/log | 3690 ns/op | 11 allocs/op | -| log15 | 5954 ns/op | 26 allocs/op | +| Package | Time | Time % to zap | Objects Allocated | +| :------ | :--: | :-----------: | :---------------: | +| :zap: zap | 118 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 191 ns/op | +62% | 2 allocs/op +| zerolog | 93 ns/op | -21% | 0 allocs/op +| go-kit | 280 ns/op | +137% | 11 allocs/op +| standard library | 499 ns/op | +323% | 2 allocs/op +| apex/log | 1990 ns/op | +1586% | 10 allocs/op +| logrus | 3129 ns/op | +2552% | 24 allocs/op +| log15 | 3887 ns/op | +3194% | 23 allocs/op ## Development Status: Stable @@ -124,13 +121,14 @@ Released under the [MIT License](LICENSE.txt). 1 In particular, keep in mind that we may be benchmarking against slightly older versions of other packages. Versions are -pinned in zap's [glide.lock][] file. [↩](#anchor-versions) +pinned in the [benchmarks/go.mod][] file. 
[↩](#anchor-versions) [doc-img]: https://godoc.org/go.uber.org/zap?status.svg [doc]: https://godoc.org/go.uber.org/zap -[ci-img]: https://travis-ci.org/uber-go/zap.svg?branch=master -[ci]: https://travis-ci.org/uber-go/zap +[ci-img]: https://travis-ci.com/uber-go/zap.svg?branch=master +[ci]: https://travis-ci.com/uber-go/zap [cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg [cov]: https://codecov.io/gh/uber-go/zap [benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks -[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock +[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod + diff --git a/vendor/go.uber.org/zap/check_license.sh b/vendor/go.uber.org/zap/checklicense.sh similarity index 100% rename from vendor/go.uber.org/zap/check_license.sh rename to vendor/go.uber.org/zap/checklicense.sh diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go index 5130e1347..83c1ea245 100644 --- a/vendor/go.uber.org/zap/field.go +++ b/vendor/go.uber.org/zap/field.go @@ -38,6 +38,12 @@ func Skip() Field { return Field{Type: zapcore.SkipType} } +// nilField returns a field which will marshal explicitly as nil. See motivation +// in https://github.com/uber-go/zap/issues/753 . If we ever make breaking +// changes and add zapcore.NilType and zapcore.ObjectEncoder.AddNil, the +// implementation here should be changed to reflect that. +func nilField(key string) Field { return Reflect(key, nil) } + // Binary constructs a field that carries an opaque binary blob. // // Binary data is serialized in an encoding-appropriate format. For example, @@ -56,6 +62,15 @@ func Bool(key string, val bool) Field { return Field{Key: key, Type: zapcore.BoolType, Integer: ival} } +// Boolp constructs a field that carries a *bool. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Boolp(key string, val *bool) Field { + if val == nil { + return nilField(key) + } + return Bool(key, *val) +} + // ByteString constructs a field that carries UTF-8 encoded text as a []byte. // To log opaque binary blobs (which aren't necessarily valid UTF-8), use // Binary. @@ -70,6 +85,15 @@ func Complex128(key string, val complex128) Field { return Field{Key: key, Type: zapcore.Complex128Type, Interface: val} } +// Complex128p constructs a field that carries a *complex128. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Complex128p(key string, val *complex128) Field { + if val == nil { + return nilField(key) + } + return Complex128(key, *val) +} + // Complex64 constructs a field that carries a complex number. Unlike most // numeric fields, this costs an allocation (to convert the complex64 to // interface{}). @@ -77,6 +101,15 @@ func Complex64(key string, val complex64) Field { return Field{Key: key, Type: zapcore.Complex64Type, Interface: val} } +// Complex64p constructs a field that carries a *complex64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Complex64p(key string, val *complex64) Field { + if val == nil { + return nilField(key) + } + return Complex64(key, *val) +} + // Float64 constructs a field that carries a float64. The way the // floating-point value is represented is encoder-dependent, so marshaling is // necessarily lazy. 
@@ -84,6 +117,15 @@ func Float64(key string, val float64) Field { return Field{Key: key, Type: zapcore.Float64Type, Integer: int64(math.Float64bits(val))} } +// Float64p constructs a field that carries a *float64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Float64p(key string, val *float64) Field { + if val == nil { + return nilField(key) + } + return Float64(key, *val) +} + // Float32 constructs a field that carries a float32. The way the // floating-point value is represented is encoder-dependent, so marshaling is // necessarily lazy. @@ -91,66 +133,183 @@ func Float32(key string, val float32) Field { return Field{Key: key, Type: zapcore.Float32Type, Integer: int64(math.Float32bits(val))} } +// Float32p constructs a field that carries a *float32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Float32p(key string, val *float32) Field { + if val == nil { + return nilField(key) + } + return Float32(key, *val) +} + // Int constructs a field with the given key and value. func Int(key string, val int) Field { return Int64(key, int64(val)) } +// Intp constructs a field that carries a *int. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Intp(key string, val *int) Field { + if val == nil { + return nilField(key) + } + return Int(key, *val) +} + // Int64 constructs a field with the given key and value. func Int64(key string, val int64) Field { return Field{Key: key, Type: zapcore.Int64Type, Integer: val} } +// Int64p constructs a field that carries a *int64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int64p(key string, val *int64) Field { + if val == nil { + return nilField(key) + } + return Int64(key, *val) +} + // Int32 constructs a field with the given key and value. func Int32(key string, val int32) Field { return Field{Key: key, Type: zapcore.Int32Type, Integer: int64(val)} } +// Int32p constructs a field that carries a *int32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int32p(key string, val *int32) Field { + if val == nil { + return nilField(key) + } + return Int32(key, *val) +} + // Int16 constructs a field with the given key and value. func Int16(key string, val int16) Field { return Field{Key: key, Type: zapcore.Int16Type, Integer: int64(val)} } +// Int16p constructs a field that carries a *int16. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int16p(key string, val *int16) Field { + if val == nil { + return nilField(key) + } + return Int16(key, *val) +} + // Int8 constructs a field with the given key and value. func Int8(key string, val int8) Field { return Field{Key: key, Type: zapcore.Int8Type, Integer: int64(val)} } +// Int8p constructs a field that carries a *int8. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int8p(key string, val *int8) Field { + if val == nil { + return nilField(key) + } + return Int8(key, *val) +} + // String constructs a field with the given key and value. func String(key string, val string) Field { return Field{Key: key, Type: zapcore.StringType, String: val} } +// Stringp constructs a field that carries a *string. The returned Field will safely +// and explicitly represent `nil` when appropriate. 
+func Stringp(key string, val *string) Field { + if val == nil { + return nilField(key) + } + return String(key, *val) +} + // Uint constructs a field with the given key and value. func Uint(key string, val uint) Field { return Uint64(key, uint64(val)) } +// Uintp constructs a field that carries a *uint. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uintp(key string, val *uint) Field { + if val == nil { + return nilField(key) + } + return Uint(key, *val) +} + // Uint64 constructs a field with the given key and value. func Uint64(key string, val uint64) Field { return Field{Key: key, Type: zapcore.Uint64Type, Integer: int64(val)} } +// Uint64p constructs a field that carries a *uint64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint64p(key string, val *uint64) Field { + if val == nil { + return nilField(key) + } + return Uint64(key, *val) +} + // Uint32 constructs a field with the given key and value. func Uint32(key string, val uint32) Field { return Field{Key: key, Type: zapcore.Uint32Type, Integer: int64(val)} } +// Uint32p constructs a field that carries a *uint32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint32p(key string, val *uint32) Field { + if val == nil { + return nilField(key) + } + return Uint32(key, *val) +} + // Uint16 constructs a field with the given key and value. func Uint16(key string, val uint16) Field { return Field{Key: key, Type: zapcore.Uint16Type, Integer: int64(val)} } +// Uint16p constructs a field that carries a *uint16. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint16p(key string, val *uint16) Field { + if val == nil { + return nilField(key) + } + return Uint16(key, *val) +} + // Uint8 constructs a field with the given key and value. func Uint8(key string, val uint8) Field { return Field{Key: key, Type: zapcore.Uint8Type, Integer: int64(val)} } +// Uint8p constructs a field that carries a *uint8. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint8p(key string, val *uint8) Field { + if val == nil { + return nilField(key) + } + return Uint8(key, *val) +} + // Uintptr constructs a field with the given key and value. func Uintptr(key string, val uintptr) Field { return Field{Key: key, Type: zapcore.UintptrType, Integer: int64(val)} } +// Uintptrp constructs a field that carries a *uintptr. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uintptrp(key string, val *uintptr) Field { + if val == nil { + return nilField(key) + } + return Uintptr(key, *val) +} + // Reflect constructs a field with the given key and an arbitrary object. It uses // an encoding-appropriate, reflection-based function to lazily serialize nearly // any object into the logging context, but it's relatively slow and @@ -183,6 +342,15 @@ func Time(key string, val time.Time) Field { return Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()} } +// Timep constructs a field that carries a *time.Time. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Timep(key string, val *time.Time) Field { + if val == nil { + return nilField(key) + } + return Time(key, *val) +} + // Stack constructs a field that stores a stacktrace of the current goroutine // under provided key. 
Keep in mind that taking a stacktrace is eager and // expensive (relatively speaking); this function both makes an allocation and @@ -201,6 +369,15 @@ func Duration(key string, val time.Duration) Field { return Field{Key: key, Type: zapcore.DurationType, Integer: int64(val)} } +// Durationp constructs a field that carries a *time.Duration. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Durationp(key string, val *time.Duration) Field { + if val == nil { + return nilField(key) + } + return Duration(key, *val) +} + // Object constructs a field with the given key and ObjectMarshaler. It // provides a flexible, but still type-safe and efficient, way to add map- or // struct-like user-defined types to the logging context. The struct's @@ -224,78 +401,116 @@ func Any(key string, value interface{}) Field { return Array(key, val) case bool: return Bool(key, val) + case *bool: + return Boolp(key, val) case []bool: return Bools(key, val) case complex128: return Complex128(key, val) + case *complex128: + return Complex128p(key, val) case []complex128: return Complex128s(key, val) case complex64: return Complex64(key, val) + case *complex64: + return Complex64p(key, val) case []complex64: return Complex64s(key, val) case float64: return Float64(key, val) + case *float64: + return Float64p(key, val) case []float64: return Float64s(key, val) case float32: return Float32(key, val) + case *float32: + return Float32p(key, val) case []float32: return Float32s(key, val) case int: return Int(key, val) + case *int: + return Intp(key, val) case []int: return Ints(key, val) case int64: return Int64(key, val) + case *int64: + return Int64p(key, val) case []int64: return Int64s(key, val) case int32: return Int32(key, val) + case *int32: + return Int32p(key, val) case []int32: return Int32s(key, val) case int16: return Int16(key, val) + case *int16: + return Int16p(key, val) case []int16: return Int16s(key, val) case int8: return Int8(key, val) + case *int8: + return Int8p(key, val) case []int8: return Int8s(key, val) case string: return String(key, val) + case *string: + return Stringp(key, val) case []string: return Strings(key, val) case uint: return Uint(key, val) + case *uint: + return Uintp(key, val) case []uint: return Uints(key, val) case uint64: return Uint64(key, val) + case *uint64: + return Uint64p(key, val) case []uint64: return Uint64s(key, val) case uint32: return Uint32(key, val) + case *uint32: + return Uint32p(key, val) case []uint32: return Uint32s(key, val) case uint16: return Uint16(key, val) + case *uint16: + return Uint16p(key, val) case []uint16: return Uint16s(key, val) case uint8: return Uint8(key, val) + case *uint8: + return Uint8p(key, val) case []byte: return Binary(key, val) case uintptr: return Uintptr(key, val) + case *uintptr: + return Uintptrp(key, val) case []uintptr: return Uintptrs(key, val) case time.Time: return Time(key, val) + case *time.Time: + return Timep(key, val) case []time.Time: return Times(key, val) case time.Duration: return Duration(key, val) + case *time.Duration: + return Durationp(key, val) case []time.Duration: return Durations(key, val) case error: diff --git a/vendor/go.uber.org/zap/glide.lock b/vendor/go.uber.org/zap/glide.lock deleted file mode 100644 index 881b462c0..000000000 --- a/vendor/go.uber.org/zap/glide.lock +++ /dev/null @@ -1,76 +0,0 @@ -hash: f073ba522c06c88ea3075bde32a8aaf0969a840a66cab6318a0897d141ffee92 -updated: 2017-07-22T18:06:49.598185334-07:00 -imports: -- name: go.uber.org/atomic - version: 
4e336646b2ef9fc6e47be8e21594178f98e5ebcf -- name: go.uber.org/multierr - version: 3c4937480c32f4c13a875a1829af76c98ca3d40a -testImports: -- name: github.com/apex/log - version: d9b960447bfa720077b2da653cc79e533455b499 - subpackages: - - handlers/json -- name: github.com/axw/gocov - version: 3a69a0d2a4ef1f263e2d92b041a69593d6964fe8 - subpackages: - - gocov -- name: github.com/davecgh/go-spew - version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 - subpackages: - - spew -- name: github.com/fatih/color - version: 62e9147c64a1ed519147b62a56a14e83e2be02c1 -- name: github.com/go-kit/kit - version: e10f5bf035be9af21fd5b2fb4469d5716c6ab07d - subpackages: - - log -- name: github.com/go-logfmt/logfmt - version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 -- name: github.com/go-stack/stack - version: 54be5f394ed2c3e19dac9134a40a95ba5a017f7b -- name: github.com/golang/lint - version: c5fb716d6688a859aae56d26d3e6070808df29f7 - subpackages: - - golint -- name: github.com/kr/logfmt - version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 -- name: github.com/mattn/go-colorable - version: 3fa8c76f9daed4067e4a806fb7e4dc86455c6d6a -- name: github.com/mattn/go-isatty - version: fc9e8d8ef48496124e79ae0df75490096eccf6fe -- name: github.com/mattn/goveralls - version: 6efce81852ad1b7567c17ad71b03aeccc9dd9ae0 -- name: github.com/pborman/uuid - version: e790cca94e6cc75c7064b1332e63811d4aae1a53 -- name: github.com/pkg/errors - version: 645ef00459ed84a119197bfb8d8205042c6df63d -- name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d - subpackages: - - difflib -- name: github.com/rs/zerolog - version: eed4c2b94d945e0b2456ad6aa518a443986b5f22 -- name: github.com/satori/go.uuid - version: 5bf94b69c6b68ee1b541973bb8e1144db23a194b -- name: github.com/sirupsen/logrus - version: 7dd06bf38e1e13df288d471a57d5adbac106be9e -- name: github.com/stretchr/testify - version: f6abca593680b2315d2075e0f5e2a9751e3f431a - subpackages: - - assert - - require -- name: go.pedge.io/lion - version: 87958e8713f1fa138d993087133b97e976642159 -- name: golang.org/x/sys - version: c4489faa6e5ab84c0ef40d6ee878f7a030281f0f - subpackages: - - unix -- name: golang.org/x/tools - version: 496819729719f9d07692195e0a94d6edd2251389 - subpackages: - - cover -- name: gopkg.in/inconshreveable/log15.v2 - version: b105bd37f74e5d9dc7b6ad7806715c7a2b83fd3f - subpackages: - - stack - - term diff --git a/vendor/go.uber.org/zap/glide.yaml b/vendor/go.uber.org/zap/glide.yaml index 94412594c..8e1d05e9a 100644 --- a/vendor/go.uber.org/zap/glide.yaml +++ b/vendor/go.uber.org/zap/glide.yaml @@ -22,12 +22,11 @@ testImport: - package: github.com/mattn/goveralls - package: github.com/pborman/uuid - package: github.com/pkg/errors -- package: go.pedge.io/lion - package: github.com/rs/zerolog - package: golang.org/x/tools subpackages: - cover -- package: github.com/golang/lint +- package: golang.org/x/lint subpackages: - golint - package: github.com/axw/gocov diff --git a/vendor/go.uber.org/zap/go.mod b/vendor/go.uber.org/zap/go.mod new file mode 100644 index 000000000..1fb6bba0b --- /dev/null +++ b/vendor/go.uber.org/zap/go.mod @@ -0,0 +1,11 @@ +module go.uber.org/zap + +go 1.13 + +require ( + github.com/pkg/errors v0.8.1 + github.com/stretchr/testify v1.4.0 + go.uber.org/atomic v1.5.0 + go.uber.org/multierr v1.3.0 + golang.org/x/lint v0.0.0-20190930215403-16217165b5de +) diff --git a/vendor/go.uber.org/zap/go.sum b/vendor/go.uber.org/zap/go.sum new file mode 100644 index 000000000..9ff6735d8 --- /dev/null +++ b/vendor/go.uber.org/zap/go.sum @@ -0,0 
+1,56 @@ +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +go.uber.org/atomic v1.5.0 h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/multierr v1.3.0 h1:sFPn2GLc3poCkfrpIXGhBD2X0CMIo4Q/zSULXrj/+uc= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c h1:IGkKhmfzcztjm6gYkykvu/NiS8kaqbCWAEWWAyf8J5U= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/vendor/go.uber.org/zap/tools.go b/vendor/go.uber.org/zap/tools.go new file mode 100644 index 000000000..2b6366bea --- /dev/null +++ b/vendor/go.uber.org/zap/tools.go @@ -0,0 +1,28 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// +build tools + +package zap + +import ( + // Tools we use during development. + _ "golang.org/x/lint/golint" +) diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go index f0509522b..5e0a69be5 100644 --- a/vendor/go.uber.org/zap/zapcore/encoder.go +++ b/vendor/go.uber.org/zap/zapcore/encoder.go @@ -31,6 +31,9 @@ import ( // behavior. 
const DefaultLineEnding = "\n" +// OmitKey defines the key to use when callers want to remove a key from log output. +const OmitKey = "" + // A LevelEncoder serializes a Level to a primitive type. type LevelEncoder func(Level, PrimitiveArrayEncoder) @@ -115,11 +118,30 @@ func ISO8601TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { enc.AppendString(t.Format("2006-01-02T15:04:05.000Z0700")) } -// UnmarshalText unmarshals text to a TimeEncoder. "iso8601" and "ISO8601" are -// unmarshaled to ISO8601TimeEncoder, "millis" is unmarshaled to -// EpochMillisTimeEncoder, and anything else is unmarshaled to EpochTimeEncoder. +// RFC3339TimeEncoder serializes a time.Time to an RFC3339-formatted string. +func RFC3339TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + enc.AppendString(t.Format(time.RFC3339)) +} + +// RFC3339NanoTimeEncoder serializes a time.Time to an RFC3339-formatted string +// with nanosecond precision. +func RFC3339NanoTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + enc.AppendString(t.Format(time.RFC3339Nano)) +} + +// UnmarshalText unmarshals text to a TimeEncoder. +// "rfc3339nano" and "RFC3339Nano" are unmarshaled to RFC3339NanoTimeEncoder. +// "rfc3339" and "RFC3339" are unmarshaled to RFC3339TimeEncoder. +// "iso8601" and "ISO8601" are unmarshaled to ISO8601TimeEncoder. +// "millis" is unmarshaled to EpochMillisTimeEncoder. +// "nanos" is unmarshaled to EpochNanosEncoder. +// Anything else is unmarshaled to EpochTimeEncoder. func (e *TimeEncoder) UnmarshalText(text []byte) error { switch string(text) { + case "rfc3339nano", "RFC3339Nano": + *e = RFC3339NanoTimeEncoder + case "rfc3339", "RFC3339": + *e = RFC3339TimeEncoder case "iso8601", "ISO8601": *e = ISO8601TimeEncoder case "millis": @@ -272,8 +294,8 @@ type ObjectEncoder interface { AddUint8(key string, value uint8) AddUintptr(key string, value uintptr) - // AddReflected uses reflection to serialize arbitrary objects, so it's slow - // and allocation-heavy. + // AddReflected uses reflection to serialize arbitrary objects, so it can be + // slow and allocation-heavy. AddReflected(key string, value interface{}) error // OpenNamespace opens an isolated namespace where all subsequent fields will // be added. Applications can use namespaces to prevent key collisions when @@ -343,6 +365,7 @@ type Encoder interface { Clone() Encoder // EncodeEntry encodes an entry and fields, along with any accumulated - // context, into a byte buffer and returns it. + // context, into a byte buffer and returns it. Any fields that are empty, + // including fields on the `Entry` type, should be omitted. EncodeEntry(Entry, []Field) (*buffer.Buffer, error) } diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go index 7d9893f33..8273abdf0 100644 --- a/vendor/go.uber.org/zap/zapcore/entry.go +++ b/vendor/go.uber.org/zap/zapcore/entry.go @@ -136,7 +136,8 @@ func (ec EntryCaller) TrimmedPath() string { // An Entry represents a complete log message. The entry's structured context // is already serialized, but the log level, time, message, and call site -// information are available for inspection and modification. +// information are available for inspection and modification. Any fields left +// empty will be omitted when encoding. // // Entries are pooled, so any functions that accept them MUST be careful not to // retain references to them. 
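Not part of the patch: a minimal sketch of wiring up the two zapcore/encoder.go additions above, the RFC3339 time encoders and the OmitKey constant (OmitKey is the empty string, so assigning it to any *Key field tells the encoder to drop that field from the output). Only stock zap/zapcore APIs are assumed:

    package main

    import (
        "os"

        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
    )

    func main() {
        cfg := zap.NewProductionEncoderConfig()
        cfg.EncodeTime = zapcore.RFC3339NanoTimeEncoder // new encoder from this change
        cfg.CallerKey = zapcore.OmitKey                 // "" => caller field is omitted

        core := zapcore.NewCore(zapcore.NewJSONEncoder(cfg), zapcore.Lock(os.Stdout), zap.InfoLevel)
        zap.New(core).Info("rfc3339 timestamps, no caller key")
    }

The same encoders are also reachable from text/YAML configuration via TimeEncoder.UnmarshalText ("rfc3339", "rfc3339nano"), per the UnmarshalText hunk above.
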
diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go index 9aec4eada..56256be8d 100644 --- a/vendor/go.uber.org/zap/zapcore/json_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -145,15 +145,29 @@ func (enc *jsonEncoder) resetReflectBuf() { } } -func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error { +var nullLiteralBytes = []byte("null") + +// Only invoke the standard JSON encoder if there is actually something to +// encode; otherwise write JSON null literal directly. +func (enc *jsonEncoder) encodeReflected(obj interface{}) ([]byte, error) { + if obj == nil { + return nullLiteralBytes, nil + } enc.resetReflectBuf() - err := enc.reflectEnc.Encode(obj) + if err := enc.reflectEnc.Encode(obj); err != nil { + return nil, err + } + enc.reflectBuf.TrimNewline() + return enc.reflectBuf.Bytes(), nil +} + +func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error { + valueBytes, err := enc.encodeReflected(obj) if err != nil { return err } - enc.reflectBuf.TrimNewline() enc.addKey(key) - _, err = enc.buf.Write(enc.reflectBuf.Bytes()) + _, err = enc.buf.Write(valueBytes) return err } @@ -236,14 +250,12 @@ func (enc *jsonEncoder) AppendInt64(val int64) { } func (enc *jsonEncoder) AppendReflected(val interface{}) error { - enc.resetReflectBuf() - err := enc.reflectEnc.Encode(val) + valueBytes, err := enc.encodeReflected(val) if err != nil { return err } - enc.reflectBuf.TrimNewline() enc.addElementSeparator() - _, err = enc.buf.Write(enc.reflectBuf.Bytes()) + _, err = enc.buf.Write(valueBytes) return err } diff --git a/vendor/golang.org/x/lint/.travis.yml b/vendor/golang.org/x/lint/.travis.yml new file mode 100644 index 000000000..50553ebd0 --- /dev/null +++ b/vendor/golang.org/x/lint/.travis.yml @@ -0,0 +1,19 @@ +sudo: false +language: go +go: + - 1.10.x + - 1.11.x + - master + +go_import_path: golang.org/x/lint + +install: + - go get -t -v ./... + +script: + - go test -v -race ./... + +matrix: + allow_failures: + - go: master + fast_finish: true diff --git a/vendor/golang.org/x/lint/CONTRIBUTING.md b/vendor/golang.org/x/lint/CONTRIBUTING.md new file mode 100644 index 000000000..1fadda62d --- /dev/null +++ b/vendor/golang.org/x/lint/CONTRIBUTING.md @@ -0,0 +1,15 @@ +# Contributing to Golint + +## Before filing an issue: + +### Are you having trouble building golint? + +Check you have the latest version of its dependencies. Run +``` +go get -u golang.org/x/lint/golint +``` +If you still have problems, consider searching for existing issues before filing a new issue. + +## Before sending a pull request: + +Have you understood the purpose of golint? Make sure to carefully read `README`. diff --git a/vendor/golang.org/x/lint/LICENSE b/vendor/golang.org/x/lint/LICENSE new file mode 100644 index 000000000..65d761bc9 --- /dev/null +++ b/vendor/golang.org/x/lint/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/lint/README.md b/vendor/golang.org/x/lint/README.md new file mode 100644 index 000000000..4968b13ae --- /dev/null +++ b/vendor/golang.org/x/lint/README.md @@ -0,0 +1,88 @@ +Golint is a linter for Go source code. + +[![Build Status](https://travis-ci.org/golang/lint.svg?branch=master)](https://travis-ci.org/golang/lint) + +## Installation + +Golint requires a +[supported release of Go](https://golang.org/doc/devel/release.html#policy). + + go get -u golang.org/x/lint/golint + +To find out where `golint` was installed you can run `go list -f {{.Target}} golang.org/x/lint/golint`. For `golint` to be used globally add that directory to the `$PATH` environment setting. + +## Usage + +Invoke `golint` with one or more filenames, directories, or packages named +by its import path. Golint uses the same +[import path syntax](https://golang.org/cmd/go/#hdr-Import_path_syntax) as +the `go` command and therefore +also supports relative import paths like `./...`. Additionally the `...` +wildcard can be used as suffix on relative and absolute file paths to recurse +into them. + +The output of this tool is a list of suggestions in Vim quickfix format, +which is accepted by lots of different editors. + +## Purpose + +Golint differs from gofmt. Gofmt reformats Go source code, whereas +golint prints out style mistakes. + +Golint differs from govet. Govet is concerned with correctness, whereas +golint is concerned with coding style. Golint is in use at Google, and it +seeks to match the accepted style of the open source Go project. + +The suggestions made by golint are exactly that: suggestions. +Golint is not perfect, and has both false positives and false negatives. +Do not treat its output as a gold standard. We will not be adding pragmas +or other knobs to suppress specific warnings, so do not expect or require +code to be completely "lint-free". +In short, this tool is not, and will never be, trustworthy enough for its +suggestions to be enforced automatically, for example as part of a build process. +Golint makes suggestions for many of the mechanically checkable items listed in +[Effective Go](https://golang.org/doc/effective_go.html) and the +[CodeReviewComments wiki page](https://golang.org/wiki/CodeReviewComments). + +## Scope + +Golint is meant to carry out the stylistic conventions put forth in +[Effective Go](https://golang.org/doc/effective_go.html) and +[CodeReviewComments](https://golang.org/wiki/CodeReviewComments). +Changes that are not aligned with those documents will not be considered. 
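Not part of the upstream README: for orientation, a minimal sketch of driving the vendored lint package programmatically, using only the Linter.Lint API that the golint command (added below) calls; the file name and source snippet here are made up for illustration:

    package main

    import (
        "fmt"

        "golang.org/x/lint"
    )

    func main() {
        // Illustrative input: an exported function without a doc comment.
        src := []byte("package demo\n\nfunc Exported() {}\n")

        var l lint.Linter
        problems, err := l.Lint("demo.go", src)
        if err != nil {
            panic(err)
        }
        for _, p := range problems {
            fmt.Printf("%v: %s (confidence %.2f)\n", p.Position, p.Text, p.Confidence)
        }
    }
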
+ +## Contributions + +Contributions to this project are welcome provided they are [in scope](#scope), +though please send mail before starting work on anything major. +Contributors retain their copyright, so we need you to fill out +[a short form](https://developers.google.com/open-source/cla/individual) +before we can accept your contribution. + +## Vim + +Add this to your ~/.vimrc: + + set rtp+=$GOPATH/src/golang.org/x/lint/misc/vim + +If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value. + +Running `:Lint` will run golint on the current file and populate the quickfix list. + +Optionally, add this to your `~/.vimrc` to automatically run `golint` on `:w` + + autocmd BufWritePost,FileWritePost *.go execute 'Lint' | cwindow + + +## Emacs + +Add this to your `.emacs` file: + + (add-to-list 'load-path (concat (getenv "GOPATH") "/src/golang.org/x/lint/misc/emacs/")) + (require 'golint) + +If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value. + +Running M-x golint will run golint on the current file. + +For more usage, see [Compilation-Mode](http://www.gnu.org/software/emacs/manual/html_node/emacs/Compilation-Mode.html). diff --git a/vendor/golang.org/x/lint/go.mod b/vendor/golang.org/x/lint/go.mod new file mode 100644 index 000000000..44179f3a4 --- /dev/null +++ b/vendor/golang.org/x/lint/go.mod @@ -0,0 +1,5 @@ +module golang.org/x/lint + +go 1.11 + +require golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f diff --git a/vendor/golang.org/x/lint/go.sum b/vendor/golang.org/x/lint/go.sum new file mode 100644 index 000000000..539c98a94 --- /dev/null +++ b/vendor/golang.org/x/lint/go.sum @@ -0,0 +1,8 @@ +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f h1:kDxGY2VmgABOe55qheT/TFqUMtcTHnomIPS1iv3G4Ms= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/golang.org/x/lint/golint/golint.go b/vendor/golang.org/x/lint/golint/golint.go new file mode 100644 index 000000000..ac024b6d2 --- /dev/null +++ b/vendor/golang.org/x/lint/golint/golint.go @@ -0,0 +1,159 @@ +// Copyright (c) 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +// golint lints the Go source files named on its command line. 
+package main + +import ( + "flag" + "fmt" + "go/build" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "golang.org/x/lint" +) + +var ( + minConfidence = flag.Float64("min_confidence", 0.8, "minimum confidence of a problem to print it") + setExitStatus = flag.Bool("set_exit_status", false, "set exit status to 1 if any issues are found") + suggestions int +) + +func usage() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + fmt.Fprintf(os.Stderr, "\tgolint [flags] # runs on package in current directory\n") + fmt.Fprintf(os.Stderr, "\tgolint [flags] [packages]\n") + fmt.Fprintf(os.Stderr, "\tgolint [flags] [directories] # where a '/...' suffix includes all sub-directories\n") + fmt.Fprintf(os.Stderr, "\tgolint [flags] [files] # all must belong to a single package\n") + fmt.Fprintf(os.Stderr, "Flags:\n") + flag.PrintDefaults() +} + +func main() { + flag.Usage = usage + flag.Parse() + + if flag.NArg() == 0 { + lintDir(".") + } else { + // dirsRun, filesRun, and pkgsRun indicate whether golint is applied to + // directory, file or package targets. The distinction affects which + // checks are run. It is no valid to mix target types. + var dirsRun, filesRun, pkgsRun int + var args []string + for _, arg := range flag.Args() { + if strings.HasSuffix(arg, "/...") && isDir(arg[:len(arg)-len("/...")]) { + dirsRun = 1 + for _, dirname := range allPackagesInFS(arg) { + args = append(args, dirname) + } + } else if isDir(arg) { + dirsRun = 1 + args = append(args, arg) + } else if exists(arg) { + filesRun = 1 + args = append(args, arg) + } else { + pkgsRun = 1 + args = append(args, arg) + } + } + + if dirsRun+filesRun+pkgsRun != 1 { + usage() + os.Exit(2) + } + switch { + case dirsRun == 1: + for _, dir := range args { + lintDir(dir) + } + case filesRun == 1: + lintFiles(args...) + case pkgsRun == 1: + for _, pkg := range importPaths(args) { + lintPackage(pkg) + } + } + } + + if *setExitStatus && suggestions > 0 { + fmt.Fprintf(os.Stderr, "Found %d lint suggestions; failing.\n", suggestions) + os.Exit(1) + } +} + +func isDir(filename string) bool { + fi, err := os.Stat(filename) + return err == nil && fi.IsDir() +} + +func exists(filename string) bool { + _, err := os.Stat(filename) + return err == nil +} + +func lintFiles(filenames ...string) { + files := make(map[string][]byte) + for _, filename := range filenames { + src, err := ioutil.ReadFile(filename) + if err != nil { + fmt.Fprintln(os.Stderr, err) + continue + } + files[filename] = src + } + + l := new(lint.Linter) + ps, err := l.LintFiles(files) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return + } + for _, p := range ps { + if p.Confidence >= *minConfidence { + fmt.Printf("%v: %s\n", p.Position, p.Text) + suggestions++ + } + } +} + +func lintDir(dirname string) { + pkg, err := build.ImportDir(dirname, 0) + lintImportedPackage(pkg, err) +} + +func lintPackage(pkgname string) { + pkg, err := build.Import(pkgname, ".", 0) + lintImportedPackage(pkg, err) +} + +func lintImportedPackage(pkg *build.Package, err error) { + if err != nil { + if _, nogo := err.(*build.NoGoError); nogo { + // Don't complain if the failure is due to no Go source files. + return + } + fmt.Fprintln(os.Stderr, err) + return + } + + var files []string + files = append(files, pkg.GoFiles...) + files = append(files, pkg.CgoFiles...) + files = append(files, pkg.TestGoFiles...) + if pkg.Dir != "." { + for i, f := range files { + files[i] = filepath.Join(pkg.Dir, f) + } + } + // TODO(dsymonds): Do foo_test too (pkg.XTestGoFiles) + + lintFiles(files...) 
+} diff --git a/vendor/golang.org/x/lint/golint/import.go b/vendor/golang.org/x/lint/golint/import.go new file mode 100644 index 000000000..2ba9dea77 --- /dev/null +++ b/vendor/golang.org/x/lint/golint/import.go @@ -0,0 +1,309 @@ +package main + +/* + +This file holds a direct copy of the import path matching code of +https://github.com/golang/go/blob/master/src/cmd/go/main.go. It can be +replaced when https://golang.org/issue/8768 is resolved. + +It has been updated to follow upstream changes in a few ways. + +*/ + +import ( + "fmt" + "go/build" + "log" + "os" + "path" + "path/filepath" + "regexp" + "runtime" + "strings" +) + +var ( + buildContext = build.Default + goroot = filepath.Clean(runtime.GOROOT()) + gorootSrc = filepath.Join(goroot, "src") +) + +// importPathsNoDotExpansion returns the import paths to use for the given +// command line, but it does no ... expansion. +func importPathsNoDotExpansion(args []string) []string { + if len(args) == 0 { + return []string{"."} + } + var out []string + for _, a := range args { + // Arguments are supposed to be import paths, but + // as a courtesy to Windows developers, rewrite \ to / + // in command-line arguments. Handles .\... and so on. + if filepath.Separator == '\\' { + a = strings.Replace(a, `\`, `/`, -1) + } + + // Put argument in canonical form, but preserve leading ./. + if strings.HasPrefix(a, "./") { + a = "./" + path.Clean(a) + if a == "./." { + a = "." + } + } else { + a = path.Clean(a) + } + if a == "all" || a == "std" { + out = append(out, allPackages(a)...) + continue + } + out = append(out, a) + } + return out +} + +// importPaths returns the import paths to use for the given command line. +func importPaths(args []string) []string { + args = importPathsNoDotExpansion(args) + var out []string + for _, a := range args { + if strings.Contains(a, "...") { + if build.IsLocalImport(a) { + out = append(out, allPackagesInFS(a)...) + } else { + out = append(out, allPackages(a)...) + } + continue + } + out = append(out, a) + } + return out +} + +// matchPattern(pattern)(name) reports whether +// name matches pattern. Pattern is a limited glob +// pattern in which '...' means 'any string' and there +// is no other special syntax. +func matchPattern(pattern string) func(name string) bool { + re := regexp.QuoteMeta(pattern) + re = strings.Replace(re, `\.\.\.`, `.*`, -1) + // Special case: foo/... matches foo too. + if strings.HasSuffix(re, `/.*`) { + re = re[:len(re)-len(`/.*`)] + `(/.*)?` + } + reg := regexp.MustCompile(`^` + re + `$`) + return func(name string) bool { + return reg.MatchString(name) + } +} + +// hasPathPrefix reports whether the path s begins with the +// elements in prefix. +func hasPathPrefix(s, prefix string) bool { + switch { + default: + return false + case len(s) == len(prefix): + return s == prefix + case len(s) > len(prefix): + if prefix != "" && prefix[len(prefix)-1] == '/' { + return strings.HasPrefix(s, prefix) + } + return s[len(prefix)] == '/' && s[:len(prefix)] == prefix + } +} + +// treeCanMatchPattern(pattern)(name) reports whether +// name or children of name can possibly match pattern. +// Pattern is the same limited glob accepted by matchPattern. 
+func treeCanMatchPattern(pattern string) func(name string) bool { + wildCard := false + if i := strings.Index(pattern, "..."); i >= 0 { + wildCard = true + pattern = pattern[:i] + } + return func(name string) bool { + return len(name) <= len(pattern) && hasPathPrefix(pattern, name) || + wildCard && strings.HasPrefix(name, pattern) + } +} + +// allPackages returns all the packages that can be found +// under the $GOPATH directories and $GOROOT matching pattern. +// The pattern is either "all" (all packages), "std" (standard packages) +// or a path including "...". +func allPackages(pattern string) []string { + pkgs := matchPackages(pattern) + if len(pkgs) == 0 { + fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) + } + return pkgs +} + +func matchPackages(pattern string) []string { + match := func(string) bool { return true } + treeCanMatch := func(string) bool { return true } + if pattern != "all" && pattern != "std" { + match = matchPattern(pattern) + treeCanMatch = treeCanMatchPattern(pattern) + } + + have := map[string]bool{ + "builtin": true, // ignore pseudo-package that exists only for documentation + } + if !buildContext.CgoEnabled { + have["runtime/cgo"] = true // ignore during walk + } + var pkgs []string + + // Commands + cmd := filepath.Join(goroot, "src/cmd") + string(filepath.Separator) + filepath.Walk(cmd, func(path string, fi os.FileInfo, err error) error { + if err != nil || !fi.IsDir() || path == cmd { + return nil + } + name := path[len(cmd):] + if !treeCanMatch(name) { + return filepath.SkipDir + } + // Commands are all in cmd/, not in subdirectories. + if strings.Contains(name, string(filepath.Separator)) { + return filepath.SkipDir + } + + // We use, e.g., cmd/gofmt as the pseudo import path for gofmt. + name = "cmd/" + name + if have[name] { + return nil + } + have[name] = true + if !match(name) { + return nil + } + _, err = buildContext.ImportDir(path, 0) + if err != nil { + if _, noGo := err.(*build.NoGoError); !noGo { + log.Print(err) + } + return nil + } + pkgs = append(pkgs, name) + return nil + }) + + for _, src := range buildContext.SrcDirs() { + if (pattern == "std" || pattern == "cmd") && src != gorootSrc { + continue + } + src = filepath.Clean(src) + string(filepath.Separator) + root := src + if pattern == "cmd" { + root += "cmd" + string(filepath.Separator) + } + filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { + if err != nil || !fi.IsDir() || path == src { + return nil + } + + // Avoid .foo, _foo, and testdata directory trees. + _, elem := filepath.Split(path) + if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" { + return filepath.SkipDir + } + + name := filepath.ToSlash(path[len(src):]) + if pattern == "std" && (strings.Contains(name, ".") || name == "cmd") { + // The name "std" is only the standard library. + // If the name is cmd, it's the root of the command tree. + return filepath.SkipDir + } + if !treeCanMatch(name) { + return filepath.SkipDir + } + if have[name] { + return nil + } + have[name] = true + if !match(name) { + return nil + } + _, err = buildContext.ImportDir(path, 0) + if err != nil { + if _, noGo := err.(*build.NoGoError); noGo { + return nil + } + } + pkgs = append(pkgs, name) + return nil + }) + } + return pkgs +} + +// allPackagesInFS is like allPackages but is passed a pattern +// beginning ./ or ../, meaning it should scan the tree rooted +// at the given directory. There are ... in the pattern too. 
+func allPackagesInFS(pattern string) []string { + pkgs := matchPackagesInFS(pattern) + if len(pkgs) == 0 { + fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) + } + return pkgs +} + +func matchPackagesInFS(pattern string) []string { + // Find directory to begin the scan. + // Could be smarter but this one optimization + // is enough for now, since ... is usually at the + // end of a path. + i := strings.Index(pattern, "...") + dir, _ := path.Split(pattern[:i]) + + // pattern begins with ./ or ../. + // path.Clean will discard the ./ but not the ../. + // We need to preserve the ./ for pattern matching + // and in the returned import paths. + prefix := "" + if strings.HasPrefix(pattern, "./") { + prefix = "./" + } + match := matchPattern(pattern) + + var pkgs []string + filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error { + if err != nil || !fi.IsDir() { + return nil + } + if path == dir { + // filepath.Walk starts at dir and recurses. For the recursive case, + // the path is the result of filepath.Join, which calls filepath.Clean. + // The initial case is not Cleaned, though, so we do this explicitly. + // + // This converts a path like "./io/" to "io". Without this step, running + // "cd $GOROOT/src/pkg; go list ./io/..." would incorrectly skip the io + // package, because prepending the prefix "./" to the unclean path would + // result in "././io", and match("././io") returns false. + path = filepath.Clean(path) + } + + // Avoid .foo, _foo, and testdata directory trees, but do not avoid "." or "..". + _, elem := filepath.Split(path) + dot := strings.HasPrefix(elem, ".") && elem != "." && elem != ".." + if dot || strings.HasPrefix(elem, "_") || elem == "testdata" { + return filepath.SkipDir + } + + name := prefix + filepath.ToSlash(path) + if !match(name) { + return nil + } + if _, err = build.ImportDir(path, 0); err != nil { + if _, noGo := err.(*build.NoGoError); !noGo { + log.Print(err) + } + return nil + } + pkgs = append(pkgs, name) + return nil + }) + return pkgs +} diff --git a/vendor/golang.org/x/lint/golint/importcomment.go b/vendor/golang.org/x/lint/golint/importcomment.go new file mode 100644 index 000000000..d5b32f734 --- /dev/null +++ b/vendor/golang.org/x/lint/golint/importcomment.go @@ -0,0 +1,13 @@ +// Copyright (c) 2018 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +// +build go1.12 + +// Require use of the correct import path only for Go 1.12+ users, so +// any breakages coincide with people updating their CI configs or +// whatnot. + +package main // import "golang.org/x/lint/golint" diff --git a/vendor/golang.org/x/lint/lint.go b/vendor/golang.org/x/lint/lint.go new file mode 100644 index 000000000..532a75ad2 --- /dev/null +++ b/vendor/golang.org/x/lint/lint.go @@ -0,0 +1,1614 @@ +// Copyright (c) 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +// Package lint contains a linter for Go source code. 
+package lint // import "golang.org/x/lint" + +import ( + "bufio" + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/printer" + "go/token" + "go/types" + "regexp" + "sort" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/go/gcexportdata" +) + +const styleGuideBase = "https://golang.org/wiki/CodeReviewComments" + +// A Linter lints Go source code. +type Linter struct { +} + +// Problem represents a problem in some source code. +type Problem struct { + Position token.Position // position in source file + Text string // the prose that describes the problem + Link string // (optional) the link to the style guide for the problem + Confidence float64 // a value in (0,1] estimating the confidence in this problem's correctness + LineText string // the source line + Category string // a short name for the general category of the problem + + // If the problem has a suggested fix (the minority case), + // ReplacementLine is a full replacement for the relevant line of the source file. + ReplacementLine string +} + +func (p *Problem) String() string { + if p.Link != "" { + return p.Text + "\n\n" + p.Link + } + return p.Text +} + +type byPosition []Problem + +func (p byPosition) Len() int { return len(p) } +func (p byPosition) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p byPosition) Less(i, j int) bool { + pi, pj := p[i].Position, p[j].Position + + if pi.Filename != pj.Filename { + return pi.Filename < pj.Filename + } + if pi.Line != pj.Line { + return pi.Line < pj.Line + } + if pi.Column != pj.Column { + return pi.Column < pj.Column + } + + return p[i].Text < p[j].Text +} + +// Lint lints src. +func (l *Linter) Lint(filename string, src []byte) ([]Problem, error) { + return l.LintFiles(map[string][]byte{filename: src}) +} + +// LintFiles lints a set of files of a single package. +// The argument is a map of filename to source. +func (l *Linter) LintFiles(files map[string][]byte) ([]Problem, error) { + pkg := &pkg{ + fset: token.NewFileSet(), + files: make(map[string]*file), + } + var pkgName string + for filename, src := range files { + if isGenerated(src) { + continue // See issue #239 + } + f, err := parser.ParseFile(pkg.fset, filename, src, parser.ParseComments) + if err != nil { + return nil, err + } + if pkgName == "" { + pkgName = f.Name.Name + } else if f.Name.Name != pkgName { + return nil, fmt.Errorf("%s is in package %s, not %s", filename, f.Name.Name, pkgName) + } + pkg.files[filename] = &file{ + pkg: pkg, + f: f, + fset: pkg.fset, + src: src, + filename: filename, + } + } + if len(pkg.files) == 0 { + return nil, nil + } + return pkg.lint(), nil +} + +var ( + genHdr = []byte("// Code generated ") + genFtr = []byte(" DO NOT EDIT.") +) + +// isGenerated reports whether the source file is generated code +// according the rules from https://golang.org/s/generatedcode. +func isGenerated(src []byte) bool { + sc := bufio.NewScanner(bytes.NewReader(src)) + for sc.Scan() { + b := sc.Bytes() + if bytes.HasPrefix(b, genHdr) && bytes.HasSuffix(b, genFtr) && len(b) >= len(genHdr)+len(genFtr) { + return true + } + } + return false +} + +// pkg represents a package being linted. +type pkg struct { + fset *token.FileSet + files map[string]*file + + typesPkg *types.Package + typesInfo *types.Info + + // sortable is the set of types in the package that implement sort.Interface. + sortable map[string]bool + // main is whether this is a "main" package. 
+ main bool + + problems []Problem +} + +func (p *pkg) lint() []Problem { + if err := p.typeCheck(); err != nil { + /* TODO(dsymonds): Consider reporting these errors when golint operates on entire packages. + if e, ok := err.(types.Error); ok { + pos := p.fset.Position(e.Pos) + conf := 1.0 + if strings.Contains(e.Msg, "can't find import: ") { + // Golint is probably being run in a context that doesn't support + // typechecking (e.g. package files aren't found), so don't warn about it. + conf = 0 + } + if conf > 0 { + p.errorfAt(pos, conf, category("typechecking"), e.Msg) + } + + // TODO(dsymonds): Abort if !e.Soft? + } + */ + } + + p.scanSortable() + p.main = p.isMain() + + for _, f := range p.files { + f.lint() + } + + sort.Sort(byPosition(p.problems)) + + return p.problems +} + +// file represents a file being linted. +type file struct { + pkg *pkg + f *ast.File + fset *token.FileSet + src []byte + filename string +} + +func (f *file) isTest() bool { return strings.HasSuffix(f.filename, "_test.go") } + +func (f *file) lint() { + f.lintPackageComment() + f.lintImports() + f.lintBlankImports() + f.lintExported() + f.lintNames() + f.lintElses() + f.lintRanges() + f.lintErrorf() + f.lintErrors() + f.lintErrorStrings() + f.lintReceiverNames() + f.lintIncDec() + f.lintErrorReturn() + f.lintUnexportedReturn() + f.lintTimeNames() + f.lintContextKeyTypes() + f.lintContextArgs() +} + +type link string +type category string + +// The variadic arguments may start with link and category types, +// and must end with a format string and any arguments. +// It returns the new Problem. +func (f *file) errorf(n ast.Node, confidence float64, args ...interface{}) *Problem { + pos := f.fset.Position(n.Pos()) + if pos.Filename == "" { + pos.Filename = f.filename + } + return f.pkg.errorfAt(pos, confidence, args...) +} + +func (p *pkg) errorfAt(pos token.Position, confidence float64, args ...interface{}) *Problem { + problem := Problem{ + Position: pos, + Confidence: confidence, + } + if pos.Filename != "" { + // The file might not exist in our mapping if a //line directive was encountered. + if f, ok := p.files[pos.Filename]; ok { + problem.LineText = srcLine(f.src, pos) + } + } + +argLoop: + for len(args) > 1 { // always leave at least the format string in args + switch v := args[0].(type) { + case link: + problem.Link = string(v) + case category: + problem.Category = string(v) + default: + break argLoop + } + args = args[1:] + } + + problem.Text = fmt.Sprintf(args[0].(string), args[1:]...) + + p.problems = append(p.problems, problem) + return &p.problems[len(p.problems)-1] +} + +var newImporter = func(fset *token.FileSet) types.ImporterFrom { + return gcexportdata.NewImporter(fset, make(map[string]*types.Package)) +} + +func (p *pkg) typeCheck() error { + config := &types.Config{ + // By setting a no-op error reporter, the type checker does as much work as possible. + Error: func(error) {}, + Importer: newImporter(p.fset), + } + info := &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + } + var anyFile *file + var astFiles []*ast.File + for _, f := range p.files { + anyFile = f + astFiles = append(astFiles, f.f) + } + pkg, err := config.Check(anyFile.f.Name.Name, p.fset, astFiles, info) + // Remember the typechecking info, even if config.Check failed, + // since we will get partial information. 
+ p.typesPkg = pkg + p.typesInfo = info + return err +} + +func (p *pkg) typeOf(expr ast.Expr) types.Type { + if p.typesInfo == nil { + return nil + } + return p.typesInfo.TypeOf(expr) +} + +func (p *pkg) isNamedType(typ types.Type, importPath, name string) bool { + n, ok := typ.(*types.Named) + if !ok { + return false + } + tn := n.Obj() + return tn != nil && tn.Pkg() != nil && tn.Pkg().Path() == importPath && tn.Name() == name +} + +// scopeOf returns the tightest scope encompassing id. +func (p *pkg) scopeOf(id *ast.Ident) *types.Scope { + var scope *types.Scope + if obj := p.typesInfo.ObjectOf(id); obj != nil { + scope = obj.Parent() + } + if scope == p.typesPkg.Scope() { + // We were given a top-level identifier. + // Use the file-level scope instead of the package-level scope. + pos := id.Pos() + for _, f := range p.files { + if f.f.Pos() <= pos && pos < f.f.End() { + scope = p.typesInfo.Scopes[f.f] + break + } + } + } + return scope +} + +func (p *pkg) scanSortable() { + p.sortable = make(map[string]bool) + + // bitfield for which methods exist on each type. + const ( + Len = 1 << iota + Less + Swap + ) + nmap := map[string]int{"Len": Len, "Less": Less, "Swap": Swap} + has := make(map[string]int) + for _, f := range p.files { + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 { + return true + } + // TODO(dsymonds): We could check the signature to be more precise. + recv := receiverType(fn) + if i, ok := nmap[fn.Name.Name]; ok { + has[recv] |= i + } + return false + }) + } + for typ, ms := range has { + if ms == Len|Less|Swap { + p.sortable[typ] = true + } + } +} + +func (p *pkg) isMain() bool { + for _, f := range p.files { + if f.isMain() { + return true + } + } + return false +} + +func (f *file) isMain() bool { + if f.f.Name.Name == "main" { + return true + } + return false +} + +// lintPackageComment checks package comments. It complains if +// there is no package comment, or if it is not of the right form. +// This has a notable false positive in that a package comment +// could rightfully appear in a different file of the same package, +// but that's not easy to fix since this linter is file-oriented. +func (f *file) lintPackageComment() { + if f.isTest() { + return + } + + const ref = styleGuideBase + "#package-comments" + prefix := "Package " + f.f.Name.Name + " " + + // Look for a detached package comment. + // First, scan for the last comment that occurs before the "package" keyword. + var lastCG *ast.CommentGroup + for _, cg := range f.f.Comments { + if cg.Pos() > f.f.Package { + // Gone past "package" keyword. + break + } + lastCG = cg + } + if lastCG != nil && strings.HasPrefix(lastCG.Text(), prefix) { + endPos := f.fset.Position(lastCG.End()) + pkgPos := f.fset.Position(f.f.Package) + if endPos.Line+1 < pkgPos.Line { + // There isn't a great place to anchor this error; + // the start of the blank lines between the doc and the package statement + // is at least pointing at the location of the problem. + pos := token.Position{ + Filename: endPos.Filename, + // Offset not set; it is non-trivial, and doesn't appear to be needed. 
+ Line: endPos.Line + 1, + Column: 1, + } + f.pkg.errorfAt(pos, 0.9, link(ref), category("comments"), "package comment is detached; there should be no blank lines between it and the package statement") + return + } + } + + if f.f.Doc == nil { + f.errorf(f.f, 0.2, link(ref), category("comments"), "should have a package comment, unless it's in another file for this package") + return + } + s := f.f.Doc.Text() + if ts := strings.TrimLeft(s, " \t"); ts != s { + f.errorf(f.f.Doc, 1, link(ref), category("comments"), "package comment should not have leading space") + s = ts + } + // Only non-main packages need to keep to this form. + if !f.pkg.main && !strings.HasPrefix(s, prefix) { + f.errorf(f.f.Doc, 1, link(ref), category("comments"), `package comment should be of the form "%s..."`, prefix) + } +} + +// lintBlankImports complains if a non-main package has blank imports that are +// not documented. +func (f *file) lintBlankImports() { + // In package main and in tests, we don't complain about blank imports. + if f.pkg.main || f.isTest() { + return + } + + // The first element of each contiguous group of blank imports should have + // an explanatory comment of some kind. + for i, imp := range f.f.Imports { + pos := f.fset.Position(imp.Pos()) + + if !isBlank(imp.Name) { + continue // Ignore non-blank imports. + } + if i > 0 { + prev := f.f.Imports[i-1] + prevPos := f.fset.Position(prev.Pos()) + if isBlank(prev.Name) && prevPos.Line+1 == pos.Line { + continue // A subsequent blank in a group. + } + } + + // This is the first blank import of a group. + if imp.Doc == nil && imp.Comment == nil { + ref := "" + f.errorf(imp, 1, link(ref), category("imports"), "a blank import should be only in a main or test package, or have a comment justifying it") + } + } +} + +// lintImports examines import blocks. +func (f *file) lintImports() { + for i, is := range f.f.Imports { + _ = i + if is.Name != nil && is.Name.Name == "." && !f.isTest() { + f.errorf(is, 1, link(styleGuideBase+"#import-dot"), category("imports"), "should not use dot imports") + } + + } +} + +const docCommentsLink = styleGuideBase + "#doc-comments" + +// lintExported examines the exported names. +// It complains if any required doc comments are missing, +// or if they are not of the right form. The exact rules are in +// lintFuncDoc, lintTypeDoc and lintValueSpecDoc; this function +// also tracks the GenDecl structure being traversed to permit +// doc comments for constants to be on top of the const block. +// It also complains if the names stutter when combined with +// the package name. +func (f *file) lintExported() { + if f.isTest() { + return + } + + var lastGen *ast.GenDecl // last GenDecl entered. + + // Set of GenDecls that have already had missing comments flagged. + genDeclMissingComments := make(map[*ast.GenDecl]bool) + + f.walk(func(node ast.Node) bool { + switch v := node.(type) { + case *ast.GenDecl: + if v.Tok == token.IMPORT { + return false + } + // token.CONST, token.TYPE or token.VAR + lastGen = v + return true + case *ast.FuncDecl: + f.lintFuncDoc(v) + if v.Recv == nil { + // Only check for stutter on functions, not methods. + // Method names are not used package-qualified. + f.checkStutter(v.Name, "func") + } + // Don't proceed inside funcs. + return false + case *ast.TypeSpec: + // inside a GenDecl, which usually has the doc + doc := v.Doc + if doc == nil { + doc = lastGen.Doc + } + f.lintTypeDoc(v, doc) + f.checkStutter(v.Name, "type") + // Don't proceed inside types. 
+ return false + case *ast.ValueSpec: + f.lintValueSpecDoc(v, lastGen, genDeclMissingComments) + return false + } + return true + }) +} + +var ( + allCapsRE = regexp.MustCompile(`^[A-Z0-9_]+$`) + anyCapsRE = regexp.MustCompile(`[A-Z]`) +) + +// knownNameExceptions is a set of names that are known to be exempt from naming checks. +// This is usually because they are constrained by having to match names in the +// standard library. +var knownNameExceptions = map[string]bool{ + "LastInsertId": true, // must match database/sql + "kWh": true, +} + +func isInTopLevel(f *ast.File, ident *ast.Ident) bool { + path, _ := astutil.PathEnclosingInterval(f, ident.Pos(), ident.End()) + for _, f := range path { + switch f.(type) { + case *ast.File, *ast.GenDecl, *ast.ValueSpec, *ast.Ident: + continue + } + return false + } + return true +} + +// lintNames examines all names in the file. +// It complains if any use underscores or incorrect known initialisms. +func (f *file) lintNames() { + // Package names need slightly different handling than other names. + if strings.Contains(f.f.Name.Name, "_") && !strings.HasSuffix(f.f.Name.Name, "_test") { + f.errorf(f.f, 1, link("http://golang.org/doc/effective_go.html#package-names"), category("naming"), "don't use an underscore in package name") + } + if anyCapsRE.MatchString(f.f.Name.Name) { + f.errorf(f.f, 1, link("http://golang.org/doc/effective_go.html#package-names"), category("mixed-caps"), "don't use MixedCaps in package name; %s should be %s", f.f.Name.Name, strings.ToLower(f.f.Name.Name)) + } + + check := func(id *ast.Ident, thing string) { + if id.Name == "_" { + return + } + if knownNameExceptions[id.Name] { + return + } + + // Handle two common styles from other languages that don't belong in Go. + if len(id.Name) >= 5 && allCapsRE.MatchString(id.Name) && strings.Contains(id.Name, "_") { + capCount := 0 + for _, c := range id.Name { + if 'A' <= c && c <= 'Z' { + capCount++ + } + } + if capCount >= 2 { + f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use ALL_CAPS in Go names; use CamelCase") + return + } + } + if thing == "const" || (thing == "var" && isInTopLevel(f.f, id)) { + if len(id.Name) > 2 && id.Name[0] == 'k' && id.Name[1] >= 'A' && id.Name[1] <= 'Z' { + should := string(id.Name[1]+'a'-'A') + id.Name[2:] + f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use leading k in Go names; %s %s should be %s", thing, id.Name, should) + } + } + + should := lintName(id.Name) + if id.Name == should { + return + } + + if len(id.Name) > 2 && strings.Contains(id.Name[1:], "_") { + f.errorf(id, 0.9, link("http://golang.org/doc/effective_go.html#mixed-caps"), category("naming"), "don't use underscores in Go names; %s %s should be %s", thing, id.Name, should) + return + } + f.errorf(id, 0.8, link(styleGuideBase+"#initialisms"), category("naming"), "%s %s should be %s", thing, id.Name, should) + } + checkList := func(fl *ast.FieldList, thing string) { + if fl == nil { + return + } + for _, f := range fl.List { + for _, id := range f.Names { + check(id, thing) + } + } + } + f.walk(func(node ast.Node) bool { + switch v := node.(type) { + case *ast.AssignStmt: + if v.Tok == token.ASSIGN { + return true + } + for _, exp := range v.Lhs { + if id, ok := exp.(*ast.Ident); ok { + check(id, "var") + } + } + case *ast.FuncDecl: + if f.isTest() && (strings.HasPrefix(v.Name.Name, "Example") || strings.HasPrefix(v.Name.Name, "Test") || strings.HasPrefix(v.Name.Name, "Benchmark")) { + return true + } + + thing := 
"func" + if v.Recv != nil { + thing = "method" + } + + // Exclude naming warnings for functions that are exported to C but + // not exported in the Go API. + // See https://github.com/golang/lint/issues/144. + if ast.IsExported(v.Name.Name) || !isCgoExported(v) { + check(v.Name, thing) + } + + checkList(v.Type.Params, thing+" parameter") + checkList(v.Type.Results, thing+" result") + case *ast.GenDecl: + if v.Tok == token.IMPORT { + return true + } + var thing string + switch v.Tok { + case token.CONST: + thing = "const" + case token.TYPE: + thing = "type" + case token.VAR: + thing = "var" + } + for _, spec := range v.Specs { + switch s := spec.(type) { + case *ast.TypeSpec: + check(s.Name, thing) + case *ast.ValueSpec: + for _, id := range s.Names { + check(id, thing) + } + } + } + case *ast.InterfaceType: + // Do not check interface method names. + // They are often constrainted by the method names of concrete types. + for _, x := range v.Methods.List { + ft, ok := x.Type.(*ast.FuncType) + if !ok { // might be an embedded interface name + continue + } + checkList(ft.Params, "interface method parameter") + checkList(ft.Results, "interface method result") + } + case *ast.RangeStmt: + if v.Tok == token.ASSIGN { + return true + } + if id, ok := v.Key.(*ast.Ident); ok { + check(id, "range var") + } + if id, ok := v.Value.(*ast.Ident); ok { + check(id, "range var") + } + case *ast.StructType: + for _, f := range v.Fields.List { + for _, id := range f.Names { + check(id, "struct field") + } + } + } + return true + }) +} + +// lintName returns a different name if it should be different. +func lintName(name string) (should string) { + // Fast path for simple cases: "_" and all lowercase. + if name == "_" { + return name + } + allLower := true + for _, r := range name { + if !unicode.IsLower(r) { + allLower = false + break + } + } + if allLower { + return name + } + + // Split camelCase at any lower->upper transition, and split on underscores. + // Check each word for common initialisms. + runes := []rune(name) + w, i := 0, 0 // index of start of word, scan + for i+1 <= len(runes) { + eow := false // whether we hit the end of a word + if i+1 == len(runes) { + eow = true + } else if runes[i+1] == '_' { + // underscore; shift the remainder forward over any run of underscores + eow = true + n := 1 + for i+n+1 < len(runes) && runes[i+n+1] == '_' { + n++ + } + + // Leave at most one underscore if the underscore is between two digits + if i+n+1 < len(runes) && unicode.IsDigit(runes[i]) && unicode.IsDigit(runes[i+n+1]) { + n-- + } + + copy(runes[i+1:], runes[i+n+1:]) + runes = runes[:len(runes)-n] + } else if unicode.IsLower(runes[i]) && !unicode.IsLower(runes[i+1]) { + // lower->non-lower + eow = true + } + i++ + if !eow { + continue + } + + // [w,i) is a word. + word := string(runes[w:i]) + if u := strings.ToUpper(word); commonInitialisms[u] { + // Keep consistent case, which is lowercase only at the start. + if w == 0 && unicode.IsLower(runes[w]) { + u = strings.ToLower(u) + } + // All the common initialisms are ASCII, + // so we can replace the bytes exactly. + copy(runes[w:], []rune(u)) + } else if w > 0 && strings.ToLower(word) == word { + // already all lowercase, and not the first word, so uppercase the first character. + runes[w] = unicode.ToUpper(runes[w]) + } + w = i + } + return string(runes) +} + +// commonInitialisms is a set of common initialisms. +// Only add entries that are highly unlikely to be non-initialisms. +// For instance, "ID" is fine (Freudian code is rare), but "AND" is not. 
+var commonInitialisms = map[string]bool{ + "ACL": true, + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTP": true, + "HTTPS": true, + "ID": true, + "IP": true, + "JSON": true, + "LHS": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XMPP": true, + "XSRF": true, + "XSS": true, +} + +// lintTypeDoc examines the doc comment on a type. +// It complains if they are missing from an exported type, +// or if they are not of the standard form. +func (f *file) lintTypeDoc(t *ast.TypeSpec, doc *ast.CommentGroup) { + if !ast.IsExported(t.Name.Name) { + return + } + if doc == nil { + f.errorf(t, 1, link(docCommentsLink), category("comments"), "exported type %v should have comment or be unexported", t.Name) + return + } + + s := doc.Text() + articles := [...]string{"A", "An", "The"} + for _, a := range articles { + if strings.HasPrefix(s, a+" ") { + s = s[len(a)+1:] + break + } + } + if !strings.HasPrefix(s, t.Name.Name+" ") { + f.errorf(doc, 1, link(docCommentsLink), category("comments"), `comment on exported type %v should be of the form "%v ..." (with optional leading article)`, t.Name, t.Name) + } +} + +var commonMethods = map[string]bool{ + "Error": true, + "Read": true, + "ServeHTTP": true, + "String": true, + "Write": true, +} + +// lintFuncDoc examines doc comments on functions and methods. +// It complains if they are missing, or not of the right form. +// It has specific exclusions for well-known methods (see commonMethods above). +func (f *file) lintFuncDoc(fn *ast.FuncDecl) { + if !ast.IsExported(fn.Name.Name) { + // func is unexported + return + } + kind := "function" + name := fn.Name.Name + if fn.Recv != nil && len(fn.Recv.List) > 0 { + // method + kind = "method" + recv := receiverType(fn) + if !ast.IsExported(recv) { + // receiver is unexported + return + } + if commonMethods[name] { + return + } + switch name { + case "Len", "Less", "Swap": + if f.pkg.sortable[recv] { + return + } + } + name = recv + "." + name + } + if fn.Doc == nil { + f.errorf(fn, 1, link(docCommentsLink), category("comments"), "exported %s %s should have comment or be unexported", kind, name) + return + } + s := fn.Doc.Text() + prefix := fn.Name.Name + " " + if !strings.HasPrefix(s, prefix) { + f.errorf(fn.Doc, 1, link(docCommentsLink), category("comments"), `comment on exported %s %s should be of the form "%s..."`, kind, name, prefix) + } +} + +// lintValueSpecDoc examines package-global variables and constants. +// It complains if they are not individually declared, +// or if they are not suitably documented in the right form (unless they are in a block that is commented). +func (f *file) lintValueSpecDoc(vs *ast.ValueSpec, gd *ast.GenDecl, genDeclMissingComments map[*ast.GenDecl]bool) { + kind := "var" + if gd.Tok == token.CONST { + kind = "const" + } + + if len(vs.Names) > 1 { + // Check that none are exported except for the first. + for _, n := range vs.Names[1:] { + if ast.IsExported(n.Name) { + f.errorf(vs, 1, category("comments"), "exported %s %s should have its own declaration", kind, n.Name) + return + } + } + } + + // Only one name. 
+ name := vs.Names[0].Name + if !ast.IsExported(name) { + return + } + + if vs.Doc == nil && gd.Doc == nil { + if genDeclMissingComments[gd] { + return + } + block := "" + if kind == "const" && gd.Lparen.IsValid() { + block = " (or a comment on this block)" + } + f.errorf(vs, 1, link(docCommentsLink), category("comments"), "exported %s %s should have comment%s or be unexported", kind, name, block) + genDeclMissingComments[gd] = true + return + } + // If this GenDecl has parens and a comment, we don't check its comment form. + if gd.Lparen.IsValid() && gd.Doc != nil { + return + } + // The relevant text to check will be on either vs.Doc or gd.Doc. + // Use vs.Doc preferentially. + doc := vs.Doc + if doc == nil { + doc = gd.Doc + } + prefix := name + " " + if !strings.HasPrefix(doc.Text(), prefix) { + f.errorf(doc, 1, link(docCommentsLink), category("comments"), `comment on exported %s %s should be of the form "%s..."`, kind, name, prefix) + } +} + +func (f *file) checkStutter(id *ast.Ident, thing string) { + pkg, name := f.f.Name.Name, id.Name + if !ast.IsExported(name) { + // unexported name + return + } + // A name stutters if the package name is a strict prefix + // and the next character of the name starts a new word. + if len(name) <= len(pkg) { + // name is too short to stutter. + // This permits the name to be the same as the package name. + return + } + if !strings.EqualFold(pkg, name[:len(pkg)]) { + return + } + // We can assume the name is well-formed UTF-8. + // If the next rune after the package name is uppercase or an underscore + // the it's starting a new word and thus this name stutters. + rem := name[len(pkg):] + if next, _ := utf8.DecodeRuneInString(rem); next == '_' || unicode.IsUpper(next) { + f.errorf(id, 0.8, link(styleGuideBase+"#package-names"), category("naming"), "%s name will be used as %s.%s by other packages, and that stutters; consider calling this %s", thing, pkg, name, rem) + } +} + +// zeroLiteral is a set of ast.BasicLit values that are zero values. +// It is not exhaustive. +var zeroLiteral = map[string]bool{ + "false": true, // bool + // runes + `'\x00'`: true, + `'\000'`: true, + // strings + `""`: true, + "``": true, + // numerics + "0": true, + "0.": true, + "0.0": true, + "0i": true, +} + +// lintElses examines else blocks. It complains about any else block whose if block ends in a return. +func (f *file) lintElses() { + // We don't want to flag if { } else if { } else { } constructions. + // They will appear as an IfStmt whose Else field is also an IfStmt. + // Record such a node so we ignore it when we visit it. + ignore := make(map[*ast.IfStmt]bool) + + f.walk(func(node ast.Node) bool { + ifStmt, ok := node.(*ast.IfStmt) + if !ok || ifStmt.Else == nil { + return true + } + if elseif, ok := ifStmt.Else.(*ast.IfStmt); ok { + ignore[elseif] = true + return true + } + if ignore[ifStmt] { + return true + } + if _, ok := ifStmt.Else.(*ast.BlockStmt); !ok { + // only care about elses without conditions + return true + } + if len(ifStmt.Body.List) == 0 { + return true + } + shortDecl := false // does the if statement have a ":=" initialization statement? 
+ if ifStmt.Init != nil { + if as, ok := ifStmt.Init.(*ast.AssignStmt); ok && as.Tok == token.DEFINE { + shortDecl = true + } + } + lastStmt := ifStmt.Body.List[len(ifStmt.Body.List)-1] + if _, ok := lastStmt.(*ast.ReturnStmt); ok { + extra := "" + if shortDecl { + extra = " (move short variable declaration to its own line if necessary)" + } + f.errorf(ifStmt.Else, 1, link(styleGuideBase+"#indent-error-flow"), category("indent"), "if block ends with a return statement, so drop this else and outdent its block"+extra) + } + return true + }) +} + +// lintRanges examines range clauses. It complains about redundant constructions. +func (f *file) lintRanges() { + f.walk(func(node ast.Node) bool { + rs, ok := node.(*ast.RangeStmt) + if !ok { + return true + } + + if isIdent(rs.Key, "_") && (rs.Value == nil || isIdent(rs.Value, "_")) { + p := f.errorf(rs.Key, 1, category("range-loop"), "should omit values from range; this loop is equivalent to `for range ...`") + + newRS := *rs // shallow copy + newRS.Value = nil + newRS.Key = nil + p.ReplacementLine = f.firstLineOf(&newRS, rs) + + return true + } + + if isIdent(rs.Value, "_") { + p := f.errorf(rs.Value, 1, category("range-loop"), "should omit 2nd value from range; this loop is equivalent to `for %s %s range ...`", f.render(rs.Key), rs.Tok) + + newRS := *rs // shallow copy + newRS.Value = nil + p.ReplacementLine = f.firstLineOf(&newRS, rs) + } + + return true + }) +} + +// lintErrorf examines errors.New and testing.Error calls. It complains if its only argument is an fmt.Sprintf invocation. +func (f *file) lintErrorf() { + f.walk(func(node ast.Node) bool { + ce, ok := node.(*ast.CallExpr) + if !ok || len(ce.Args) != 1 { + return true + } + isErrorsNew := isPkgDot(ce.Fun, "errors", "New") + var isTestingError bool + se, ok := ce.Fun.(*ast.SelectorExpr) + if ok && se.Sel.Name == "Error" { + if typ := f.pkg.typeOf(se.X); typ != nil { + isTestingError = typ.String() == "*testing.T" + } + } + if !isErrorsNew && !isTestingError { + return true + } + if !f.imports("errors") { + return true + } + arg := ce.Args[0] + ce, ok = arg.(*ast.CallExpr) + if !ok || !isPkgDot(ce.Fun, "fmt", "Sprintf") { + return true + } + errorfPrefix := "fmt" + if isTestingError { + errorfPrefix = f.render(se.X) + } + p := f.errorf(node, 1, category("errors"), "should replace %s(fmt.Sprintf(...)) with %s.Errorf(...)", f.render(se), errorfPrefix) + + m := f.srcLineWithMatch(ce, `^(.*)`+f.render(se)+`\(fmt\.Sprintf\((.*)\)\)(.*)$`) + if m != nil { + p.ReplacementLine = m[1] + errorfPrefix + ".Errorf(" + m[2] + ")" + m[3] + } + + return true + }) +} + +// lintErrors examines global error vars. It complains if they aren't named in the standard way. 
+func (f *file) lintErrors() { + for _, decl := range f.f.Decls { + gd, ok := decl.(*ast.GenDecl) + if !ok || gd.Tok != token.VAR { + continue + } + for _, spec := range gd.Specs { + spec := spec.(*ast.ValueSpec) + if len(spec.Names) != 1 || len(spec.Values) != 1 { + continue + } + ce, ok := spec.Values[0].(*ast.CallExpr) + if !ok { + continue + } + if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") { + continue + } + + id := spec.Names[0] + prefix := "err" + if id.IsExported() { + prefix = "Err" + } + if !strings.HasPrefix(id.Name, prefix) { + f.errorf(id, 0.9, category("naming"), "error var %s should have name of the form %sFoo", id.Name, prefix) + } + } + } +} + +func lintErrorString(s string) (isClean bool, conf float64) { + const basicConfidence = 0.8 + const capConfidence = basicConfidence - 0.2 + first, firstN := utf8.DecodeRuneInString(s) + last, _ := utf8.DecodeLastRuneInString(s) + if last == '.' || last == ':' || last == '!' || last == '\n' { + return false, basicConfidence + } + if unicode.IsUpper(first) { + // People use proper nouns and exported Go identifiers in error strings, + // so decrease the confidence of warnings for capitalization. + if len(s) <= firstN { + return false, capConfidence + } + // Flag strings starting with something that doesn't look like an initialism. + if second, _ := utf8.DecodeRuneInString(s[firstN:]); !unicode.IsUpper(second) { + return false, capConfidence + } + } + return true, 0 +} + +// lintErrorStrings examines error strings. +// It complains if they are capitalized or end in punctuation or a newline. +func (f *file) lintErrorStrings() { + f.walk(func(node ast.Node) bool { + ce, ok := node.(*ast.CallExpr) + if !ok { + return true + } + if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") { + return true + } + if len(ce.Args) < 1 { + return true + } + str, ok := ce.Args[0].(*ast.BasicLit) + if !ok || str.Kind != token.STRING { + return true + } + s, _ := strconv.Unquote(str.Value) // can assume well-formed Go + if s == "" { + return true + } + clean, conf := lintErrorString(s) + if clean { + return true + } + + f.errorf(str, conf, link(styleGuideBase+"#error-strings"), category("errors"), + "error strings should not be capitalized or end with punctuation or a newline") + return true + }) +} + +// lintReceiverNames examines receiver names. It complains about inconsistent +// names used for the same type and names such as "this". 
+func (f *file) lintReceiverNames() { + typeReceiver := map[string]string{} + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 { + return true + } + names := fn.Recv.List[0].Names + if len(names) < 1 { + return true + } + name := names[0].Name + const ref = styleGuideBase + "#receiver-names" + if name == "_" { + f.errorf(n, 1, link(ref), category("naming"), `receiver name should not be an underscore, omit the name if it is unused`) + return true + } + if name == "this" || name == "self" { + f.errorf(n, 1, link(ref), category("naming"), `receiver name should be a reflection of its identity; don't use generic names such as "this" or "self"`) + return true + } + recv := receiverType(fn) + if prev, ok := typeReceiver[recv]; ok && prev != name { + f.errorf(n, 1, link(ref), category("naming"), "receiver name %s should be consistent with previous receiver name %s for %s", name, prev, recv) + return true + } + typeReceiver[recv] = name + return true + }) +} + +// lintIncDec examines statements that increment or decrement a variable. +// It complains if they don't use x++ or x--. +func (f *file) lintIncDec() { + f.walk(func(n ast.Node) bool { + as, ok := n.(*ast.AssignStmt) + if !ok { + return true + } + if len(as.Lhs) != 1 { + return true + } + if !isOne(as.Rhs[0]) { + return true + } + var suffix string + switch as.Tok { + case token.ADD_ASSIGN: + suffix = "++" + case token.SUB_ASSIGN: + suffix = "--" + default: + return true + } + f.errorf(as, 0.8, category("unary-op"), "should replace %s with %s%s", f.render(as), f.render(as.Lhs[0]), suffix) + return true + }) +} + +// lintErrorReturn examines function declarations that return an error. +// It complains if the error isn't the last parameter. +func (f *file) lintErrorReturn() { + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok || fn.Type.Results == nil { + return true + } + ret := fn.Type.Results.List + if len(ret) <= 1 { + return true + } + if isIdent(ret[len(ret)-1].Type, "error") { + return true + } + // An error return parameter should be the last parameter. + // Flag any error parameters found before the last. + for _, r := range ret[:len(ret)-1] { + if isIdent(r.Type, "error") { + f.errorf(fn, 0.9, category("arg-order"), "error should be the last type when returning multiple items") + break // only flag one + } + } + return true + }) +} + +// lintUnexportedReturn examines exported function declarations. +// It complains if any return an unexported type. +func (f *file) lintUnexportedReturn() { + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok { + return true + } + if fn.Type.Results == nil { + return false + } + if !fn.Name.IsExported() { + return false + } + thing := "func" + if fn.Recv != nil && len(fn.Recv.List) > 0 { + thing = "method" + if !ast.IsExported(receiverType(fn)) { + // Don't report exported methods of unexported types, + // such as private implementations of sort.Interface. + return false + } + } + for _, ret := range fn.Type.Results.List { + typ := f.pkg.typeOf(ret.Type) + if exportedType(typ) { + continue + } + f.errorf(ret.Type, 0.8, category("unexported-type-in-api"), + "exported %s %s returns unexported type %s, which can be annoying to use", + thing, fn.Name.Name, typ) + break // only flag one + } + return false + }) +} + +// exportedType reports whether typ is an exported type. +// It is imprecise, and will err on the side of returning true, +// such as for composite types. 
+func exportedType(typ types.Type) bool { + switch T := typ.(type) { + case *types.Named: + // Builtin types have no package. + return T.Obj().Pkg() == nil || T.Obj().Exported() + case *types.Map: + return exportedType(T.Key()) && exportedType(T.Elem()) + case interface { + Elem() types.Type + }: // array, slice, pointer, chan + return exportedType(T.Elem()) + } + // Be conservative about other types, such as struct, interface, etc. + return true +} + +// timeSuffixes is a list of name suffixes that imply a time unit. +// This is not an exhaustive list. +var timeSuffixes = []string{ + "Sec", "Secs", "Seconds", + "Msec", "Msecs", + "Milli", "Millis", "Milliseconds", + "Usec", "Usecs", "Microseconds", + "MS", "Ms", +} + +func (f *file) lintTimeNames() { + f.walk(func(node ast.Node) bool { + v, ok := node.(*ast.ValueSpec) + if !ok { + return true + } + for _, name := range v.Names { + origTyp := f.pkg.typeOf(name) + // Look for time.Duration or *time.Duration; + // the latter is common when using flag.Duration. + typ := origTyp + if pt, ok := typ.(*types.Pointer); ok { + typ = pt.Elem() + } + if !f.pkg.isNamedType(typ, "time", "Duration") { + continue + } + suffix := "" + for _, suf := range timeSuffixes { + if strings.HasSuffix(name.Name, suf) { + suffix = suf + break + } + } + if suffix == "" { + continue + } + f.errorf(v, 0.9, category("time"), "var %s is of type %v; don't use unit-specific suffix %q", name.Name, origTyp, suffix) + } + return true + }) +} + +// lintContextKeyTypes checks for call expressions to context.WithValue with +// basic types used for the key argument. +// See: https://golang.org/issue/17293 +func (f *file) lintContextKeyTypes() { + f.walk(func(node ast.Node) bool { + switch node := node.(type) { + case *ast.CallExpr: + f.checkContextKeyType(node) + } + + return true + }) +} + +// checkContextKeyType reports an error if the call expression calls +// context.WithValue with a key argument of basic type. +func (f *file) checkContextKeyType(x *ast.CallExpr) { + sel, ok := x.Fun.(*ast.SelectorExpr) + if !ok { + return + } + pkg, ok := sel.X.(*ast.Ident) + if !ok || pkg.Name != "context" { + return + } + if sel.Sel.Name != "WithValue" { + return + } + + // key is second argument to context.WithValue + if len(x.Args) != 3 { + return + } + key := f.pkg.typesInfo.Types[x.Args[1]] + + if ktyp, ok := key.Type.(*types.Basic); ok && ktyp.Kind() != types.Invalid { + f.errorf(x, 1.0, category("context"), fmt.Sprintf("should not use basic type %s as key in context.WithValue", key.Type)) + } +} + +// lintContextArgs examines function declarations that contain an +// argument with a type of context.Context +// It complains if that argument isn't the first parameter. +func (f *file) lintContextArgs() { + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok || len(fn.Type.Params.List) <= 1 { + return true + } + // A context.Context should be the first parameter of a function. + // Flag any that show up after the first. + for _, arg := range fn.Type.Params.List[1:] { + if isPkgDot(arg.Type, "context", "Context") { + f.errorf(fn, 0.9, link("https://golang.org/pkg/context/"), category("arg-order"), "context.Context should be the first parameter of a function") + break // only flag one + } + } + return true + }) +} + +// containsComments returns whether the interval [start, end) contains any +// comments without "// MATCH " prefix. 
+func (f *file) containsComments(start, end token.Pos) bool { + for _, cgroup := range f.f.Comments { + comments := cgroup.List + if comments[0].Slash >= end { + // All comments starting with this group are after end pos. + return false + } + if comments[len(comments)-1].Slash < start { + // Comments group ends before start pos. + continue + } + for _, c := range comments { + if start <= c.Slash && c.Slash < end && !strings.HasPrefix(c.Text, "// MATCH ") { + return true + } + } + } + return false +} + +// receiverType returns the named type of the method receiver, sans "*", +// or "invalid-type" if fn.Recv is ill formed. +func receiverType(fn *ast.FuncDecl) string { + switch e := fn.Recv.List[0].Type.(type) { + case *ast.Ident: + return e.Name + case *ast.StarExpr: + if id, ok := e.X.(*ast.Ident); ok { + return id.Name + } + } + // The parser accepts much more than just the legal forms. + return "invalid-type" +} + +func (f *file) walk(fn func(ast.Node) bool) { + ast.Walk(walker(fn), f.f) +} + +func (f *file) render(x interface{}) string { + var buf bytes.Buffer + if err := printer.Fprint(&buf, f.fset, x); err != nil { + panic(err) + } + return buf.String() +} + +func (f *file) debugRender(x interface{}) string { + var buf bytes.Buffer + if err := ast.Fprint(&buf, f.fset, x, nil); err != nil { + panic(err) + } + return buf.String() +} + +// walker adapts a function to satisfy the ast.Visitor interface. +// The function return whether the walk should proceed into the node's children. +type walker func(ast.Node) bool + +func (w walker) Visit(node ast.Node) ast.Visitor { + if w(node) { + return w + } + return nil +} + +func isIdent(expr ast.Expr, ident string) bool { + id, ok := expr.(*ast.Ident) + return ok && id.Name == ident +} + +// isBlank returns whether id is the blank identifier "_". +// If id == nil, the answer is false. +func isBlank(id *ast.Ident) bool { return id != nil && id.Name == "_" } + +func isPkgDot(expr ast.Expr, pkg, name string) bool { + sel, ok := expr.(*ast.SelectorExpr) + return ok && isIdent(sel.X, pkg) && isIdent(sel.Sel, name) +} + +func isOne(expr ast.Expr) bool { + lit, ok := expr.(*ast.BasicLit) + return ok && lit.Kind == token.INT && lit.Value == "1" +} + +func isCgoExported(f *ast.FuncDecl) bool { + if f.Recv != nil || f.Doc == nil { + return false + } + + cgoExport := regexp.MustCompile(fmt.Sprintf("(?m)^//export %s$", regexp.QuoteMeta(f.Name.Name))) + for _, c := range f.Doc.List { + if cgoExport.MatchString(c.Text) { + return true + } + } + return false +} + +var basicTypeKinds = map[types.BasicKind]string{ + types.UntypedBool: "bool", + types.UntypedInt: "int", + types.UntypedRune: "rune", + types.UntypedFloat: "float64", + types.UntypedComplex: "complex128", + types.UntypedString: "string", +} + +// isUntypedConst reports whether expr is an untyped constant, +// and indicates what its default type is. +// scope may be nil. +func (f *file) isUntypedConst(expr ast.Expr) (defType string, ok bool) { + // Re-evaluate expr outside of its context to see if it's untyped. + // (An expr evaluated within, for example, an assignment context will get the type of the LHS.) + exprStr := f.render(expr) + tv, err := types.Eval(f.fset, f.pkg.typesPkg, expr.Pos(), exprStr) + if err != nil { + return "", false + } + if b, ok := tv.Type.(*types.Basic); ok { + if dt, ok := basicTypeKinds[b.Kind()]; ok { + return dt, true + } + } + + return "", false +} + +// firstLineOf renders the given node and returns its first line. +// It will also match the indentation of another node. 
+func (f *file) firstLineOf(node, match ast.Node) string { + line := f.render(node) + if i := strings.Index(line, "\n"); i >= 0 { + line = line[:i] + } + return f.indentOf(match) + line +} + +func (f *file) indentOf(node ast.Node) string { + line := srcLine(f.src, f.fset.Position(node.Pos())) + for i, r := range line { + switch r { + case ' ', '\t': + default: + return line[:i] + } + } + return line // unusual or empty line +} + +func (f *file) srcLineWithMatch(node ast.Node, pattern string) (m []string) { + line := srcLine(f.src, f.fset.Position(node.Pos())) + line = strings.TrimSuffix(line, "\n") + rx := regexp.MustCompile(pattern) + return rx.FindStringSubmatch(line) +} + +// imports returns true if the current file imports the specified package path. +func (f *file) imports(importPath string) bool { + all := astutil.Imports(f.fset, f.f) + for _, p := range all { + for _, i := range p { + uq, err := strconv.Unquote(i.Path.Value) + if err == nil && importPath == uq { + return true + } + } + } + return false +} + +// srcLine returns the complete line at p, including the terminating newline. +func srcLine(src []byte, p token.Position) string { + // Run to end of line in both directions if not at line start/end. + lo, hi := p.Offset, p.Offset+1 + for lo > 0 && src[lo-1] != '\n' { + lo-- + } + for hi < len(src) && src[hi-1] != '\n' { + hi++ + } + return string(src[lo:hi]) +} diff --git a/vendor/golang.org/x/net/html/atom/gen.go b/vendor/golang.org/x/net/html/atom/gen.go deleted file mode 100644 index 5d052781b..000000000 --- a/vendor/golang.org/x/net/html/atom/gen.go +++ /dev/null @@ -1,712 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -//go:generate go run gen.go -//go:generate go run gen.go -test - -package main - -import ( - "bytes" - "flag" - "fmt" - "go/format" - "io/ioutil" - "math/rand" - "os" - "sort" - "strings" -) - -// identifier converts s to a Go exported identifier. -// It converts "div" to "Div" and "accept-charset" to "AcceptCharset". -func identifier(s string) string { - b := make([]byte, 0, len(s)) - cap := true - for _, c := range s { - if c == '-' { - cap = true - continue - } - if cap && 'a' <= c && c <= 'z' { - c -= 'a' - 'A' - } - cap = false - b = append(b, byte(c)) - } - return string(b) -} - -var test = flag.Bool("test", false, "generate table_test.go") - -func genFile(name string, buf *bytes.Buffer) { - b, err := format.Source(buf.Bytes()) - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - if err := ioutil.WriteFile(name, b, 0644); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -func main() { - flag.Parse() - - var all []string - all = append(all, elements...) - all = append(all, attributes...) - all = append(all, eventHandlers...) - all = append(all, extra...) - sort.Strings(all) - - // uniq - lists have dups - w := 0 - for _, s := range all { - if w == 0 || all[w-1] != s { - all[w] = s - w++ - } - } - all = all[:w] - - if *test { - var buf bytes.Buffer - fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n") - fmt.Fprintln(&buf, "//go:generate go run gen.go -test\n") - fmt.Fprintln(&buf, "package atom\n") - fmt.Fprintln(&buf, "var testAtomList = []string{") - for _, s := range all { - fmt.Fprintf(&buf, "\t%q,\n", s) - } - fmt.Fprintln(&buf, "}") - - genFile("table_test.go", &buf) - return - } - - // Find hash that minimizes table size. 
- var best *table - for i := 0; i < 1000000; i++ { - if best != nil && 1<<(best.k-1) < len(all) { - break - } - h := rand.Uint32() - for k := uint(0); k <= 16; k++ { - if best != nil && k >= best.k { - break - } - var t table - if t.init(h, k, all) { - best = &t - break - } - } - } - if best == nil { - fmt.Fprintf(os.Stderr, "failed to construct string table\n") - os.Exit(1) - } - - // Lay out strings, using overlaps when possible. - layout := append([]string{}, all...) - - // Remove strings that are substrings of other strings - for changed := true; changed; { - changed = false - for i, s := range layout { - if s == "" { - continue - } - for j, t := range layout { - if i != j && t != "" && strings.Contains(s, t) { - changed = true - layout[j] = "" - } - } - } - } - - // Join strings where one suffix matches another prefix. - for { - // Find best i, j, k such that layout[i][len-k:] == layout[j][:k], - // maximizing overlap length k. - besti := -1 - bestj := -1 - bestk := 0 - for i, s := range layout { - if s == "" { - continue - } - for j, t := range layout { - if i == j { - continue - } - for k := bestk + 1; k <= len(s) && k <= len(t); k++ { - if s[len(s)-k:] == t[:k] { - besti = i - bestj = j - bestk = k - } - } - } - } - if bestk > 0 { - layout[besti] += layout[bestj][bestk:] - layout[bestj] = "" - continue - } - break - } - - text := strings.Join(layout, "") - - atom := map[string]uint32{} - for _, s := range all { - off := strings.Index(text, s) - if off < 0 { - panic("lost string " + s) - } - atom[s] = uint32(off<<8 | len(s)) - } - - var buf bytes.Buffer - // Generate the Go code. - fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n") - fmt.Fprintln(&buf, "//go:generate go run gen.go\n") - fmt.Fprintln(&buf, "package atom\n\nconst (") - - // compute max len - maxLen := 0 - for _, s := range all { - if maxLen < len(s) { - maxLen = len(s) - } - fmt.Fprintf(&buf, "\t%s Atom = %#x\n", identifier(s), atom[s]) - } - fmt.Fprintln(&buf, ")\n") - - fmt.Fprintf(&buf, "const hash0 = %#x\n\n", best.h0) - fmt.Fprintf(&buf, "const maxAtomLen = %d\n\n", maxLen) - - fmt.Fprintf(&buf, "var table = [1<<%d]Atom{\n", best.k) - for i, s := range best.tab { - if s == "" { - continue - } - fmt.Fprintf(&buf, "\t%#x: %#x, // %s\n", i, atom[s], s) - } - fmt.Fprintf(&buf, "}\n") - datasize := (1 << best.k) * 4 - - fmt.Fprintln(&buf, "const atomText =") - textsize := len(text) - for len(text) > 60 { - fmt.Fprintf(&buf, "\t%q +\n", text[:60]) - text = text[60:] - } - fmt.Fprintf(&buf, "\t%q\n\n", text) - - genFile("table.go", &buf) - - fmt.Fprintf(os.Stdout, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize) -} - -type byLen []string - -func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) } -func (x byLen) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (x byLen) Len() int { return len(x) } - -// fnv computes the FNV hash with an arbitrary starting value h. -func fnv(h uint32, s string) uint32 { - for i := 0; i < len(s); i++ { - h ^= uint32(s[i]) - h *= 16777619 - } - return h -} - -// A table represents an attempt at constructing the lookup table. -// The lookup table uses cuckoo hashing, meaning that each string -// can be found in one of two positions. -type table struct { - h0 uint32 - k uint - mask uint32 - tab []string -} - -// hash returns the two hashes for s. 
-func (t *table) hash(s string) (h1, h2 uint32) {
- h := fnv(t.h0, s)
- h1 = h & t.mask
- h2 = (h >> 16) & t.mask
- return
-}
-
-// init initializes the table with the given parameters.
-// h0 is the initial hash value,
-// k is the number of bits of hash value to use, and
-// x is the list of strings to store in the table.
-// init returns false if the table cannot be constructed.
-func (t *table) init(h0 uint32, k uint, x []string) bool {
- t.h0 = h0
- t.k = k
- t.tab = make([]string, 1<<k)
- t.mask = 1<<k - 1
- for _, s := range x {
- if !t.insert(s) {
- return false
- }
- }
- return true
-}
-
-// insert inserts s into the table.
-func (t *table) insert(s string) bool {
- h1, h2 := t.hash(s)
- if t.tab[h1] == "" {
- t.tab[h1] = s
- return true
- }
- if t.tab[h2] == "" {
- t.tab[h2] = s
- return true
- }
- if t.push(h1, 0) {
- t.tab[h1] = s
- return true
- }
- if t.push(h2, 0) {
- t.tab[h2] = s
- return true
- }
- return false
-}
-
-// push attempts to push aside the entry in slot i.
-func (t *table) push(i uint32, depth int) bool {
- if depth > len(t.tab) {
- return false
- }
- s := t.tab[i]
- h1, h2 := t.hash(s)
- j := h1 + h2 - i
- if t.tab[j] != "" && !t.push(j, depth+1) {
- return false
- }
- t.tab[j] = s
- return true
-}
-
-// The lists of element names and attribute keys were taken from
-// https://html.spec.whatwg.org/multipage/indices.html#index
-// as of the "HTML Living Standard - Last Updated 16 April 2018" version.
-
-// "command", "keygen" and "menuitem" have been removed from the spec,
-// but are kept here for backwards compatibility.
-var elements = []string{
- "a",
- "abbr",
- "address",
- "area",
- "article",
- "aside",
- "audio",
- "b",
- "base",
- "bdi",
- "bdo",
- "blockquote",
- "body",
- "br",
- "button",
- "canvas",
- "caption",
- "cite",
- "code",
- "col",
- "colgroup",
- "command",
- "data",
- "datalist",
- "dd",
- "del",
- "details",
- "dfn",
- "dialog",
- "div",
- "dl",
- "dt",
- "em",
- "embed",
- "fieldset",
- "figcaption",
- "figure",
- "footer",
- "form",
- "h1",
- "h2",
- "h3",
- "h4",
- "h5",
- "h6",
- "head",
- "header",
- "hgroup",
- "hr",
- "html",
- "i",
- "iframe",
- "img",
- "input",
- "ins",
- "kbd",
- "keygen",
- "label",
- "legend",
- "li",
- "link",
- "main",
- "map",
- "mark",
- "menu",
- "menuitem",
- "meta",
- "meter",
- "nav",
- "noscript",
- "object",
- "ol",
- "optgroup",
- "option",
- "output",
- "p",
- "param",
- "picture",
- "pre",
- "progress",
- "q",
- "rp",
- "rt",
- "ruby",
- "s",
- "samp",
- "script",
- "section",
- "select",
- "slot",
- "small",
- "source",
- "span",
- "strong",
- "style",
- "sub",
- "summary",
- "sup",
- "table",
- "tbody",
- "td",
- "template",
- "textarea",
- "tfoot",
- "th",
- "thead",
- "time",
- "title",
- "tr",
- "track",
- "u",
- "ul",
- "var",
- "video",
- "wbr",
-}
-
-// https://html.spec.whatwg.org/multipage/indices.html#attributes-3
-//
-// "challenge", "command", "contextmenu", "dropzone", "icon", "keytype", "mediagroup",
-// "radiogroup", "spellcheck", "scoped", "seamless", "sortable" and "sorted" have been removed from the spec,
-// but are kept here for backwards compatibility.
-var attributes = []string{ - "abbr", - "accept", - "accept-charset", - "accesskey", - "action", - "allowfullscreen", - "allowpaymentrequest", - "allowusermedia", - "alt", - "as", - "async", - "autocomplete", - "autofocus", - "autoplay", - "challenge", - "charset", - "checked", - "cite", - "class", - "color", - "cols", - "colspan", - "command", - "content", - "contenteditable", - "contextmenu", - "controls", - "coords", - "crossorigin", - "data", - "datetime", - "default", - "defer", - "dir", - "dirname", - "disabled", - "download", - "draggable", - "dropzone", - "enctype", - "for", - "form", - "formaction", - "formenctype", - "formmethod", - "formnovalidate", - "formtarget", - "headers", - "height", - "hidden", - "high", - "href", - "hreflang", - "http-equiv", - "icon", - "id", - "inputmode", - "integrity", - "is", - "ismap", - "itemid", - "itemprop", - "itemref", - "itemscope", - "itemtype", - "keytype", - "kind", - "label", - "lang", - "list", - "loop", - "low", - "manifest", - "max", - "maxlength", - "media", - "mediagroup", - "method", - "min", - "minlength", - "multiple", - "muted", - "name", - "nomodule", - "nonce", - "novalidate", - "open", - "optimum", - "pattern", - "ping", - "placeholder", - "playsinline", - "poster", - "preload", - "radiogroup", - "readonly", - "referrerpolicy", - "rel", - "required", - "reversed", - "rows", - "rowspan", - "sandbox", - "spellcheck", - "scope", - "scoped", - "seamless", - "selected", - "shape", - "size", - "sizes", - "sortable", - "sorted", - "slot", - "span", - "spellcheck", - "src", - "srcdoc", - "srclang", - "srcset", - "start", - "step", - "style", - "tabindex", - "target", - "title", - "translate", - "type", - "typemustmatch", - "updateviacache", - "usemap", - "value", - "width", - "workertype", - "wrap", -} - -// "onautocomplete", "onautocompleteerror", "onmousewheel", -// "onshow" and "onsort" have been removed from the spec, -// but are kept here for backwards compatibility. -var eventHandlers = []string{ - "onabort", - "onautocomplete", - "onautocompleteerror", - "onauxclick", - "onafterprint", - "onbeforeprint", - "onbeforeunload", - "onblur", - "oncancel", - "oncanplay", - "oncanplaythrough", - "onchange", - "onclick", - "onclose", - "oncontextmenu", - "oncopy", - "oncuechange", - "oncut", - "ondblclick", - "ondrag", - "ondragend", - "ondragenter", - "ondragexit", - "ondragleave", - "ondragover", - "ondragstart", - "ondrop", - "ondurationchange", - "onemptied", - "onended", - "onerror", - "onfocus", - "onhashchange", - "oninput", - "oninvalid", - "onkeydown", - "onkeypress", - "onkeyup", - "onlanguagechange", - "onload", - "onloadeddata", - "onloadedmetadata", - "onloadend", - "onloadstart", - "onmessage", - "onmessageerror", - "onmousedown", - "onmouseenter", - "onmouseleave", - "onmousemove", - "onmouseout", - "onmouseover", - "onmouseup", - "onmousewheel", - "onwheel", - "onoffline", - "ononline", - "onpagehide", - "onpageshow", - "onpaste", - "onpause", - "onplay", - "onplaying", - "onpopstate", - "onprogress", - "onratechange", - "onreset", - "onresize", - "onrejectionhandled", - "onscroll", - "onsecuritypolicyviolation", - "onseeked", - "onseeking", - "onselect", - "onshow", - "onsort", - "onstalled", - "onstorage", - "onsubmit", - "onsuspend", - "ontimeupdate", - "ontoggle", - "onunhandledrejection", - "onunload", - "onvolumechange", - "onwaiting", -} - -// extra are ad-hoc values not covered by any of the lists above. 
-var extra = []string{ - "acronym", - "align", - "annotation", - "annotation-xml", - "applet", - "basefont", - "bgsound", - "big", - "blink", - "center", - "color", - "desc", - "face", - "font", - "foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive. - "foreignobject", - "frame", - "frameset", - "image", - "isindex", - "listing", - "malignmark", - "marquee", - "math", - "mglyph", - "mi", - "mn", - "mo", - "ms", - "mtext", - "nobr", - "noembed", - "noframes", - "plaintext", - "prompt", - "public", - "rb", - "rtc", - "spacer", - "strike", - "svg", - "system", - "tt", - "xmp", -} diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go index a3a918f0b..73804d347 100644 --- a/vendor/golang.org/x/net/html/const.go +++ b/vendor/golang.org/x/net/html/const.go @@ -52,7 +52,6 @@ var isSpecialElementMap = map[string]bool{ "iframe": true, "img": true, "input": true, - "isindex": true, // The 'isindex' element has been removed, but keep it for backwards compatibility. "keygen": true, "li": true, "link": true, diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go index c801a6a04..2cd12fc81 100644 --- a/vendor/golang.org/x/net/html/parse.go +++ b/vendor/golang.org/x/net/html/parse.go @@ -184,6 +184,17 @@ func (p *parser) clearStackToContext(s scope) { } } +// parseGenericRawTextElements implements the generic raw text element parsing +// algorithm defined in 12.2.6.2. +// https://html.spec.whatwg.org/multipage/parsing.html#parsing-elements-that-contain-only-text +// TODO: Since both RAWTEXT and RCDATA states are treated as tokenizer's part +// officially, need to make tokenizer consider both states. +func (p *parser) parseGenericRawTextElement() { + p.addElement() + p.originalIM = p.im + p.im = textIM +} + // generateImpliedEndTags pops nodes off the stack of open elements as long as // the top node has a tag name of dd, dt, li, optgroup, option, p, rb, rp, rt or rtc. // If exceptions are specified, nodes with that name will not be popped off. @@ -192,16 +203,17 @@ func (p *parser) generateImpliedEndTags(exceptions ...string) { loop: for i = len(p.oe) - 1; i >= 0; i-- { n := p.oe[i] - if n.Type == ElementNode { - switch n.DataAtom { - case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc: - for _, except := range exceptions { - if n.Data == except { - break loop - } + if n.Type != ElementNode { + break + } + switch n.DataAtom { + case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc: + for _, except := range exceptions { + if n.Data == except { + break loop } - continue } + continue } break } @@ -369,8 +381,7 @@ findIdenticalElements: // Section 12.2.4.3. func (p *parser) clearActiveFormattingElements() { for { - n := p.afe.pop() - if len(p.afe) == 0 || n.Type == scopeMarkerNode { + if n := p.afe.pop(); len(p.afe) == 0 || n.Type == scopeMarkerNode { return } } @@ -625,25 +636,29 @@ func inHeadIM(p *parser) bool { switch p.tok.DataAtom { case a.Html: return inBodyIM(p) - case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta: + case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta: p.addElement() p.oe.pop() p.acknowledgeSelfClosingTag() return true case a.Noscript: - p.addElement() if p.scripting { - p.setOriginalIM() - p.im = textIM - } else { - p.im = inHeadNoscriptIM + p.parseGenericRawTextElement() + return true } + p.addElement() + p.im = inHeadNoscriptIM + // Don't let the tokenizer go into raw text mode when scripting is disabled. 
+ p.tokenizer.NextIsNotRawText() return true - case a.Script, a.Title, a.Noframes, a.Style: + case a.Script, a.Title: p.addElement() p.setOriginalIM() p.im = textIM return true + case a.Noframes, a.Style: + p.parseGenericRawTextElement() + return true case a.Head: // Ignore the token. return true @@ -855,7 +870,7 @@ func inBodyIM(p *parser) bool { return true } copyAttributes(p.oe[0], p.tok) - case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title: + case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title: return inHeadIM(p) case a.Body: if p.oe.contains(a.Template) { @@ -1014,53 +1029,6 @@ func inBodyIM(p *parser) bool { p.tok.DataAtom = a.Img p.tok.Data = a.Img.String() return false - case a.Isindex: - if p.form != nil { - // Ignore the token. - return true - } - action := "" - prompt := "This is a searchable index. Enter search keywords: " - attr := []Attribute{{Key: "name", Val: "isindex"}} - for _, t := range p.tok.Attr { - switch t.Key { - case "action": - action = t.Val - case "name": - // Ignore the attribute. - case "prompt": - prompt = t.Val - default: - attr = append(attr, t) - } - } - p.acknowledgeSelfClosingTag() - p.popUntil(buttonScope, a.P) - p.parseImpliedToken(StartTagToken, a.Form, a.Form.String()) - if p.form == nil { - // NOTE: The 'isindex' element has been removed, - // and the 'template' element has not been designed to be - // collaborative with the index element. - // - // Ignore the token. - return true - } - if action != "" { - p.form.Attr = []Attribute{{Key: "action", Val: action}} - } - p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String()) - p.parseImpliedToken(StartTagToken, a.Label, a.Label.String()) - p.addText(prompt) - p.addChild(&Node{ - Type: ElementNode, - DataAtom: a.Input, - Data: a.Input.String(), - Attr: attr, - }) - p.oe.pop() - p.parseImpliedToken(EndTagToken, a.Label, a.Label.String()) - p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String()) - p.parseImpliedToken(EndTagToken, a.Form, a.Form.String()) case a.Textarea: p.addElement() p.setOriginalIM() @@ -1070,18 +1038,21 @@ func inBodyIM(p *parser) bool { p.popUntil(buttonScope, a.P) p.reconstructActiveFormattingElements() p.framesetOK = false - p.addElement() - p.setOriginalIM() - p.im = textIM + p.parseGenericRawTextElement() case a.Iframe: p.framesetOK = false + p.parseGenericRawTextElement() + case a.Noembed: + p.parseGenericRawTextElement() + case a.Noscript: + if p.scripting { + p.parseGenericRawTextElement() + return true + } + p.reconstructActiveFormattingElements() p.addElement() - p.setOriginalIM() - p.im = textIM - case a.Noembed, a.Noscript: - p.addElement() - p.setOriginalIM() - p.im = textIM + // Don't let the tokenizer go into raw text mode when scripting is disabled. 
+ p.tokenizer.NextIsNotRawText() case a.Select: p.reconstructActiveFormattingElements() p.addElement() @@ -1198,14 +1169,13 @@ func inBodyIM(p *parser) bool { if len(p.templateStack) > 0 { p.im = inTemplateIM return false - } else { - for _, e := range p.oe { - switch e.DataAtom { - case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc, a.Tbody, a.Td, a.Tfoot, a.Th, - a.Thead, a.Tr, a.Body, a.Html: - default: - return true - } + } + for _, e := range p.oe { + switch e.DataAtom { + case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc, a.Tbody, a.Td, a.Tfoot, a.Th, + a.Thead, a.Tr, a.Body, a.Html: + default: + return true } } } @@ -1221,9 +1191,15 @@ func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom, tagName string) { // Once the code successfully parses the comprehensive test suite, we should // refactor this code to be more idiomatic. - // Steps 1-4. The outer loop. + // Steps 1-2 + if current := p.oe.top(); current.Data == tagName && p.afe.index(current) == -1 { + p.oe.pop() + return + } + + // Steps 3-5. The outer loop. for i := 0; i < 8; i++ { - // Step 5. Find the formatting element. + // Step 6. Find the formatting element. var formattingElement *Node for j := len(p.afe) - 1; j >= 0; j-- { if p.afe[j].Type == scopeMarkerNode { @@ -1238,17 +1214,22 @@ func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom, tagName string) { p.inBodyEndTagOther(tagAtom, tagName) return } + + // Step 7. Ignore the tag if formatting element is not in the stack of open elements. feIndex := p.oe.index(formattingElement) if feIndex == -1 { p.afe.remove(formattingElement) return } + // Step 8. Ignore the tag if formatting element is not in the scope. if !p.elementInScope(defaultScope, tagAtom) { // Ignore the tag. return } - // Steps 9-10. Find the furthest block. + // Step 9. This step is omitted because it's just a parse error but no need to return. + + // Steps 10-11. Find the furthest block. var furthestBlock *Node for _, e := range p.oe[feIndex:] { if isSpecialElement(e) { @@ -1265,47 +1246,65 @@ func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom, tagName string) { return } - // Steps 11-12. Find the common ancestor and bookmark node. + // Steps 12-13. Find the common ancestor and bookmark node. commonAncestor := p.oe[feIndex-1] bookmark := p.afe.index(formattingElement) - // Step 13. The inner loop. Find the lastNode to reparent. + // Step 14. The inner loop. Find the lastNode to reparent. lastNode := furthestBlock node := furthestBlock x := p.oe.index(node) - // Steps 13.1-13.2 - for j := 0; j < 3; j++ { - // Step 13.3. + // Step 14.1. + j := 0 + for { + // Step 14.2. + j++ + // Step. 14.3. x-- node = p.oe[x] - // Step 13.4 - 13.5. + // Step 14.4. Go to the next step if node is formatting element. + if node == formattingElement { + break + } + // Step 14.5. Remove node from the list of active formatting elements if + // inner loop counter is greater than three and node is in the list of + // active formatting elements. + if ni := p.afe.index(node); j > 3 && ni > -1 { + p.afe.remove(node) + // If any element of the list of active formatting elements is removed, + // we need to take care whether bookmark should be decremented or not. + // This is because the value of bookmark may exceed the size of the + // list by removing elements from the list. + if ni <= bookmark { + bookmark-- + } + continue + } + // Step 14.6. Continue the next inner loop if node is not in the list of + // active formatting elements. 
if p.afe.index(node) == -1 { p.oe.remove(node) continue } - // Step 13.6. - if node == formattingElement { - break - } - // Step 13.7. + // Step 14.7. clone := node.clone() p.afe[p.afe.index(node)] = clone p.oe[p.oe.index(node)] = clone node = clone - // Step 13.8. + // Step 14.8. if lastNode == furthestBlock { bookmark = p.afe.index(node) + 1 } - // Step 13.9. + // Step 14.9. if lastNode.Parent != nil { lastNode.Parent.RemoveChild(lastNode) } node.AppendChild(lastNode) - // Step 13.10. + // Step 14.10. lastNode = node } - // Step 14. Reparent lastNode to the common ancestor, + // Step 15. Reparent lastNode to the common ancestor, // or for misnested table nodes, to the foster parent. if lastNode.Parent != nil { lastNode.Parent.RemoveChild(lastNode) @@ -1317,13 +1316,13 @@ func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom, tagName string) { commonAncestor.AppendChild(lastNode) } - // Steps 15-17. Reparent nodes from the furthest block's children + // Steps 16-18. Reparent nodes from the furthest block's children // to a clone of the formatting element. clone := formattingElement.clone() reparentChildren(clone, furthestBlock) furthestBlock.AppendChild(clone) - // Step 18. Fix up the list of active formatting elements. + // Step 19. Fix up the list of active formatting elements. if oldLoc := p.afe.index(formattingElement); oldLoc != -1 && oldLoc < bookmark { // Move the bookmark with the rest of the list. bookmark-- @@ -1331,7 +1330,7 @@ func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom, tagName string) { p.afe.remove(formattingElement) p.afe.insert(bookmark, clone) - // Step 19. Fix up the stack of open elements. + // Step 20. Fix up the stack of open elements. p.oe.remove(formattingElement) p.oe.insert(p.oe.index(furthestBlock)+1, clone) } @@ -1502,14 +1501,13 @@ func inCaptionIM(p *parser) bool { case StartTagToken: switch p.tok.DataAtom { case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Td, a.Tfoot, a.Thead, a.Tr: - if p.popUntil(tableScope, a.Caption) { - p.clearActiveFormattingElements() - p.im = inTableIM - return false - } else { + if !p.popUntil(tableScope, a.Caption) { // Ignore the token. return true } + p.clearActiveFormattingElements() + p.im = inTableIM + return false case a.Select: p.reconstructActiveFormattingElements() p.addElement() @@ -1526,14 +1524,13 @@ func inCaptionIM(p *parser) bool { } return true case a.Table: - if p.popUntil(tableScope, a.Caption) { - p.clearActiveFormattingElements() - p.im = inTableIM - return false - } else { + if !p.popUntil(tableScope, a.Caption) { // Ignore the token. return true } + p.clearActiveFormattingElements() + p.im = inTableIM + return false case a.Body, a.Col, a.Colgroup, a.Html, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr: // Ignore the token. return true @@ -1777,12 +1774,11 @@ func inSelectIM(p *parser) bool { } p.addElement() case a.Select: - if p.popUntil(selectScope, a.Select) { - p.resetInsertionMode() - } else { + if !p.popUntil(selectScope, a.Select) { // Ignore the token. return true } + p.resetInsertionMode() case a.Input, a.Keygen, a.Textarea: if p.elementInScope(selectScope, a.Select) { p.parseImpliedToken(EndTagToken, a.Select, a.Select.String()) @@ -1810,12 +1806,11 @@ func inSelectIM(p *parser) bool { p.oe = p.oe[:i] } case a.Select: - if p.popUntil(selectScope, a.Select) { - p.resetInsertionMode() - } else { + if !p.popUntil(selectScope, a.Select) { // Ignore the token. 
return true } + p.resetInsertionMode() case a.Template: return inHeadIM(p) } @@ -2352,8 +2347,7 @@ func ParseWithOptions(r io.Reader, opts ...ParseOption) (*Node, error) { f(p) } - err := p.parse() - if err != nil { + if err := p.parse(); err != nil { return nil, err } return p.doc, nil @@ -2411,8 +2405,7 @@ func ParseFragmentWithOptions(r io.Reader, context *Node, opts ...ParseOption) ( } } - err := p.parse() - if err != nil { + if err := p.parse(); err != nil { return nil, err } diff --git a/vendor/golang.org/x/net/html/render.go b/vendor/golang.org/x/net/html/render.go index d34564f49..8bf47ede7 100644 --- a/vendor/golang.org/x/net/html/render.go +++ b/vendor/golang.org/x/net/html/render.go @@ -252,20 +252,19 @@ func writeQuoted(w writer, s string) error { // Section 12.1.2, "Elements", gives this list of void elements. Void elements // are those that can't have any contents. var voidElements = map[string]bool{ - "area": true, - "base": true, - "br": true, - "col": true, - "command": true, - "embed": true, - "hr": true, - "img": true, - "input": true, - "keygen": true, - "link": true, - "meta": true, - "param": true, - "source": true, - "track": true, - "wbr": true, + "area": true, + "base": true, + "br": true, + "col": true, + "embed": true, + "hr": true, + "img": true, + "input": true, + "keygen": true, + "link": true, + "meta": true, + "param": true, + "source": true, + "track": true, + "wbr": true, } diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go index ae0d1b05c..877709f99 100644 --- a/vendor/golang.org/x/net/html/token.go +++ b/vendor/golang.org/x/net/html/token.go @@ -296,8 +296,7 @@ func (z *Tokenizer) Buffered() []byte { // too many times in succession. func readAtLeastOneByte(r io.Reader, b []byte) (int, error) { for i := 0; i < 100; i++ { - n, err := r.Read(b) - if n != 0 || err != nil { + if n, err := r.Read(b); n != 0 || err != nil { return n, err } } diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go index aa0d34f1e..90657915f 100644 --- a/vendor/golang.org/x/oauth2/transport.go +++ b/vendor/golang.org/x/oauth2/transport.go @@ -6,7 +6,7 @@ package oauth2 import ( "errors" - "io" + "log" "net/http" "sync" ) @@ -25,9 +25,6 @@ type Transport struct { // Base is the base RoundTripper used to make HTTP requests. // If nil, http.DefaultTransport is used. Base http.RoundTripper - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified } // RoundTrip authorizes and authenticates the request with an @@ -52,35 +49,22 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { req2 := cloneRequest(req) // per RoundTripper contract token.SetAuthHeader(req2) - t.setModReq(req, req2) - res, err := t.base().RoundTrip(req2) - // req.Body is assumed to have been closed by the base RoundTripper. + // req.Body is assumed to be closed by the base RoundTripper. reqBodyClosed = true - - if err != nil { - t.setModReq(req, nil) - return nil, err - } - res.Body = &onEOFReader{ - rc: res.Body, - fn: func() { t.setModReq(req, nil) }, - } - return res, nil + return t.base().RoundTrip(req2) } -// CancelRequest cancels an in-flight request by closing its connection. +var cancelOnce sync.Once + +// CancelRequest does nothing. It used to be a legacy cancellation mechanism +// but now only it only logs on first use to warn that it's deprecated. +// +// Deprecated: use contexts for cancellation instead. 
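Because Transport.CancelRequest is now a no-op, cancellation has to travel through the request context instead. A minimal sketch, with a hypothetical static token and URL, of cancelling an oauth2-authenticated request via a context timeout:

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"golang.org/x/oauth2"
)

func main() {
	// Hypothetical static token; a real program would obtain a TokenSource
	// from its oauth2.Config.
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "example-token"})
	client := oauth2.NewClient(context.Background(), ts)

	// Cancellation now lives in the request context rather than in
	// Transport.CancelRequest, which no longer does anything.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, "https://example.invalid/", nil)
	if err != nil {
		fmt.Println(err)
		return
	}
	resp, err := client.Do(req)
	if err != nil {
		fmt.Println("request failed or was cancelled:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}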
func (t *Transport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base().(canceler); ok { - t.mu.Lock() - modReq := t.modReq[req] - delete(t.modReq, req) - t.mu.Unlock() - cr.CancelRequest(modReq) - } + cancelOnce.Do(func() { + log.Printf("deprecated: golang.org/x/oauth2: Transport.CancelRequest no longer does anything; use contexts") + }) } func (t *Transport) base() http.RoundTripper { @@ -90,19 +74,6 @@ func (t *Transport) base() http.RoundTripper { return http.DefaultTransport } -func (t *Transport) setModReq(orig, mod *http.Request) { - t.mu.Lock() - defer t.mu.Unlock() - if t.modReq == nil { - t.modReq = make(map[*http.Request]*http.Request) - } - if mod == nil { - delete(t.modReq, orig) - } else { - t.modReq[orig] = mod - } -} - // cloneRequest returns a clone of the provided *http.Request. // The clone is a shallow copy of the struct and its Header map. func cloneRequest(r *http.Request) *http.Request { @@ -116,29 +87,3 @@ func cloneRequest(r *http.Request) *http.Request { } return r2 } - -type onEOFReader struct { - rc io.ReadCloser - fn func() -} - -func (r *onEOFReader) Read(p []byte) (n int, err error) { - n, err = r.rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -func (r *onEOFReader) Close() error { - err := r.rc.Close() - r.runFunc() - return err -} - -func (r *onEOFReader) runFunc() { - if fn := r.fn; fn != nil { - fn() - r.fn = nil - } -} diff --git a/vendor/golang.org/x/sys/unix/fcntl.go b/vendor/golang.org/x/sys/unix/fcntl.go index 39c03f1ef..4dc534864 100644 --- a/vendor/golang.org/x/sys/unix/fcntl.go +++ b/vendor/golang.org/x/sys/unix/fcntl.go @@ -9,12 +9,11 @@ package unix import "unsafe" // fcntl64Syscall is usually SYS_FCNTL, but is overridden on 32-bit Linux -// systems by flock_linux_32bit.go to be SYS_FCNTL64. +// systems by fcntl_linux_32bit.go to be SYS_FCNTL64. var fcntl64Syscall uintptr = SYS_FCNTL -// FcntlInt performs a fcntl syscall on fd with the provided command and argument. -func FcntlInt(fd uintptr, cmd, arg int) (int, error) { - valptr, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(arg)) +func fcntl(fd int, cmd, arg int) (int, error) { + valptr, _, errno := Syscall(fcntl64Syscall, uintptr(fd), uintptr(cmd), uintptr(arg)) var err error if errno != 0 { err = errno @@ -22,6 +21,11 @@ func FcntlInt(fd uintptr, cmd, arg int) (int, error) { return int(valptr), err } +// FcntlInt performs a fcntl syscall on fd with the provided command and argument. +func FcntlInt(fd uintptr, cmd, arg int) (int, error) { + return fcntl(int(fd), cmd, arg) +} + // FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. 
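The fcntl.go hunk above turns FcntlInt into a thin wrapper over an unexported fcntl helper that takes a plain int descriptor. A minimal usage sketch, assuming Linux and only the exported golang.org/x/sys/unix API (the file path is merely an example), that reads a descriptor's status flags with F_GETFL:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/etc/hostname") // example path, only to obtain a descriptor
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	// FcntlInt wraps fcntl(2); F_GETFL returns the file status flags.
	flags, err := unix.FcntlInt(f.Fd(), unix.F_GETFL, 0)
	if err != nil {
		fmt.Println("fcntl:", err)
		return
	}
	fmt.Printf("flags: %#x nonblocking: %v\n", flags, flags&unix.O_NONBLOCK != 0)
}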
func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { _, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(unsafe.Pointer(lk))) diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index 890ec464c..fa0c69b9d 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -50,7 +50,7 @@ if [[ "$GOOS" = "linux" ]]; then # Use the Docker-based build system # Files generated through docker (use $cmd so you can Ctl-C the build or run) $cmd docker build --tag generate:$GOOS $GOOS - $cmd docker run --interactive --tty --volume $(dirname "$(readlink -f "$0")"):/build generate:$GOOS + $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")" && /bin/pwd):/build generate:$GOOS exit fi diff --git a/vendor/golang.org/x/sys/unix/mkasm_darwin.go b/vendor/golang.org/x/sys/unix/mkasm_darwin.go deleted file mode 100644 index 6f7bb6edf..000000000 --- a/vendor/golang.org/x/sys/unix/mkasm_darwin.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// mkasm_darwin.go generates assembly trampolines to call libSystem routines from Go. -//This program must be run after mksyscall.go. -package main - -import ( - "bytes" - "fmt" - "io/ioutil" - "log" - "os" - "strings" -) - -func writeASMFile(in string, fileName string, buildTags string) { - trampolines := map[string]bool{} - - var out bytes.Buffer - - fmt.Fprintf(&out, "// go run mkasm_darwin.go %s\n", strings.Join(os.Args[1:], " ")) - fmt.Fprintf(&out, "// Code generated by the command above; DO NOT EDIT.\n") - fmt.Fprintf(&out, "\n") - fmt.Fprintf(&out, "// +build %s\n", buildTags) - fmt.Fprintf(&out, "\n") - fmt.Fprintf(&out, "#include \"textflag.h\"\n") - for _, line := range strings.Split(in, "\n") { - if !strings.HasPrefix(line, "func ") || !strings.HasSuffix(line, "_trampoline()") { - continue - } - fn := line[5 : len(line)-13] - if !trampolines[fn] { - trampolines[fn] = true - fmt.Fprintf(&out, "TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn) - fmt.Fprintf(&out, "\tJMP\t%s(SB)\n", fn) - } - } - err := ioutil.WriteFile(fileName, out.Bytes(), 0644) - if err != nil { - log.Fatalf("can't write %s: %s", fileName, err) - } -} - -func main() { - in1, err := ioutil.ReadFile("syscall_darwin.go") - if err != nil { - log.Fatalf("can't open syscall_darwin.go: %s", err) - } - arch := os.Args[1] - in2, err := ioutil.ReadFile(fmt.Sprintf("syscall_darwin_%s.go", arch)) - if err != nil { - log.Fatalf("can't open syscall_darwin_%s.go: %s", arch, err) - } - in3, err := ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.go", arch)) - if err != nil { - log.Fatalf("can't open zsyscall_darwin_%s.go: %s", arch, err) - } - in := string(in1) + string(in2) + string(in3) - - writeASMFile(in, fmt.Sprintf("zsyscall_darwin_%s.s", arch), "go1.12") - - in1, err = ioutil.ReadFile("syscall_darwin.1_13.go") - if err != nil { - log.Fatalf("can't open syscall_darwin.1_13.go: %s", err) - } - in2, err = ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.1_13.go", arch)) - if err != nil { - log.Fatalf("can't open zsyscall_darwin_%s.1_13.go: %s", arch, err) - } - - in = string(in1) + string(in2) - - writeASMFile(in, fmt.Sprintf("zsyscall_darwin_%s.1_13.s", arch), "go1.13") -} diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index b5462b85f..6ffac9250 100644 --- 
a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -186,6 +186,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -526,6 +527,7 @@ ccflags="$@" $2 ~ /^WDIOC_/ || $2 ~ /^NFN/ || $2 ~ /^XDP_/ || + $2 ~ /^RWF_/ || $2 ~ /^(HDIO|WIN|SMART)_/ || $2 ~ /^CRYPTO_/ || $2 ~ /^TIPC_/ || diff --git a/vendor/golang.org/x/sys/unix/mkpost.go b/vendor/golang.org/x/sys/unix/mkpost.go deleted file mode 100644 index eb4332059..000000000 --- a/vendor/golang.org/x/sys/unix/mkpost.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// mkpost processes the output of cgo -godefs to -// modify the generated types. It is used to clean up -// the sys API in an architecture specific manner. -// -// mkpost is run after cgo -godefs; see README.md. -package main - -import ( - "bytes" - "fmt" - "go/format" - "io/ioutil" - "log" - "os" - "regexp" -) - -func main() { - // Get the OS and architecture (using GOARCH_TARGET if it exists) - goos := os.Getenv("GOOS") - goarch := os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - // Check that we are using the Docker-based build system if we should be. - if goos == "linux" { - if os.Getenv("GOLANG_SYS_BUILD") != "docker" { - os.Stderr.WriteString("In the Docker-based build system, mkpost should not be called directly.\n") - os.Stderr.WriteString("See README.md\n") - os.Exit(1) - } - } - - b, err := ioutil.ReadAll(os.Stdin) - if err != nil { - log.Fatal(err) - } - - if goos == "aix" { - // Replace type of Atim, Mtim and Ctim by Timespec in Stat_t - // to avoid having both StTimespec and Timespec. - sttimespec := regexp.MustCompile(`_Ctype_struct_st_timespec`) - b = sttimespec.ReplaceAll(b, []byte("Timespec")) - } - - // Intentionally export __val fields in Fsid and Sigset_t - valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__(bits|val)(\s+\S+\s+)}`) - b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$4}")) - - // Intentionally export __fds_bits field in FdSet - fdSetRegex := regexp.MustCompile(`type (FdSet) struct {(\s+)X__fds_bits(\s+\S+\s+)}`) - b = fdSetRegex.ReplaceAll(b, []byte("type $1 struct {${2}Bits$3}")) - - // If we have empty Ptrace structs, we should delete them. Only s390x emits - // nonempty Ptrace structs. - ptraceRexexp := regexp.MustCompile(`type Ptrace((Psw|Fpregs|Per) struct {\s*})`) - b = ptraceRexexp.ReplaceAll(b, nil) - - // Replace the control_regs union with a blank identifier for now. - controlRegsRegex := regexp.MustCompile(`(Control_regs)\s+\[0\]uint64`) - b = controlRegsRegex.ReplaceAll(b, []byte("_ [0]uint64")) - - // Remove fields that are added by glibc - // Note that this is unstable as the identifers are private. - removeFieldsRegex := regexp.MustCompile(`X__glibc\S*`) - b = removeFieldsRegex.ReplaceAll(b, []byte("_")) - - // Convert [65]int8 to [65]byte in Utsname members to simplify - // conversion to string; see golang.org/issue/20753 - convertUtsnameRegex := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`) - b = convertUtsnameRegex.ReplaceAll(b, []byte("$1$3[$4]byte")) - - // Convert [1024]int8 to [1024]byte in Ptmget members - convertPtmget := regexp.MustCompile(`([SC]n)(\s+)\[(\d+)\]u?int8`) - b = convertPtmget.ReplaceAll(b, []byte("$1[$3]byte")) - - // Remove spare fields (e.g. 
in Statx_t) - spareFieldsRegex := regexp.MustCompile(`X__spare\S*`) - b = spareFieldsRegex.ReplaceAll(b, []byte("_")) - - // Remove cgo padding fields - removePaddingFieldsRegex := regexp.MustCompile(`Pad_cgo_\d+`) - b = removePaddingFieldsRegex.ReplaceAll(b, []byte("_")) - - // Remove padding, hidden, or unused fields - removeFieldsRegex = regexp.MustCompile(`\b(X_\S+|Padding)`) - b = removeFieldsRegex.ReplaceAll(b, []byte("_")) - - // Remove the first line of warning from cgo - b = b[bytes.IndexByte(b, '\n')+1:] - // Modify the command in the header to include: - // mkpost, our own warning, and a build tag. - replacement := fmt.Sprintf(`$1 | go run mkpost.go -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s,%s`, goarch, goos) - cgoCommandRegex := regexp.MustCompile(`(cgo -godefs .*)`) - b = cgoCommandRegex.ReplaceAll(b, []byte(replacement)) - - // Rename Stat_t time fields - if goos == "freebsd" && goarch == "386" { - // Hide Stat_t.[AMCB]tim_ext fields - renameStatTimeExtFieldsRegex := regexp.MustCompile(`[AMCB]tim_ext`) - b = renameStatTimeExtFieldsRegex.ReplaceAll(b, []byte("_")) - } - renameStatTimeFieldsRegex := regexp.MustCompile(`([AMCB])(?:irth)?time?(?:spec)?\s+(Timespec|StTimespec)`) - b = renameStatTimeFieldsRegex.ReplaceAll(b, []byte("${1}tim ${2}")) - - // gofmt - b, err = format.Source(b) - if err != nil { - log.Fatal(err) - } - - os.Stdout.Write(b) -} diff --git a/vendor/golang.org/x/sys/unix/mksyscall.go b/vendor/golang.org/x/sys/unix/mksyscall.go deleted file mode 100644 index 9e540cc89..000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall.go +++ /dev/null @@ -1,402 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -This program reads a file containing function prototypes -(like syscall_darwin.go) and generates system call bodies. -The prototypes are marked by lines beginning with "//sys" -and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named errno. - -A line beginning with //sysnb is like //sys, except that the -goroutine will not be suspended during the execution of the system -call. This must only be used for system calls which can never -block, as otherwise the system call could cause all goroutines to -hang. 
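The deleted mkpost.go above post-processes cgo -godefs output with a series of regular-expression rewrites, for instance turning Utsname's [65]int8 members into [65]byte so they convert to strings easily. A small runnable sketch of that one rewrite, reusing the convertUtsnameRegex pattern from the removed file on a made-up input snippet:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Made-up snippet resembling cgo -godefs output.
	src := []byte("type Utsname struct {\n\tSysname [65]int8\n\tNodename [65]int8\n}\n")

	// Same rewrite the removed mkpost.go applies: expose these members as
	// byte arrays so they convert to strings (see golang.org/issue/20753).
	convertUtsname := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`)
	out := convertUtsname.ReplaceAll(src, []byte("$1$3[$4]byte"))

	fmt.Print(string(out))
}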
-*/ -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - plan9 = flag.Bool("plan9", false, "plan9") - openbsd = flag.Bool("openbsd", false, "openbsd") - netbsd = flag.Bool("netbsd", false, "netbsd") - dragonfly = flag.Bool("dragonfly", false, "dragonfly") - arm = flag.Bool("arm", false, "arm") // 64-bit value should use (even, odd)-pair - tags = flag.String("tags", "", "build tags") - filename = flag.String("output", "", "output file name (standard output if omitted)") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - // Get the OS and architecture (using GOARCH_TARGET if it exists) - goos := os.Getenv("GOOS") - if goos == "" { - fmt.Fprintln(os.Stderr, "GOOS not defined in environment") - os.Exit(1) - } - goarch := os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - - // Check that we are using the Docker-based build system if we should - if goos == "linux" { - if os.Getenv("GOLANG_SYS_BUILD") != "docker" { - fmt.Fprintf(os.Stderr, "In the Docker-based build system, mksyscall should not be called directly.\n") - fmt.Fprintf(os.Stderr, "See README.md\n") - os.Exit(1) - } - } - - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - libc := false - if goos == "darwin" && (strings.Contains(buildTags(), ",go1.12") || strings.Contains(buildTags(), ",go1.13")) { - libc = true - } - trampolines := map[string]bool{} - - text := "" - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, errno error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? 
(\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, sysname := f[2], f[3], f[4], f[5] - - // ClockGettime doesn't have a syscall number on Darwin, only generate libc wrappers. - if goos == "darwin" && !libc && funct == "ClockGettime" { - continue - } - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. - text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - - // Go function header. - outDecl := "" - if len(out) > 0 { - outDecl = fmt.Sprintf(" (%s)", strings.Join(out, ", ")) - } - text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outDecl) - - // Check if err return available - errvar := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - break - } - } - - // Prepare arguments to Syscall. - var args []string - n := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") - } else if p.Type == "string" && errvar != "" { - text += fmt.Sprintf("\tvar _p%d *byte\n", n) - text += fmt.Sprintf("\t_p%d, %s = BytePtrFromString(%s)\n", n, errvar, p.Name) - text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - text += fmt.Sprintf("\tvar _p%d *byte\n", n) - text += fmt.Sprintf("\t_p%d, _ = BytePtrFromString(%s)\n", n, p.Name) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass dummy pointer in that case. - // Used to pass nil, but some OSes or simulators reject write(fd, nil, 0). 
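The removed mksyscall.go recognizes //sys prototype lines with the regular expression shown above and splits each into the function name plus its input and output parameter lists. A small runnable sketch of that matching step, using a simplified pattern and a made-up prototype line:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// A made-up prototype in the style the generator scans for.
	line := "//sys\tOpen(path string, mode int, perm uint32) (fd int, err error)"

	// The generator first collapses whitespace, then matches; this pattern is
	// a simplified form of the one in the removed file.
	line = regexp.MustCompile(`\s+`).ReplaceAllString(strings.TrimSpace(line), " ")
	re := regexp.MustCompile(`^//sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?`)
	m := re.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("not a //sys declaration")
		return
	}
	fmt.Println("func:", m[2])
	fmt.Println("in:  ", m[3])
	fmt.Println("out: ", m[4])
}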
- text += fmt.Sprintf("\tvar _p%d unsafe.Pointer\n", n) - text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = unsafe.Pointer(&%s[0])\n\t}", p.Name, n, p.Name) - text += fmt.Sprintf(" else {\n\t\t_p%d = unsafe.Pointer(&_zero)\n\t}\n", n) - args = append(args, fmt.Sprintf("uintptr(_p%d)", n), fmt.Sprintf("uintptr(len(%s))", p.Name)) - n++ - } else if p.Type == "int64" && (*openbsd || *netbsd) { - args = append(args, "0") - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else if endianness == "little-endian" { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } else if p.Type == "int64" && *dragonfly { - if regexp.MustCompile(`^(?i)extp(read|write)`).FindStringSubmatch(funct) == nil { - args = append(args, "0") - } - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else if endianness == "little-endian" { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } else if (p.Type == "int64" || p.Type == "uint64") && endianness != "" { - if len(args)%2 == 1 && *arm { - // arm abi specifies 64-bit argument uses - // (even, odd) pair - args = append(args, "0") - } - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } - - // Determine which form to use; pad args with zeros. - asm := "Syscall" - if nonblock != nil { - if errvar == "" && goos == "linux" { - asm = "RawSyscallNoError" - } else { - asm = "RawSyscall" - } - } else { - if errvar == "" && goos == "linux" { - asm = "SyscallNoError" - } - } - if len(args) <= 3 { - for len(args) < 3 { - args = append(args, "0") - } - } else if len(args) <= 6 { - asm += "6" - for len(args) < 6 { - args = append(args, "0") - } - } else if len(args) <= 9 { - asm += "9" - for len(args) < 9 { - args = append(args, "0") - } - } else { - fmt.Fprintf(os.Stderr, "%s:%s too many arguments to system call\n", path, funct) - } - - // System call number. - if sysname == "" { - sysname = "SYS_" + funct - sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) - sysname = strings.ToUpper(sysname) - } - - var libcFn string - if libc { - asm = "syscall_" + strings.ToLower(asm[:1]) + asm[1:] // internal syscall call - sysname = strings.TrimPrefix(sysname, "SYS_") // remove SYS_ - sysname = strings.ToLower(sysname) // lowercase - libcFn = sysname - sysname = "funcPC(libc_" + sysname + "_trampoline)" - } - - // Actual call. - arglist := strings.Join(args, ", ") - call := fmt.Sprintf("%s(%s, %s)", asm, sysname, arglist) - - // Assign return values. 
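The comments above describe how generated wrappers marshal a Go slice into a pointer/length pair without ever taking the address of an empty slice. The same pattern, lifted into a standalone runnable sketch (the package-level zero variable stands in for the generator's _zero):

package main

import (
	"fmt"
	"unsafe"
)

// zero mirrors the package-level _zero the generated wrappers rely on: a dummy
// variable whose address is handed to the kernel when a slice is empty.
var zero uintptr

// slicePtrLen shows the pointer/length marshalling the generator emits for
// slice parameters (illustrative, not the generated code itself).
func slicePtrLen(b []byte) (uintptr, uintptr) {
	var p unsafe.Pointer
	if len(b) > 0 {
		p = unsafe.Pointer(&b[0])
	} else {
		p = unsafe.Pointer(&zero) // never pass nil: some OSes reject write(fd, nil, 0)
	}
	return uintptr(p), uintptr(len(b))
}

func main() {
	ptr, n := slicePtrLen([]byte("hello"))
	fmt.Printf("ptr=%#x len=%d\n", ptr, n)

	ptr, n = slicePtrLen(nil)
	fmt.Printf("empty: ptr=%#x len=%d\n", ptr, n)
}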
- body := "" - ret := []string{"_", "_", "_"} - doErrno := false - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" && !*plan9 { - reg = "e1" - ret[2] = reg - doErrno = true - } else if p.Name == "err" && *plan9 { - ret[0] = "r0" - ret[2] = "e1" - break - } else { - reg = fmt.Sprintf("r%d", i) - ret[i] = reg - } - if p.Type == "bool" { - reg = fmt.Sprintf("%s != 0", reg) - } - if p.Type == "int64" && endianness != "" { - // 64-bit number in r1:r0 or r0:r1. - if i+2 > len(out) { - fmt.Fprintf(os.Stderr, "%s:%s not enough registers for int64 return\n", path, funct) - } - if endianness == "big-endian" { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1) - } else { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i) - } - ret[i] = fmt.Sprintf("r%d", i) - ret[i+1] = fmt.Sprintf("r%d", i+1) - } - if reg != "e1" || *plan9 { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" { - text += fmt.Sprintf("\t%s\n", call) - } else { - if errvar == "" && goos == "linux" { - // raw syscall without error on Linux, see golang.org/issue/22924 - text += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], call) - } else { - text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call) - } - } - text += body - - if *plan9 && ret[2] == "e1" { - text += "\tif int32(r0) == -1 {\n" - text += "\t\terr = e1\n" - text += "\t}\n" - } else if doErrno { - text += "\tif e1 != 0 {\n" - text += "\t\terr = errnoErr(e1)\n" - text += "\t}\n" - } - text += "\treturn\n" - text += "}\n\n" - - if libc && !trampolines[libcFn] { - // some system calls share a trampoline, like read and readlen. - trampolines[libcFn] = true - // Declare assembly trampoline. - text += fmt.Sprintf("func libc_%s_trampoline()\n", libcFn) - // Assembly trampoline calls the libc_* function, which this magic - // redirects to use the function from libSystem. - text += fmt.Sprintf("//go:linkname libc_%s libc_%s\n", libcFn, libcFn) - text += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"/usr/lib/libSystem.B.dylib\"\n", libcFn, libcFn) - text += "\n" - } - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - fmt.Printf(srcTemplate, cmdLine(), buildTags(), text) -} - -const srcTemplate = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go deleted file mode 100644 index 3be3cdfc3..000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go +++ /dev/null @@ -1,415 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -This program reads a file containing function prototypes -(like syscall_aix.go) and generates system call bodies. -The prototypes are marked by lines beginning with "//sys" -and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named err. 
- * If go func name needs to be different than its libc name, - * or the function is not in libc, name could be specified - * at the end, after "=" sign, like - //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt -*/ -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - aix = flag.Bool("aix", false, "aix") - tags = flag.String("tags", "", "build tags") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall_aix_ppc.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - pack := "" - text := "" - cExtern := "/*\n#include \n#include \n" - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { - pack = p[1] - } - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, err error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - inps = strings.Join(in, ", ") - outps = strings.Join(out, ", ") - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. 
- text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - - // Check if value return, err return available - errvar := "" - retvar := "" - rettype := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - } else { - retvar = p.Name - rettype = p.Type - } - } - - // System call name. - if sysname == "" { - sysname = funct - } - sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) - sysname = strings.ToLower(sysname) // All libc functions are lowercase. - - cRettype := "" - if rettype == "unsafe.Pointer" { - cRettype = "uintptr_t" - } else if rettype == "uintptr" { - cRettype = "uintptr_t" - } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil { - cRettype = "uintptr_t" - } else if rettype == "int" { - cRettype = "int" - } else if rettype == "int32" { - cRettype = "int" - } else if rettype == "int64" { - cRettype = "long long" - } else if rettype == "uint32" { - cRettype = "unsigned int" - } else if rettype == "uint64" { - cRettype = "unsigned long long" - } else { - cRettype = "int" - } - if sysname == "exit" { - cRettype = "void" - } - - // Change p.Types to c - var cIn []string - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "string" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t", "size_t") - } else if p.Type == "unsafe.Pointer" { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "uintptr" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "int" { - cIn = append(cIn, "int") - } else if p.Type == "int32" { - cIn = append(cIn, "int") - } else if p.Type == "int64" { - cIn = append(cIn, "long long") - } else if p.Type == "uint32" { - cIn = append(cIn, "unsigned int") - } else if p.Type == "uint64" { - cIn = append(cIn, "unsigned long long") - } else { - cIn = append(cIn, "int") - } - } - - if funct != "fcntl" && funct != "FcntlInt" && funct != "readlen" && funct != "writelen" { - if sysname == "select" { - // select is a keyword of Go. Its name is - // changed to c_select. - cExtern += "#define c_select select\n" - } - // Imports of system calls from libc - cExtern += fmt.Sprintf("%s %s", cRettype, sysname) - cIn := strings.Join(cIn, ", ") - cExtern += fmt.Sprintf("(%s);\n", cIn) - } - - // So file name. - if *aix { - if modname == "" { - modname = "libc.a/shr_64.o" - } else { - fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct) - os.Exit(1) - } - } - - strconvfunc := "C.CString" - - // Go function header. - if outps != "" { - outps = fmt.Sprintf(" (%s)", outps) - } - if text != "" { - text += "\n" - } - - text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps) - - // Prepare arguments to Syscall. 
- var args []string - n := 0 - argN := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - args = append(args, "C.uintptr_t(uintptr(unsafe.Pointer("+p.Name+")))") - } else if p.Type == "string" && errvar != "" { - text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name) - args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name) - args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass nil in that case. - text += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1]) - text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) - args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(unsafe.Pointer(_p%d)))", n)) - n++ - text += fmt.Sprintf("\tvar _p%d int\n", n) - text += fmt.Sprintf("\t_p%d = len(%s)\n", n, p.Name) - args = append(args, fmt.Sprintf("C.size_t(_p%d)", n)) - n++ - } else if p.Type == "int64" && endianness != "" { - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } - n++ - } else if p.Type == "bool" { - text += fmt.Sprintf("\tvar _p%d uint32\n", n) - text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n) - args = append(args, fmt.Sprintf("_p%d", n)) - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { - args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name)) - } else if p.Type == "unsafe.Pointer" { - args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name)) - } else if p.Type == "int" { - if (argN == 2) && ((funct == "readlen") || (funct == "writelen")) { - args = append(args, fmt.Sprintf("C.size_t(%s)", p.Name)) - } else if argN == 0 && funct == "fcntl" { - args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else if (argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt")) { - args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) - } - } else if p.Type == "int32" { - args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) - } else if p.Type == "int64" { - args = append(args, fmt.Sprintf("C.longlong(%s)", p.Name)) - } else if p.Type == "uint32" { - args = append(args, fmt.Sprintf("C.uint(%s)", p.Name)) - } else if p.Type == "uint64" { - args = append(args, fmt.Sprintf("C.ulonglong(%s)", p.Name)) - } else if p.Type == "uintptr" { - args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) - } - argN++ - } - - // Actual call. - arglist := strings.Join(args, ", ") - call := "" - if sysname == "exit" { - if errvar != "" { - call += "er :=" - } else { - call += "" - } - } else if errvar != "" { - call += "r0,er :=" - } else if retvar != "" { - call += "r0,_ :=" - } else { - call += "" - } - if sysname == "select" { - // select is a keyword of Go. Its name is - // changed to c_select. 
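The AIX ppc generator above emits wrappers that call libc through cgo, flattening every argument to a C integer type declared in the cExtern block. A minimal cgo sketch of that style of direct libc call (getpid is only an example, not one of the generated wrappers):

package main

/*
#include <unistd.h>
*/
import "C"

import "fmt"

func main() {
	// Call a libc function directly through cgo, roughly the way the wrappers
	// emitted by this generator invoke their C counterparts.
	pid := C.getpid()
	fmt.Println("pid:", int(pid))
}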
- call += fmt.Sprintf("C.c_%s(%s)", sysname, arglist) - } else { - call += fmt.Sprintf("C.%s(%s)", sysname, arglist) - } - - // Assign return values. - body := "" - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" { - reg = "e1" - } else { - reg = "r0" - } - if reg != "e1" { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - - // verify return - if sysname != "exit" && errvar != "" { - if regexp.MustCompile(`^uintptr`).FindStringSubmatch(cRettype) != nil { - body += "\tif (uintptr(r0) ==^uintptr(0) && er != nil) {\n" - body += fmt.Sprintf("\t\t%s = er\n", errvar) - body += "\t}\n" - } else { - body += "\tif (r0 ==-1 && er != nil) {\n" - body += fmt.Sprintf("\t\t%s = er\n", errvar) - body += "\t}\n" - } - } else if errvar != "" { - body += "\tif (er != nil) {\n" - body += fmt.Sprintf("\t\t%s = er\n", errvar) - body += "\t}\n" - } - - text += fmt.Sprintf("\t%s\n", call) - text += body - - text += "\treturn\n" - text += "}\n" - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - imp := "" - if pack != "unix" { - imp = "import \"golang.org/x/sys/unix\"\n" - - } - fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, cExtern, imp, text) -} - -const srcTemplate = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package %s - - -%s -*/ -import "C" -import ( - "unsafe" -) - - -%s - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go deleted file mode 100644 index c96009951..000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go +++ /dev/null @@ -1,614 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -This program reads a file containing function prototypes -(like syscall_aix.go) and generates system call bodies. -The prototypes are marked by lines beginning with "//sys" -and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named err. - * If go func name needs to be different than its libc name, - * or the function is not in libc, name could be specified - * at the end, after "=" sign, like - //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt - - -This program will generate three files and handle both gc and gccgo implementation: - - zsyscall_aix_ppc64.go: the common part of each implementation (error handler, pointer creation) - - zsyscall_aix_ppc64_gc.go: gc part with //go_cgo_import_dynamic and a call to syscall6 - - zsyscall_aix_ppc64_gccgo.go: gccgo part with C function and conversion to C type. - - The generated code looks like this - -zsyscall_aix_ppc64.go -func asyscall(...) (n int, err error) { - // Pointer Creation - r1, e1 := callasyscall(...) - // Type Conversion - // Error Handler - return -} - -zsyscall_aix_ppc64_gc.go -//go:cgo_import_dynamic libc_asyscall asyscall "libc.a/shr_64.o" -//go:linkname libc_asyscall libc_asyscall -var asyscall syscallFunc - -func callasyscall(...) 
(r1 uintptr, e1 Errno) { - r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_asyscall)), "nb_args", ... ) - return -} - -zsyscall_aix_ppc64_ggcgo.go - -// int asyscall(...) - -import "C" - -func callasyscall(...) (r1 uintptr, e1 Errno) { - r1 = uintptr(C.asyscall(...)) - e1 = syscall.GetErrno() - return -} -*/ - -package main - -import ( - "bufio" - "flag" - "fmt" - "io/ioutil" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - aix = flag.Bool("aix", false, "aix") - tags = flag.String("tags", "", "build tags") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall_aix_ppc64.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc64.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - pack := "" - // GCCGO - textgccgo := "" - cExtern := "/*\n#include \n" - // GC - textgc := "" - dynimports := "" - linknames := "" - var vars []string - // COMMON - textcommon := "" - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { - pack = p[1] - } - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, err error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - inps = strings.Join(in, ", ") - outps = strings.Join(out, ", ") - - if sysname == "" { - sysname = funct - } - - onlyCommon := false - if funct == "readlen" || funct == "writelen" || funct == "FcntlInt" || funct == "FcntlFlock" { - // This function call another syscall which is already implemented. 
- // Therefore, the gc and gccgo part must not be generated. - onlyCommon = true - } - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. - - textcommon += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - if !onlyCommon { - textgccgo += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - textgc += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - } - - // Check if value return, err return available - errvar := "" - rettype := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - } else { - rettype = p.Type - } - } - - sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) - sysname = strings.ToLower(sysname) // All libc functions are lowercase. - - // GCCGO Prototype return type - cRettype := "" - if rettype == "unsafe.Pointer" { - cRettype = "uintptr_t" - } else if rettype == "uintptr" { - cRettype = "uintptr_t" - } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil { - cRettype = "uintptr_t" - } else if rettype == "int" { - cRettype = "int" - } else if rettype == "int32" { - cRettype = "int" - } else if rettype == "int64" { - cRettype = "long long" - } else if rettype == "uint32" { - cRettype = "unsigned int" - } else if rettype == "uint64" { - cRettype = "unsigned long long" - } else { - cRettype = "int" - } - if sysname == "exit" { - cRettype = "void" - } - - // GCCGO Prototype arguments type - var cIn []string - for i, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "string" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t", "size_t") - } else if p.Type == "unsafe.Pointer" { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "uintptr" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "int" { - if (i == 0 || i == 2) && funct == "fcntl" { - // These fcntl arguments needs to be uintptr to be able to call FcntlInt and FcntlFlock - cIn = append(cIn, "uintptr_t") - } else { - cIn = append(cIn, "int") - } - - } else if p.Type == "int32" { - cIn = append(cIn, "int") - } else if p.Type == "int64" { - cIn = append(cIn, "long long") - } else if p.Type == "uint32" { - cIn = append(cIn, "unsigned int") - } else if p.Type == "uint64" { - cIn = append(cIn, "unsigned long long") - } else { - cIn = append(cIn, "int") - } - } - - if !onlyCommon { - // GCCGO Prototype Generation - // Imports of system calls from libc - if sysname == "select" { - // select is a keyword of Go. Its name is - // changed to c_select. - cExtern += "#define c_select select\n" - } - cExtern += fmt.Sprintf("%s %s", cRettype, sysname) - cIn := strings.Join(cIn, ", ") - cExtern += fmt.Sprintf("(%s);\n", cIn) - } - // GC Library name - if modname == "" { - modname = "libc.a/shr_64.o" - } else { - fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct) - os.Exit(1) - } - sysvarname := fmt.Sprintf("libc_%s", sysname) - - if !onlyCommon { - // GC Runtime import of function to allow cross-platform builds. 
- dynimports += fmt.Sprintf("//go:cgo_import_dynamic %s %s \"%s\"\n", sysvarname, sysname, modname) - // GC Link symbol to proc address variable. - linknames += fmt.Sprintf("//go:linkname %s %s\n", sysvarname, sysvarname) - // GC Library proc address variable. - vars = append(vars, sysvarname) - } - - strconvfunc := "BytePtrFromString" - strconvtype := "*byte" - - // Go function header. - if outps != "" { - outps = fmt.Sprintf(" (%s)", outps) - } - if textcommon != "" { - textcommon += "\n" - } - - textcommon += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps) - - // Prepare arguments tocall. - var argscommon []string // Arguments in the common part - var argscall []string // Arguments for call prototype - var argsgc []string // Arguments for gc call (with syscall6) - var argsgccgo []string // Arguments for gccgo call (with C.name_of_syscall) - n := 0 - argN := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else if p.Type == "string" && errvar != "" { - textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) - textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - argscall = append(argscall, fmt.Sprintf("_p%d uintptr ", n)) - argsgc = append(argsgc, fmt.Sprintf("_p%d", n)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) - textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n)) - argsgc = append(argsgc, fmt.Sprintf("_p%d", n)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass nil in that case. - textcommon += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1]) - textcommon += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("len(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n), fmt.Sprintf("_lenp%d int", n)) - argsgc = append(argsgc, fmt.Sprintf("_p%d", n), fmt.Sprintf("uintptr(_lenp%d)", n)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n), fmt.Sprintf("C.size_t(_lenp%d)", n)) - n++ - } else if p.Type == "int64" && endianness != "" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses int64 with 32 bits mode. Case not yet implemented\n") - } else if p.Type == "bool" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses bool. 
Case not yet implemented\n") - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil || p.Type == "unsafe.Pointer" { - argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else if p.Type == "int" { - if (argN == 0 || argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt") || (funct == "FcntlFlock")) { - // These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock - argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - - } else { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s int", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) - } - } else if p.Type == "int32" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s int32", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) - } else if p.Type == "int64" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s int64", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.longlong(%s)", p.Name)) - } else if p.Type == "uint32" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s uint32", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uint(%s)", p.Name)) - } else if p.Type == "uint64" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s uint64", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.ulonglong(%s)", p.Name)) - } else if p.Type == "uintptr" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else { - argscommon = append(argscommon, fmt.Sprintf("int(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s int", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) - } - argN++ - } - nargs := len(argsgc) - - // COMMON function generation - argscommonlist := strings.Join(argscommon, ", ") - callcommon := fmt.Sprintf("call%s(%s)", sysname, argscommonlist) - ret := []string{"_", "_"} - body := "" - doErrno := false - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" { - reg = "e1" - ret[1] = reg - doErrno = true - } else { - reg = "r0" - ret[0] = reg - } - if p.Type == "bool" { - reg = fmt.Sprintf("%s != 0", reg) - } - if reg != "e1" { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - if ret[0] == "_" && ret[1] == "_" { - textcommon += fmt.Sprintf("\t%s\n", callcommon) - } else { - textcommon += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], callcommon) - } - textcommon += body - - if doErrno { - textcommon += "\tif e1 != 0 {\n" - textcommon += "\t\terr = 
errnoErr(e1)\n" - textcommon += "\t}\n" - } - textcommon += "\treturn\n" - textcommon += "}\n" - - if onlyCommon { - continue - } - - // CALL Prototype - callProto := fmt.Sprintf("func call%s(%s) (r1 uintptr, e1 Errno) {\n", sysname, strings.Join(argscall, ", ")) - - // GC function generation - asm := "syscall6" - if nonblock != nil { - asm = "rawSyscall6" - } - - if len(argsgc) <= 6 { - for len(argsgc) < 6 { - argsgc = append(argsgc, "0") - } - } else { - fmt.Fprintf(os.Stderr, "%s: too many arguments to system call", funct) - os.Exit(1) - } - argsgclist := strings.Join(argsgc, ", ") - callgc := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, argsgclist) - - textgc += callProto - textgc += fmt.Sprintf("\tr1, _, e1 = %s\n", callgc) - textgc += "\treturn\n}\n" - - // GCCGO function generation - argsgccgolist := strings.Join(argsgccgo, ", ") - var callgccgo string - if sysname == "select" { - // select is a keyword of Go. Its name is - // changed to c_select. - callgccgo = fmt.Sprintf("C.c_%s(%s)", sysname, argsgccgolist) - } else { - callgccgo = fmt.Sprintf("C.%s(%s)", sysname, argsgccgolist) - } - textgccgo += callProto - textgccgo += fmt.Sprintf("\tr1 = uintptr(%s)\n", callgccgo) - textgccgo += "\te1 = syscall.GetErrno()\n" - textgccgo += "\treturn\n}\n" - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - imp := "" - if pack != "unix" { - imp = "import \"golang.org/x/sys/unix\"\n" - - } - - // Print zsyscall_aix_ppc64.go - err := ioutil.WriteFile("zsyscall_aix_ppc64.go", - []byte(fmt.Sprintf(srcTemplate1, cmdLine(), buildTags(), pack, imp, textcommon)), - 0644) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - - // Print zsyscall_aix_ppc64_gc.go - vardecls := "\t" + strings.Join(vars, ",\n\t") - vardecls += " syscallFunc" - err = ioutil.WriteFile("zsyscall_aix_ppc64_gc.go", - []byte(fmt.Sprintf(srcTemplate2, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, textgc)), - 0644) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - - // Print zsyscall_aix_ppc64_gccgo.go - err = ioutil.WriteFile("zsyscall_aix_ppc64_gccgo.go", - []byte(fmt.Sprintf(srcTemplate3, cmdLine(), buildTags(), pack, cExtern, imp, textgccgo)), - 0644) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } -} - -const srcTemplate1 = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package %s - -import ( - "unsafe" -) - - -%s - -%s -` -const srcTemplate2 = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s -// +build !gccgo - -package %s - -import ( - "unsafe" -) -%s -%s -%s -type syscallFunc uintptr - -var ( -%s -) - -// Implemented in runtime/syscall_aix.go. -func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) -func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) - -%s -` -const srcTemplate3 = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s -// +build gccgo - -package %s - -%s -*/ -import "C" -import ( - "syscall" -) - - -%s - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go b/vendor/golang.org/x/sys/unix/mksyscall_solaris.go deleted file mode 100644 index 3d864738b..000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go +++ /dev/null @@ -1,335 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* - This program reads a file containing function prototypes - (like syscall_solaris.go) and generates system call bodies. - The prototypes are marked by lines beginning with "//sys" - and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named err. - * If go func name needs to be different than its libc name, - * or the function is not in libc, name could be specified - * at the end, after "=" sign, like - //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt -*/ - -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - tags = flag.String("tags", "", "build tags") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall_solaris.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall_solaris.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - pack := "" - text := "" - dynimports := "" - linknames := "" - var vars []string - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { - pack = p[1] - } - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, err error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? 
(\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - inps = strings.Join(in, ", ") - outps = strings.Join(out, ", ") - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. - text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - - // So file name. - if modname == "" { - modname = "libc" - } - - // System call name. - if sysname == "" { - sysname = funct - } - - // System call pointer variable name. - sysvarname := fmt.Sprintf("proc%s", sysname) - - strconvfunc := "BytePtrFromString" - strconvtype := "*byte" - - sysname = strings.ToLower(sysname) // All libc functions are lowercase. - - // Runtime import of function to allow cross-platform builds. - dynimports += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"%s.so\"\n", sysname, sysname, modname) - // Link symbol to proc address variable. - linknames += fmt.Sprintf("//go:linkname %s libc_%s\n", sysvarname, sysname) - // Library proc address variable. - vars = append(vars, sysvarname) - - // Go function header. - outlist := strings.Join(out, ", ") - if outlist != "" { - outlist = fmt.Sprintf(" (%s)", outlist) - } - if text != "" { - text += "\n" - } - text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outlist) - - // Check if err return available - errvar := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - continue - } - } - - // Prepare arguments to Syscall. - var args []string - n := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") - } else if p.Type == "string" && errvar != "" { - text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - text += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) - text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - text += fmt.Sprintf("\t_p%d, _ = %s(%s)\n", n, strconvfunc, p.Name) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if s := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); s != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass nil in that case. 
- text += fmt.Sprintf("\tvar _p%d *%s\n", n, s[1]) - text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("uintptr(len(%s))", p.Name)) - n++ - } else if p.Type == "int64" && endianness != "" { - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } - } else if p.Type == "bool" { - text += fmt.Sprintf("\tvar _p%d uint32\n", n) - text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n) - args = append(args, fmt.Sprintf("uintptr(_p%d)", n)) - n++ - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } - nargs := len(args) - - // Determine which form to use; pad args with zeros. - asm := "sysvicall6" - if nonblock != nil { - asm = "rawSysvicall6" - } - if len(args) <= 6 { - for len(args) < 6 { - args = append(args, "0") - } - } else { - fmt.Fprintf(os.Stderr, "%s: too many arguments to system call\n", path) - os.Exit(1) - } - - // Actual call. - arglist := strings.Join(args, ", ") - call := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, arglist) - - // Assign return values. - body := "" - ret := []string{"_", "_", "_"} - doErrno := false - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" { - reg = "e1" - ret[2] = reg - doErrno = true - } else { - reg = fmt.Sprintf("r%d", i) - ret[i] = reg - } - if p.Type == "bool" { - reg = fmt.Sprintf("%d != 0", reg) - } - if p.Type == "int64" && endianness != "" { - // 64-bit number in r1:r0 or r0:r1. - if i+2 > len(out) { - fmt.Fprintf(os.Stderr, "%s: not enough registers for int64 return\n", path) - os.Exit(1) - } - if endianness == "big-endian" { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1) - } else { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i) - } - ret[i] = fmt.Sprintf("r%d", i) - ret[i+1] = fmt.Sprintf("r%d", i+1) - } - if reg != "e1" { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" { - text += fmt.Sprintf("\t%s\n", call) - } else { - text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call) - } - text += body - - if doErrno { - text += "\tif e1 != 0 {\n" - text += "\t\terr = e1\n" - text += "\t}\n" - } - text += "\treturn\n" - text += "}\n" - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - imp := "" - if pack != "unix" { - imp = "import \"golang.org/x/sys/unix\"\n" - - } - vardecls := "\t" + strings.Join(vars, ",\n\t") - vardecls += " syscallFunc" - fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, text) -} - -const srcTemplate = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package %s - -import ( - "syscall" - "unsafe" -) -%s -%s -%s -var ( -%s -) - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go b/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go deleted file mode 100644 index b6b409909..000000000 --- a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go +++ /dev/null @@ -1,355 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Parse the header files for OpenBSD and generate a Go usable sysctl MIB. -// -// Build a MIB with each entry being an array containing the level, type and -// a hash that will contain additional entries if the current entry is a node. -// We then walk this MIB and create a flattened sysctl name to OID hash. - -package main - -import ( - "bufio" - "fmt" - "os" - "path/filepath" - "regexp" - "sort" - "strings" -) - -var ( - goos, goarch string -) - -// cmdLine returns this programs's commandline arguments. -func cmdLine() string { - return "go run mksysctl_openbsd.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags. -func buildTags() string { - return fmt.Sprintf("%s,%s", goarch, goos) -} - -// reMatch performs regular expression match and stores the substring slice to value pointed by m. -func reMatch(re *regexp.Regexp, str string, m *[]string) bool { - *m = re.FindStringSubmatch(str) - if *m != nil { - return true - } - return false -} - -type nodeElement struct { - n int - t string - pE *map[string]nodeElement -} - -var ( - debugEnabled bool - mib map[string]nodeElement - node *map[string]nodeElement - nodeMap map[string]string - sysCtl []string -) - -var ( - ctlNames1RE = regexp.MustCompile(`^#define\s+(CTL_NAMES)\s+{`) - ctlNames2RE = regexp.MustCompile(`^#define\s+(CTL_(.*)_NAMES)\s+{`) - ctlNames3RE = regexp.MustCompile(`^#define\s+((.*)CTL_NAMES)\s+{`) - netInetRE = regexp.MustCompile(`^netinet/`) - netInet6RE = regexp.MustCompile(`^netinet6/`) - netRE = regexp.MustCompile(`^net/`) - bracesRE = regexp.MustCompile(`{.*}`) - ctlTypeRE = regexp.MustCompile(`{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}`) - fsNetKernRE = regexp.MustCompile(`^(fs|net|kern)_`) -) - -func debug(s string) { - if debugEnabled { - fmt.Fprintln(os.Stderr, s) - } -} - -// Walk the MIB and build a sysctl name to OID mapping. -func buildSysctl(pNode *map[string]nodeElement, name string, oid []int) { - lNode := pNode // local copy of pointer to node - var keys []string - for k := range *lNode { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, key := range keys { - nodename := name - if name != "" { - nodename += "." 
- } - nodename += key - - nodeoid := append(oid, (*pNode)[key].n) - - if (*pNode)[key].t == `CTLTYPE_NODE` { - if _, ok := nodeMap[nodename]; ok { - lNode = &mib - ctlName := nodeMap[nodename] - for _, part := range strings.Split(ctlName, ".") { - lNode = ((*lNode)[part]).pE - } - } else { - lNode = (*pNode)[key].pE - } - buildSysctl(lNode, nodename, nodeoid) - } else if (*pNode)[key].t != "" { - oidStr := []string{} - for j := range nodeoid { - oidStr = append(oidStr, fmt.Sprintf("%d", nodeoid[j])) - } - text := "\t{ \"" + nodename + "\", []_C_int{ " + strings.Join(oidStr, ", ") + " } }, \n" - sysCtl = append(sysCtl, text) - } - } -} - -func main() { - // Get the OS (using GOOS_TARGET if it exist) - goos = os.Getenv("GOOS_TARGET") - if goos == "" { - goos = os.Getenv("GOOS") - } - // Get the architecture (using GOARCH_TARGET if it exists) - goarch = os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - // Check if GOOS and GOARCH environment variables are defined - if goarch == "" || goos == "" { - fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n") - os.Exit(1) - } - - mib = make(map[string]nodeElement) - headers := [...]string{ - `sys/sysctl.h`, - `sys/socket.h`, - `sys/tty.h`, - `sys/malloc.h`, - `sys/mount.h`, - `sys/namei.h`, - `sys/sem.h`, - `sys/shm.h`, - `sys/vmmeter.h`, - `uvm/uvmexp.h`, - `uvm/uvm_param.h`, - `uvm/uvm_swap_encrypt.h`, - `ddb/db_var.h`, - `net/if.h`, - `net/if_pfsync.h`, - `net/pipex.h`, - `netinet/in.h`, - `netinet/icmp_var.h`, - `netinet/igmp_var.h`, - `netinet/ip_ah.h`, - `netinet/ip_carp.h`, - `netinet/ip_divert.h`, - `netinet/ip_esp.h`, - `netinet/ip_ether.h`, - `netinet/ip_gre.h`, - `netinet/ip_ipcomp.h`, - `netinet/ip_ipip.h`, - `netinet/pim_var.h`, - `netinet/tcp_var.h`, - `netinet/udp_var.h`, - `netinet6/in6.h`, - `netinet6/ip6_divert.h`, - `netinet6/pim6_var.h`, - `netinet/icmp6.h`, - `netmpls/mpls.h`, - } - - ctls := [...]string{ - `kern`, - `vm`, - `fs`, - `net`, - //debug /* Special handling required */ - `hw`, - //machdep /* Arch specific */ - `user`, - `ddb`, - //vfs /* Special handling required */ - `fs.posix`, - `kern.forkstat`, - `kern.intrcnt`, - `kern.malloc`, - `kern.nchstats`, - `kern.seminfo`, - `kern.shminfo`, - `kern.timecounter`, - `kern.tty`, - `kern.watchdog`, - `net.bpf`, - `net.ifq`, - `net.inet`, - `net.inet.ah`, - `net.inet.carp`, - `net.inet.divert`, - `net.inet.esp`, - `net.inet.etherip`, - `net.inet.gre`, - `net.inet.icmp`, - `net.inet.igmp`, - `net.inet.ip`, - `net.inet.ip.ifq`, - `net.inet.ipcomp`, - `net.inet.ipip`, - `net.inet.mobileip`, - `net.inet.pfsync`, - `net.inet.pim`, - `net.inet.tcp`, - `net.inet.udp`, - `net.inet6`, - `net.inet6.divert`, - `net.inet6.ip6`, - `net.inet6.icmp6`, - `net.inet6.pim6`, - `net.inet6.tcp6`, - `net.inet6.udp6`, - `net.mpls`, - `net.mpls.ifq`, - `net.key`, - `net.pflow`, - `net.pfsync`, - `net.pipex`, - `net.rt`, - `vm.swapencrypt`, - //vfsgenctl /* Special handling required */ - } - - // Node name "fixups" - ctlMap := map[string]string{ - "ipproto": "net.inet", - "net.inet.ipproto": "net.inet", - "net.inet6.ipv6proto": "net.inet6", - "net.inet6.ipv6": "net.inet6.ip6", - "net.inet.icmpv6": "net.inet6.icmp6", - "net.inet6.divert6": "net.inet6.divert", - "net.inet6.tcp6": "net.inet.tcp", - "net.inet6.udp6": "net.inet.udp", - "mpls": "net.mpls", - "swpenc": "vm.swapencrypt", - } - - // Node mappings - nodeMap = map[string]string{ - "net.inet.ip.ifq": "net.ifq", - "net.inet.pfsync": "net.pfsync", - "net.mpls.ifq": "net.ifq", - } - - mCtls := 
make(map[string]bool) - for _, ctl := range ctls { - mCtls[ctl] = true - } - - for _, header := range headers { - debug("Processing " + header) - file, err := os.Open(filepath.Join("/usr/include", header)) - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - var sub []string - if reMatch(ctlNames1RE, s.Text(), &sub) || - reMatch(ctlNames2RE, s.Text(), &sub) || - reMatch(ctlNames3RE, s.Text(), &sub) { - if sub[1] == `CTL_NAMES` { - // Top level. - node = &mib - } else { - // Node. - nodename := strings.ToLower(sub[2]) - ctlName := "" - if reMatch(netInetRE, header, &sub) { - ctlName = "net.inet." + nodename - } else if reMatch(netInet6RE, header, &sub) { - ctlName = "net.inet6." + nodename - } else if reMatch(netRE, header, &sub) { - ctlName = "net." + nodename - } else { - ctlName = nodename - ctlName = fsNetKernRE.ReplaceAllString(ctlName, `$1.`) - } - - if val, ok := ctlMap[ctlName]; ok { - ctlName = val - } - if _, ok := mCtls[ctlName]; !ok { - debug("Ignoring " + ctlName + "...") - continue - } - - // Walk down from the top of the MIB. - node = &mib - for _, part := range strings.Split(ctlName, ".") { - if _, ok := (*node)[part]; !ok { - debug("Missing node " + part) - (*node)[part] = nodeElement{n: 0, t: "", pE: &map[string]nodeElement{}} - } - node = (*node)[part].pE - } - } - - // Populate current node with entries. - i := -1 - for !strings.HasPrefix(s.Text(), "}") { - s.Scan() - if reMatch(bracesRE, s.Text(), &sub) { - i++ - } - if !reMatch(ctlTypeRE, s.Text(), &sub) { - continue - } - (*node)[sub[1]] = nodeElement{n: i, t: sub[2], pE: &map[string]nodeElement{}} - } - } - } - err = s.Err() - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } - file.Close() - } - buildSysctl(&mib, "", []int{}) - - sort.Strings(sysCtl) - text := strings.Join(sysCtl, "") - - fmt.Printf(srcTemplate, cmdLine(), buildTags(), text) -} - -const srcTemplate = `// %s -// Code generated by the command above; DO NOT EDIT. - -// +build %s - -package unix - -type mibentry struct { - ctlname string - ctloid []_C_int -} - -var sysctlMib = []mibentry { -%s -} -` diff --git a/vendor/golang.org/x/sys/unix/mksysnum.go b/vendor/golang.org/x/sys/unix/mksysnum.go deleted file mode 100644 index baa6ecd85..000000000 --- a/vendor/golang.org/x/sys/unix/mksysnum.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Generate system call table for DragonFly, NetBSD, -// FreeBSD, OpenBSD or Darwin from master list -// (for example, /usr/src/sys/kern/syscalls.master or -// sys/syscall.h). 
-package main - -import ( - "bufio" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "regexp" - "strings" -) - -var ( - goos, goarch string -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksysnum.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return fmt.Sprintf("%s,%s", goarch, goos) -} - -func checkErr(err error) { - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } -} - -// source string and substring slice for regexp -type re struct { - str string // source string - sub []string // matched sub-string -} - -// Match performs regular expression match -func (r *re) Match(exp string) bool { - r.sub = regexp.MustCompile(exp).FindStringSubmatch(r.str) - if r.sub != nil { - return true - } - return false -} - -// fetchFile fetches a text file from URL -func fetchFile(URL string) io.Reader { - resp, err := http.Get(URL) - checkErr(err) - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - checkErr(err) - return strings.NewReader(string(body)) -} - -// readFile reads a text file from path -func readFile(path string) io.Reader { - file, err := os.Open(os.Args[1]) - checkErr(err) - return file -} - -func format(name, num, proto string) string { - name = strings.ToUpper(name) - // There are multiple entries for enosys and nosys, so comment them out. - nm := re{str: name} - if nm.Match(`^SYS_E?NOSYS$`) { - name = fmt.Sprintf("// %s", name) - } - if name == `SYS_SYS_EXIT` { - name = `SYS_EXIT` - } - return fmt.Sprintf(" %s = %s; // %s\n", name, num, proto) -} - -func main() { - // Get the OS (using GOOS_TARGET if it exist) - goos = os.Getenv("GOOS_TARGET") - if goos == "" { - goos = os.Getenv("GOOS") - } - // Get the architecture (using GOARCH_TARGET if it exists) - goarch = os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - // Check if GOOS and GOARCH environment variables are defined - if goarch == "" || goos == "" { - fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n") - os.Exit(1) - } - - file := strings.TrimSpace(os.Args[1]) - var syscalls io.Reader - if strings.HasPrefix(file, "https://") || strings.HasPrefix(file, "http://") { - // Download syscalls.master file - syscalls = fetchFile(file) - } else { - syscalls = readFile(file) - } - - var text, line string - s := bufio.NewScanner(syscalls) - for s.Scan() { - t := re{str: line} - if t.Match(`^(.*)\\$`) { - // Handle continuation - line = t.sub[1] - line += strings.TrimLeft(s.Text(), " \t") - } else { - // New line - line = s.Text() - } - t = re{str: line} - if t.Match(`\\$`) { - continue - } - t = re{str: line} - - switch goos { - case "dragonfly": - if t.Match(`^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$`) { - num, proto := t.sub[1], t.sub[2] - name := fmt.Sprintf("SYS_%s", t.sub[3]) - text += format(name, num, proto) - } - case "freebsd": - if t.Match(`^([0-9]+)\s+\S+\s+(?:(?:NO)?STD|COMPAT10)\s+({ \S+\s+(\w+).*)$`) { - num, proto := t.sub[1], t.sub[2] - name := fmt.Sprintf("SYS_%s", t.sub[3]) - text += format(name, num, proto) - } - case "openbsd": - if t.Match(`^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$`) { - num, proto, name := t.sub[1], t.sub[3], t.sub[4] - text += format(name, num, proto) - } - case "netbsd": - if t.Match(`^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$`) { - num, proto, compat := t.sub[1], t.sub[6], t.sub[8] - name := t.sub[7] + "_" + t.sub[9] - if t.sub[11] != "" { 
- name = t.sub[7] + "_" + t.sub[11] - } - name = strings.ToUpper(name) - if compat == "" || compat == "13" || compat == "30" || compat == "50" { - text += fmt.Sprintf(" %s = %s; // %s\n", name, num, proto) - } - } - case "darwin": - if t.Match(`^#define\s+SYS_(\w+)\s+([0-9]+)`) { - name, num := t.sub[1], t.sub[2] - name = strings.ToUpper(name) - text += fmt.Sprintf(" SYS_%s = %s;\n", name, num) - } - default: - fmt.Fprintf(os.Stderr, "unrecognized GOOS=%s\n", goos) - os.Exit(1) - - } - } - err := s.Err() - checkErr(err) - - fmt.Printf(template, cmdLine(), buildTags(), text) -} - -const template = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package unix - -const( -%s)` diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index d52bcc41c..c70405491 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -577,8 +577,6 @@ func Futimes(fd int, tv []Timeval) error { return futimes(fd, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } -//sys fcntl(fd int, cmd int, arg int) (val int, err error) - //sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) func Poll(fds []PollFd, timeout int) (n int, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 0a1cc74b3..3440c52e5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -333,6 +333,8 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) error { * Wrapped */ +//sys fcntl(fd int, cmd int, arg int) (val int, err error) + //sys kill(pid int, signum int, posix int) (err error) func Kill(pid int, signum syscall.Signal) (err error) { return kill(pid, int(signum), 1) } diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go index c81510da2..0e3f25aca 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin,386,!go1.12 +// +build darwin,arm,!go1.12 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 26903bca8..b8837fcc6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1575,7 +1575,6 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Fchdir(fd int) (err error) //sys Fchmod(fd int, mode uint32) (err error) //sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) -//sys fcntl(fd int, cmd int, arg int) (val int, err error) //sys Fdatasync(fd int) (err error) //sys Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) //sys FinitModule(fd int, params string, flags int) (err error) @@ -1631,6 +1630,17 @@ func Getpgrp() (pid int) { //sysnb Settimeofday(tv *Timeval) (err error) //sys Setns(fd int, nstype int) (err error) +// PrctlRetInt performs a prctl operation specified by option and further +// optional arguments arg2 through arg5 depending on option. It returns a +// non-negative integer that is returned by the prctl syscall. 
+func PrctlRetInt(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (int, error) { + ret, _, err := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + if err != 0 { + return 0, err + } + return int(ret), nil +} + // issue 1435. // On linux Setuid and Setgid only affects the current thread, not the process. // This does not match what most callers expect so we must return an error @@ -1666,6 +1676,123 @@ func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) { //sys exitThread(code int) (err error) = SYS_EXIT //sys readlen(fd int, p *byte, np int) (n int, err error) = SYS_READ //sys writelen(fd int, p *byte, np int) (n int, err error) = SYS_WRITE +//sys readv(fd int, iovs []Iovec) (n int, err error) = SYS_READV +//sys writev(fd int, iovs []Iovec) (n int, err error) = SYS_WRITEV +//sys preadv(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) = SYS_PREADV +//sys pwritev(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) = SYS_PWRITEV +//sys preadv2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) = SYS_PREADV2 +//sys pwritev2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) = SYS_PWRITEV2 + +func bytes2iovec(bs [][]byte) []Iovec { + iovecs := make([]Iovec, len(bs)) + for i, b := range bs { + iovecs[i].SetLen(len(b)) + if len(b) > 0 { + iovecs[i].Base = &b[0] + } else { + iovecs[i].Base = (*byte)(unsafe.Pointer(&_zero)) + } + } + return iovecs +} + +// offs2lohi splits offs into its lower and upper unsigned long. On 64-bit +// systems, hi will always be 0. On 32-bit systems, offs will be split in half. +// preadv/pwritev chose this calling convention so they don't need to add a +// padding-register for alignment on ARM. 
+func offs2lohi(offs int64) (lo, hi uintptr) { + return uintptr(offs), uintptr(uint64(offs) >> SizeofLong) +} + +func Readv(fd int, iovs [][]byte) (n int, err error) { + iovecs := bytes2iovec(iovs) + n, err = readv(fd, iovecs) + readvRacedetect(iovecs, n, err) + return n, err +} + +func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { + iovecs := bytes2iovec(iovs) + lo, hi := offs2lohi(offset) + n, err = preadv(fd, iovecs, lo, hi) + readvRacedetect(iovecs, n, err) + return n, err +} + +func Preadv2(fd int, iovs [][]byte, offset int64, flags int) (n int, err error) { + iovecs := bytes2iovec(iovs) + lo, hi := offs2lohi(offset) + n, err = preadv2(fd, iovecs, lo, hi, flags) + readvRacedetect(iovecs, n, err) + return n, err +} + +func readvRacedetect(iovecs []Iovec, n int, err error) { + if !raceenabled { + return + } + for i := 0; n > 0 && i < len(iovecs); i++ { + m := int(iovecs[i].Len) + if m > n { + m = n + } + n -= m + if m > 0 { + raceWriteRange(unsafe.Pointer(iovecs[i].Base), m) + } + } + if err == nil { + raceAcquire(unsafe.Pointer(&ioSync)) + } +} + +func Writev(fd int, iovs [][]byte) (n int, err error) { + iovecs := bytes2iovec(iovs) + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + n, err = writev(fd, iovecs) + writevRacedetect(iovecs, n) + return n, err +} + +func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) { + iovecs := bytes2iovec(iovs) + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + lo, hi := offs2lohi(offset) + n, err = pwritev(fd, iovecs, lo, hi) + writevRacedetect(iovecs, n) + return n, err +} + +func Pwritev2(fd int, iovs [][]byte, offset int64, flags int) (n int, err error) { + iovecs := bytes2iovec(iovs) + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + lo, hi := offs2lohi(offset) + n, err = pwritev2(fd, iovecs, lo, hi, flags) + writevRacedetect(iovecs, n) + return n, err +} + +func writevRacedetect(iovecs []Iovec, n int) { + if !raceenabled { + return + } + for i := 0; n > 0 && i < len(iovecs); i++ { + m := int(iovecs[i].Len) + if m > n { + m = n + } + n -= m + if m > 0 { + raceReadRange(unsafe.Pointer(iovecs[i].Base), m) + } + } +} // mmap varies by architecture; see syscall_linux_*.go. 
//sys munmap(addr uintptr, length uintptr) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 211131d9c..6135d383c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -249,6 +249,14 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return sendfile(outfd, infd, offset, count) } +func Fstatvfs(fd int, buf *Statvfs_t) (err error) { + return Fstatvfs1(fd, buf, ST_WAIT) +} + +func Statvfs(path string, buf *Statvfs_t) (err error) { + return Statvfs1(path, buf, ST_WAIT) +} + /* * Exposed directly */ @@ -287,6 +295,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Fpathconf(fd int, name int) (val int, err error) //sys Fstat(fd int, stat *Stat_t) (err error) //sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) +//sys Fstatvfs1(fd int, buf *Statvfs_t) (err error) = SYS_FSTATVFS1 //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) //sysnb Getegid() (egid int) @@ -343,6 +352,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) //sys Stat(path string, stat *Stat_t) (err error) +//sys Statvfs1(path string, buf *Statvfs_t) (err error) = SYS_STATVFS1 //sys Symlink(path string, link string) (err error) //sys Symlinkat(oldpath string, newdirfd int, newpath string) (err error) //sys Sync() (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 92ed67de0..5ca0df2bd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -352,7 +352,6 @@ func Uname(uname *Utsname) error { // clock_settime // closefrom // execve -// fcntl // fhopen // fhstat // fhstatfs diff --git a/vendor/golang.org/x/sys/unix/types_aix.go b/vendor/golang.org/x/sys/unix/types_aix.go deleted file mode 100644 index 40d2beede..000000000 --- a/vendor/golang.org/x/sys/unix/types_aix.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore -// +build aix - -/* -Input to cgo -godefs. 
See also mkerrors.sh and mkall.sh -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include - - -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong - PathMax = C.PATH_MAX -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -type off64 C.off64_t -type off C.off_t -type Mode_t C.mode_t - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -type Timeval32 C.struct_timeval32 - -type Timex C.struct_timex - -type Time_t C.time_t - -type Tms C.struct_tms - -type Utimbuf C.struct_utimbuf - -type Timezone C.struct_timezone - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit64 - -type Pid_t C.pid_t - -type _Gid_t C.gid_t - -type dev_t C.dev_t - -// Files - -type Stat_t C.struct_stat - -type StatxTimestamp C.struct_statx_timestamp - -type Statx_t C.struct_statx - -type Dirent C.struct_dirent - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Cmsghdr C.struct_cmsghdr - -type ICMPv6Filter C.struct_icmp6_filter - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type Linger C.struct_linger - -type Msghdr C.struct_msghdr - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr -) - -type IfMsgHdr C.struct_if_msghdr - -// Misc - -type FdSet C.fd_set - -type Utsname C.struct_utsname - -type Ustat_t C.struct_ustat - -type Sigset_t C.sigset_t - -const ( - AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// Terminal handling - -type Termios C.struct_termios - -type Termio C.struct_termio - -type Winsize C.struct_winsize - -//poll - -type PollFd struct { - Fd int32 - Events uint16 - Revents uint16 -} - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - 
POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -//flock_t - -type Flock_t C.struct_flock64 - -// Statfs - -type Fsid_t C.struct_fsid_t -type Fsid64_t C.struct_fsid64_t - -type Statfs_t C.struct_statfs - -const RNDGETENTCNT = 0x80045200 diff --git a/vendor/golang.org/x/sys/unix/types_darwin.go b/vendor/golang.org/x/sys/unix/types_darwin.go deleted file mode 100644 index 155c2e692..000000000 --- a/vendor/golang.org/x/sys/unix/types_darwin.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define __DARWIN_UNIX03 0 -#define KERNEL -#define _DARWIN_USE_64_BIT_INODE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -type Timeval32 C.struct_timeval32 - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat64 - -type Statfs_t C.struct_statfs64 - -type Flock_t C.struct_flock - -type Fstore_t C.struct_fstore - -type Radvisory_t C.struct_radvisory - -type Fbootstraptransfer_t C.struct_fbootstraptransfer - -type Log2phys_t C.struct_log2phys - -type Fsid C.struct_fsid - -type Dirent C.struct_dirent - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet4Pktinfo C.struct_in_pktinfo - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = 
C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr - SizeofIfmaMsghdr2 = C.sizeof_struct_ifma_msghdr2 - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfmaMsghdr C.struct_ifma_msghdr - -type IfmaMsghdr2 C.struct_ifma_msghdr2 - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// uname - -type Utsname C.struct_utsname - -// Clockinfo - -const SizeofClockinfo = C.sizeof_struct_clockinfo - -type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_dragonfly.go b/vendor/golang.org/x/sys/unix/types_dragonfly.go deleted file mode 100644 index 3365dd79d..000000000 --- a/vendor/golang.org/x/sys/unix/types_dragonfly.go +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Statfs_t C.struct_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type Fsid C.struct_fsid - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfmaMsghdr 
C.struct_ifma_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Uname - -type Utsname C.struct_utsname diff --git a/vendor/golang.org/x/sys/unix/types_freebsd.go b/vendor/golang.org/x/sys/unix/types_freebsd.go deleted file mode 100644 index a121dc336..000000000 --- a/vendor/golang.org/x/sys/unix/types_freebsd.go +++ /dev/null @@ -1,400 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define _WANT_FREEBSD11_STAT 1 -#define _WANT_FREEBSD11_STATFS 1 -#define _WANT_FREEBSD11_DIRENT 1 -#define _WANT_FREEBSD11_KEVENT 1 - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -// This structure is a duplicate of if_data on FreeBSD 8-STABLE. -// See /usr/include/net/if.h. -struct if_data8 { - u_char ifi_type; - u_char ifi_physical; - u_char ifi_addrlen; - u_char ifi_hdrlen; - u_char ifi_link_state; - u_char ifi_spare_char1; - u_char ifi_spare_char2; - u_char ifi_datalen; - u_long ifi_mtu; - u_long ifi_metric; - u_long ifi_baudrate; - u_long ifi_ipackets; - u_long ifi_ierrors; - u_long ifi_opackets; - u_long ifi_oerrors; - u_long ifi_collisions; - u_long ifi_ibytes; - u_long ifi_obytes; - u_long ifi_imcasts; - u_long ifi_omcasts; - u_long ifi_iqdrops; - u_long ifi_noproto; - u_long ifi_hwassist; -// FIXME: these are now unions, so maybe need to change definitions? -#undef ifi_epoch - time_t ifi_epoch; -#undef ifi_lastchange - struct timeval ifi_lastchange; -}; - -// This structure is a duplicate of if_msghdr on FreeBSD 8-STABLE. -// See /usr/include/net/if.h. 
-struct if_msghdr8 { - u_short ifm_msglen; - u_char ifm_version; - u_char ifm_type; - int ifm_addrs; - int ifm_flags; - u_short ifm_index; - struct if_data8 ifm_data; -}; -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -const ( - _statfsVersion = C.STATFS_VERSION - _dirblksiz = C.DIRBLKSIZ -) - -type Stat_t C.struct_stat - -type stat_freebsd11_t C.struct_freebsd11_stat - -type Statfs_t C.struct_statfs - -type statfs_freebsd11_t C.struct_freebsd11_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type dirent_freebsd11 C.struct_freebsd11_dirent - -type Fsid C.struct_fsid - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Advice to Fadvise - -const ( - FADV_NORMAL = C.POSIX_FADV_NORMAL - FADV_RANDOM = C.POSIX_FADV_RANDOM - FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL - FADV_WILLNEED = C.POSIX_FADV_WILLNEED - FADV_DONTNEED = C.POSIX_FADV_DONTNEED - FADV_NOREUSE = C.POSIX_FADV_NOREUSE -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPMreqn C.struct_ip_mreqn - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPMreqn = C.sizeof_struct_ip_mreqn - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_ATTACH = C.PT_ATTACH - PTRACE_CONT = C.PT_CONTINUE - PTRACE_DETACH = C.PT_DETACH - PTRACE_GETFPREGS = C.PT_GETFPREGS - PTRACE_GETFSBASE = C.PT_GETFSBASE - PTRACE_GETLWPLIST = C.PT_GETLWPLIST - PTRACE_GETNUMLWPS = C.PT_GETNUMLWPS - PTRACE_GETREGS = C.PT_GETREGS - PTRACE_GETXSTATE = C.PT_GETXSTATE - PTRACE_IO = C.PT_IO - PTRACE_KILL = C.PT_KILL - PTRACE_LWPEVENTS = C.PT_LWP_EVENTS - PTRACE_LWPINFO = C.PT_LWPINFO - PTRACE_SETFPREGS = C.PT_SETFPREGS - PTRACE_SETREGS = C.PT_SETREGS - PTRACE_SINGLESTEP = C.PT_STEP - PTRACE_TRACEME = C.PT_TRACE_ME -) - -const ( - PIOD_READ_D = C.PIOD_READ_D - PIOD_WRITE_D = C.PIOD_WRITE_D - PIOD_READ_I = C.PIOD_READ_I - PIOD_WRITE_I = C.PIOD_WRITE_I -) - -const ( - PL_FLAG_BORN = C.PL_FLAG_BORN - PL_FLAG_EXITED = C.PL_FLAG_EXITED - 
PL_FLAG_SI = C.PL_FLAG_SI -) - -const ( - TRAP_BRKPT = C.TRAP_BRKPT - TRAP_TRACE = C.TRAP_TRACE -) - -type PtraceLwpInfoStruct C.struct_ptrace_lwpinfo - -type __Siginfo C.struct___siginfo - -type Sigset_t C.sigset_t - -type Reg C.struct_reg - -type FpReg C.struct_fpreg - -type PtraceIoDesc C.struct_ptrace_io_desc - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent_freebsd11 - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - sizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfMsghdr = C.sizeof_struct_if_msghdr8 - sizeofIfData = C.sizeof_struct_if_data - SizeofIfData = C.sizeof_struct_if_data8 - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type ifMsghdr C.struct_if_msghdr - -type IfMsghdr C.struct_if_msghdr8 - -type ifData C.struct_if_data - -type IfData C.struct_if_data8 - -type IfaMsghdr C.struct_ifa_msghdr - -type IfmaMsghdr C.struct_ifma_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfZbuf = C.sizeof_struct_bpf_zbuf - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr - SizeofBpfZbufHeader = C.sizeof_struct_bpf_zbuf_header -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfZbuf C.struct_bpf_zbuf - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -type BpfZbufHeader C.struct_bpf_zbuf_header - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLINIGNEOF = C.POLLINIGNEOF - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Capabilities - -type CapRights C.struct_cap_rights - -// Uname - -type Utsname C.struct_utsname diff --git a/vendor/golang.org/x/sys/unix/types_netbsd.go b/vendor/golang.org/x/sys/unix/types_netbsd.go deleted file mode 100644 index 4a96d72c3..000000000 --- a/vendor/golang.org/x/sys/unix/types_netbsd.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Statfs_t C.struct_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type Fsid C.fsid_t - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Advice to Fadvise - -const ( - FADV_NORMAL = C.POSIX_FADV_NORMAL - FADV_RANDOM = C.POSIX_FADV_RANDOM - FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL - FADV_WILLNEED = C.POSIX_FADV_WILLNEED - FADV_DONTNEED = C.POSIX_FADV_DONTNEED - FADV_NOREUSE = C.POSIX_FADV_NOREUSE -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfAnnounceMsghdr = 
C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -type Mclpool C.struct_mclpool - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -type BpfTimeval C.struct_bpf_timeval - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -type Ptmget C.struct_ptmget - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Sysctl - -type Sysctlnode C.struct_sysctlnode - -// Uname - -type Utsname C.struct_utsname - -// Clockinfo - -const SizeofClockinfo = C.sizeof_struct_clockinfo - -type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_openbsd.go b/vendor/golang.org/x/sys/unix/types_openbsd.go deleted file mode 100644 index 775cb57dc..000000000 --- a/vendor/golang.org/x/sys/unix/types_openbsd.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Statfs_t C.struct_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type Fsid C.fsid_t - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - 
-type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -type Mclpool C.struct_mclpool - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -type BpfTimeval C.struct_bpf_timeval - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Signal Sets - -type Sigset_t C.sigset_t - -// Uname - -type Utsname C.struct_utsname - -// Uvmexp - -const SizeofUvmexp = C.sizeof_struct_uvmexp - -type Uvmexp C.struct_uvmexp - -// Clockinfo - -const SizeofClockinfo = C.sizeof_struct_clockinfo - -type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_solaris.go b/vendor/golang.org/x/sys/unix/types_solaris.go deleted file mode 100644 index 2b716f934..000000000 --- a/vendor/golang.org/x/sys/unix/types_solaris.go +++ /dev/null @@ -1,266 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -// These defines ensure that builds done on newer versions of Solaris are -// backwards-compatible with older versions of Solaris and -// OpenSolaris-based derivatives. 
-#define __USE_SUNOS_SOCKETS__ // msghdr -#define __USE_LEGACY_PROTOTYPES__ // iovec -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong - PathMax = C.PATH_MAX - MaxHostNameLen = C.MAXHOSTNAMELEN -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -type Timeval32 C.struct_timeval32 - -type Tms C.struct_tms - -type Utimbuf C.struct_utimbuf - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -// Filesystems - -type _Fsblkcnt_t C.fsblkcnt_t - -type Statvfs_t C.struct_statvfs - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Select - -type FdSet C.fd_set - -// Misc - -type Utsname C.struct_utsname - -type Ustat_t C.struct_ustat - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_EACCESS = C.AT_EACCESS -) - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type 
IfaMsghdr C.struct_ifa_msghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfTimeval C.struct_bpf_timeval - -type BpfHdr C.struct_bpf_hdr - -// Terminal handling - -type Termios C.struct_termios - -type Termio C.struct_termio - -type Winsize C.struct_winsize - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 9ac523000..62f110fa7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -2035,6 +2035,13 @@ const ( RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 + RWF_APPEND = 0x10 + RWF_DSYNC = 0x2 + RWF_HIPRI = 0x1 + RWF_NOWAIT = 0x8 + RWF_SUPPORTED = 0x1f + RWF_SYNC = 0x4 + RWF_WRITE_LIFE_NOT_SET = 0x0 SCM_CREDENTIALS = 0x2 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 47622b955..865154d94 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -2036,6 +2036,13 @@ const ( RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 + RWF_APPEND = 0x10 + RWF_DSYNC = 0x2 + RWF_HIPRI = 0x1 + RWF_NOWAIT = 0x8 + RWF_SUPPORTED = 0x1f + RWF_SYNC = 0x4 + RWF_WRITE_LIFE_NOT_SET = 0x0 SCM_CREDENTIALS = 0x2 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 56cd2ebe1..386beb450 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -2042,6 +2042,13 @@ const ( RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 + RWF_APPEND = 0x10 + RWF_DSYNC = 0x2 + RWF_HIPRI = 0x1 + RWF_NOWAIT = 0x8 + RWF_SUPPORTED = 0x1f + RWF_SYNC = 0x4 + RWF_WRITE_LIFE_NOT_SET = 0x0 SCM_CREDENTIALS = 0x2 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 39712d777..09e11cc93 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -2028,6 +2028,13 @@ const ( RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 + RWF_APPEND = 0x10 + RWF_DSYNC = 0x2 + RWF_HIPRI = 0x1 + RWF_NOWAIT = 0x8 + RWF_SUPPORTED = 0x1f + RWF_SYNC = 0x4 + RWF_WRITE_LIFE_NOT_SET = 0x0 SCM_CREDENTIALS = 0x2 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 89163c410..e3910aa11 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -2035,6 +2035,13 @@ 
const ( RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 + RWF_APPEND = 0x10 + RWF_DSYNC = 0x2 + RWF_HIPRI = 0x1 + RWF_NOWAIT = 0x8 + RWF_SUPPORTED = 0x1f + RWF_SYNC = 0x4 + RWF_WRITE_LIFE_NOT_SET = 0x0 SCM_CREDENTIALS = 0x2 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 15284ff41..9877b7c75 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -2035,6 +2035,13 @@ const ( RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 + RWF_APPEND = 0x10 + RWF_DSYNC = 0x2 + RWF_HIPRI = 0x1 + RWF_NOWAIT = 0x8 + RWF_SUPPORTED = 0x1f + RWF_SYNC = 0x4 + RWF_WRITE_LIFE_NOT_SET = 0x0 SCM_CREDENTIALS = 0x2 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 1ec776940..b77dd8431 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -2035,6 +2035,13 @@ const ( RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 + RWF_APPEND = 0x10 + RWF_DSYNC = 0x2 + RWF_HIPRI = 0x1 + RWF_NOWAIT = 0x8 + RWF_SUPPORTED = 0x1f + RWF_SYNC = 0x4 + RWF_WRITE_LIFE_NOT_SET = 0x0 SCM_CREDENTIALS = 0x2 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index f96d54563..966d025f7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -2035,6 +2035,13 @@ const ( RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 + RWF_APPEND = 0x10 + RWF_DSYNC = 0x2 + RWF_HIPRI = 0x1 + RWF_NOWAIT = 0x8 + RWF_SUPPORTED = 0x1f + RWF_SYNC = 0x4 + RWF_WRITE_LIFE_NOT_SET = 0x0 SCM_CREDENTIALS = 0x2 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 9adb20e3f..3baadbe2f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -2093,6 +2093,13 @@ const ( RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 + RWF_APPEND = 0x10 + RWF_DSYNC = 0x2 + RWF_HIPRI = 0x1 + RWF_NOWAIT = 0x8 + RWF_SUPPORTED = 0x1f + RWF_SYNC = 0x4 + RWF_WRITE_LIFE_NOT_SET = 0x0 SCM_CREDENTIALS = 0x2 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 080e2f0aa..b0bc48768 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -2093,6 +2093,13 @@ const ( RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 + RWF_APPEND = 0x10 + RWF_DSYNC = 0x2 + RWF_HIPRI = 0x1 + RWF_NOWAIT = 0x8 + RWF_SUPPORTED = 0x1f + RWF_SYNC = 0x4 + RWF_WRITE_LIFE_NOT_SET = 0x0 SCM_CREDENTIALS = 0x2 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index db20a369d..86976e419 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -2023,6 +2023,13 @@ const ( RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 + RWF_APPEND = 0x10 + RWF_DSYNC = 0x2 + 
RWF_HIPRI = 0x1 + RWF_NOWAIT = 0x8 + RWF_SUPPORTED = 0x1f + RWF_SYNC = 0x4 + RWF_WRITE_LIFE_NOT_SET = 0x0 SCM_CREDENTIALS = 0x2 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 734757aec..cd6a04641 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -2096,6 +2096,13 @@ const ( RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 + RWF_APPEND = 0x10 + RWF_DSYNC = 0x2 + RWF_HIPRI = 0x1 + RWF_NOWAIT = 0x8 + RWF_SUPPORTED = 0x1f + RWF_SYNC = 0x4 + RWF_WRITE_LIFE_NOT_SET = 0x0 SCM_CREDENTIALS = 0x2 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index c8d2c2351..89da22b79 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -2088,6 +2088,13 @@ const ( RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 + RWF_APPEND = 0x10 + RWF_DSYNC = 0x2 + RWF_HIPRI = 0x1 + RWF_NOWAIT = 0x8 + RWF_SUPPORTED = 0x1f + RWF_SYNC = 0x4 + RWF_WRITE_LIFE_NOT_SET = 0x0 SCM_CREDENTIALS = 0x2 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d diff --git a/vendor/golang.org/x/sys/unix/zptracearm_linux.go b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go similarity index 93% rename from vendor/golang.org/x/sys/unix/zptracearm_linux.go rename to vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go index faf23bbed..89c5920e0 100644 --- a/vendor/golang.org/x/sys/unix/zptracearm_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go @@ -1,4 +1,4 @@ -// Code generated by linux/mkall.go generatePtracePair(arm, arm64). DO NOT EDIT. +// Code generated by linux/mkall.go generatePtracePair("arm", "arm64"). DO NOT EDIT. // +build linux // +build arm arm64 diff --git a/vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go b/vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go new file mode 100644 index 000000000..6cb6d688a --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go @@ -0,0 +1,17 @@ +// Code generated by linux/mkall.go generatePtraceRegSet("arm64"). DO NOT EDIT. + +package unix + +import "unsafe" + +// PtraceGetRegSetArm64 fetches the registers used by arm64 binaries. +func PtraceGetRegSetArm64(pid, addr int, regsout *PtraceRegsArm64) error { + iovec := Iovec{(*byte)(unsafe.Pointer(regsout)), uint64(unsafe.Sizeof(*regsout))} + return ptrace(PTRACE_GETREGSET, pid, uintptr(addr), uintptr(unsafe.Pointer(&iovec))) +} + +// PtraceSetRegSetArm64 sets the registers used by arm64 binaries. +func PtraceSetRegSetArm64(pid, addr int, regs *PtraceRegsArm64) error { + iovec := Iovec{(*byte)(unsafe.Pointer(regs)), uint64(unsafe.Sizeof(*regs))} + return ptrace(PTRACE_SETREGSET, pid, uintptr(addr), uintptr(unsafe.Pointer(&iovec))) +} diff --git a/vendor/golang.org/x/sys/unix/zptracemips_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go similarity index 93% rename from vendor/golang.org/x/sys/unix/zptracemips_linux.go rename to vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go index c431131e6..24b841eec 100644 --- a/vendor/golang.org/x/sys/unix/zptracemips_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go @@ -1,4 +1,4 @@ -// Code generated by linux/mkall.go generatePtracePair(mips, mips64). DO NOT EDIT. +// Code generated by linux/mkall.go generatePtracePair("mips", "mips64"). DO NOT EDIT. 
// +build linux // +build mips mips64 diff --git a/vendor/golang.org/x/sys/unix/zptracemipsle_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go similarity index 93% rename from vendor/golang.org/x/sys/unix/zptracemipsle_linux.go rename to vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go index dc3d6d373..47b048956 100644 --- a/vendor/golang.org/x/sys/unix/zptracemipsle_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go @@ -1,4 +1,4 @@ -// Code generated by linux/mkall.go generatePtracePair(mipsle, mips64le). DO NOT EDIT. +// Code generated by linux/mkall.go generatePtracePair("mipsle", "mips64le"). DO NOT EDIT. // +build linux // +build mipsle mips64le diff --git a/vendor/golang.org/x/sys/unix/zptrace386_linux.go b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go similarity index 95% rename from vendor/golang.org/x/sys/unix/zptrace386_linux.go rename to vendor/golang.org/x/sys/unix/zptrace_x86_linux.go index 2d21c49e1..ea5d9cb53 100644 --- a/vendor/golang.org/x/sys/unix/zptrace386_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go @@ -1,4 +1,4 @@ -// Code generated by linux/mkall.go generatePtracePair(386, amd64). DO NOT EDIT. +// Code generated by linux/mkall.go generatePtracePair("386", "amd64"). DO NOT EDIT. // +build linux // +build 386 amd64 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go index b5ed80589..c1cc0a415 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -527,6 +516,17 @@ func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintp // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go index cdf8a7000..a3fc49004 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go @@ -339,22 +339,6 @@ func libc_futimes_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fcntl_trampoline() - -//go:linkname libc_fcntl libc_fcntl -//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" - -// 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := syscall_syscall(funcPC(libc_poll_trampoline), uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -727,6 +711,22 @@ func libc_setattrlist_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fcntl_trampoline() + +//go:linkname libc_fcntl libc_fcntl +//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_kill_trampoline), uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s index 9cae5b1da..6836a4129 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s @@ -44,8 +44,6 @@ TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) -TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fcntl(SB) TEXT ·libc_poll_trampoline(SB),NOSPLIT,$0-0 JMP libc_poll(SB) TEXT ·libc_madvise_trampoline(SB),NOSPLIT,$0-0 @@ -84,6 +82,8 @@ TEXT ·libc_flistxattr_trampoline(SB),NOSPLIT,$0-0 JMP libc_flistxattr(SB) TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_setattrlist(SB) +TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 JMP libc_kill(SB) TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 @@ -106,6 +106,8 @@ TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 JMP libc_chown(SB) TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) +TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 JMP libc_close(SB) TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go index 8bde8235a..f8e5c37c5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -527,6 +516,17 @@ func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintp // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 63b51fbf0..50d6437e6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -339,22 +339,6 @@ func libc_futimes_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fcntl_trampoline() - -//go:linkname libc_fcntl libc_fcntl -//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := syscall_syscall(funcPC(libc_poll_trampoline), uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -727,6 +711,22 @@ func libc_setattrlist_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fcntl_trampoline() + +//go:linkname libc_fcntl libc_fcntl +//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_kill_trampoline), uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 1a0e52aa2..a3fdf099d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -44,8 +44,6 @@ TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) -TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fcntl(SB) TEXT ·libc_poll_trampoline(SB),NOSPLIT,$0-0 JMP libc_poll(SB) TEXT ·libc_madvise_trampoline(SB),NOSPLIT,$0-0 @@ -84,6 +82,8 @@ TEXT ·libc_flistxattr_trampoline(SB),NOSPLIT,$0-0 JMP libc_flistxattr(SB) TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_setattrlist(SB) +TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 JMP libc_kill(SB) TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go index 63a236b50..cea04e041 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - 
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -527,6 +516,17 @@ func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintp // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go index adb8668c2..63103950c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go @@ -339,22 +339,6 @@ func libc_futimes_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fcntl_trampoline() - -//go:linkname libc_fcntl libc_fcntl -//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := syscall_syscall(funcPC(libc_poll_trampoline), uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -727,6 +711,22 @@ func libc_setattrlist_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fcntl_trampoline() + +//go:linkname libc_fcntl libc_fcntl +//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_kill_trampoline), uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s index 5bebb1bbd..b67f518fa 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s @@ -44,8 +44,6 @@ TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) -TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fcntl(SB) TEXT ·libc_poll_trampoline(SB),NOSPLIT,$0-0 JMP libc_poll(SB) TEXT ·libc_madvise_trampoline(SB),NOSPLIT,$0-0 @@ -84,10 +82,14 @@ TEXT ·libc_flistxattr_trampoline(SB),NOSPLIT,$0-0 JMP libc_flistxattr(SB) TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_setattrlist(SB) +TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 JMP libc_kill(SB) TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) 
+TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go index 87c0b6122..8c3bb3a25 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -527,6 +516,17 @@ func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintp // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index c882a4f9d..a8709f72d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -339,22 +339,6 @@ func libc_futimes_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fcntl_trampoline() - -//go:linkname libc_fcntl libc_fcntl -//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := syscall_syscall(funcPC(libc_poll_trampoline), uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -727,6 +711,22 @@ func libc_setattrlist_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fcntl_trampoline() + +//go:linkname libc_fcntl libc_fcntl +//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_kill_trampoline), uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s 
b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 19faa4d8d..40cce1bb2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -44,8 +44,6 @@ TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) -TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fcntl(SB) TEXT ·libc_poll_trampoline(SB),NOSPLIT,$0-0 JMP libc_poll(SB) TEXT ·libc_madvise_trampoline(SB),NOSPLIT,$0-0 @@ -84,6 +82,8 @@ TEXT ·libc_flistxattr_trampoline(SB),NOSPLIT,$0-0 JMP libc_flistxattr(SB) TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_setattrlist(SB) +TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 JMP libc_kill(SB) TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 @@ -106,6 +106,8 @@ TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 JMP libc_chown(SB) TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) +TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 JMP libc_close(SB) TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index df199b345..fe1fdd78d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -255,17 +255,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index e68185f1e..c9058f309 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -255,17 +255,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 2f77f93c4..49b20c229 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = 
errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index e9a12c9d9..31d2c4616 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index 27ab0fbda..abab3d7cb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index fe5d462e4..c18f6c6e6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -659,17 +659,6 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fdatasync(fd int) (err error) { _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1608,6 +1597,108 @@ func writelen(fd int, p *byte, np int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func readv(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 
:= Syscall(SYS_WRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func munmap(addr uintptr, length uintptr) (err error) { _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 536abcea3..54c0f39b7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -659,17 +659,6 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fdatasync(fd int) (err error) { _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1608,6 +1597,108 @@ func writelen(fd int, p *byte, np int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func readv(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + 
} + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func munmap(addr uintptr, length uintptr) (err error) { _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 37823cd6b..3eaa45fb8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -659,17 +659,6 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fdatasync(fd int) (err error) { _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1608,6 +1597,108 @@ func writelen(fd int, p *byte, np int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func readv(fd int, iovs []Iovec) (n int, 
err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func munmap(addr uintptr, length uintptr) (err error) { _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 794f61264..be7f2db31 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -659,17 +659,6 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fdatasync(fd 
int) (err error) { _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1608,6 +1597,108 @@ func writelen(fd int, p *byte, np int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func readv(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func munmap(addr uintptr, length uintptr) (err error) { _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 1b34b550c..3a0d9105e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -659,17 +659,6 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, 
cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fdatasync(fd int) (err error) { _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1608,6 +1597,108 @@ func writelen(fd int, p *byte, np int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func readv(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func munmap(addr uintptr, length uintptr) (err error) { _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 5714e2592..05f62cbf0 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -659,17 +659,6 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fdatasync(fd int) (err error) { _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1608,6 +1597,108 @@ func writelen(fd int, p *byte, np int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func readv(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func munmap(addr 
uintptr, length uintptr) (err error) { _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index 88a6b3362..29bdea98a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -659,17 +659,6 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fdatasync(fd int) (err error) { _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1608,6 +1597,108 @@ func writelen(fd int, p *byte, np int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func readv(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } 
+ r0, _, e1 := Syscall6(SYS_PWRITEV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func munmap(addr uintptr, length uintptr) (err error) { _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index c09dbe345..1dac21980 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -659,17 +659,6 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fdatasync(fd int) (err error) { _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1608,6 +1597,108 @@ func writelen(fd int, p *byte, np int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func readv(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE 
IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func munmap(addr uintptr, length uintptr) (err error) { _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 42f6c2103..7b76d6025 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -659,17 +659,6 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fdatasync(fd int) (err error) { _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1608,6 +1597,108 @@ func writelen(fd int, p *byte, np int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func readv(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = 
unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func munmap(addr uintptr, length uintptr) (err error) { _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index de2cd8db9..5f2e31cd1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -659,17 +659,6 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fdatasync(fd int) (err error) { _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1608,6 +1597,108 @@ func writelen(fd int, p *byte, np int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func readv(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = 
int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func munmap(addr uintptr, length uintptr) (err error) { _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index d51bf07fc..ea52bd335 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -659,17 +659,6 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fdatasync(fd int) (err error) { _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1608,6 +1597,108 @@ func writelen(fd int, p *byte, np int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func readv(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovs []Iovec, offs_l uintptr, 
offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func munmap(addr uintptr, length uintptr) (err error) { _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 1e3a3cb73..3f1e461ce 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -659,17 +659,6 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fdatasync(fd int) (err error) { _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1608,6 +1597,108 @@ func writelen(fd int, p *byte, np int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func readv(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := 
Syscall6(SYS_PREADV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func munmap(addr uintptr, length uintptr) (err error) { _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index 3c97008cd..6bcd0a312 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -659,17 +659,6 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fdatasync(fd int) (err error) { _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1608,6 +1597,108 @@ func writelen(fd int, p *byte, np int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func readv(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func munmap(addr uintptr, length uintptr) (err error) { _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 5ade42cce..2d1e46b28 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -926,6 +915,16 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatvfs1(fd int, buf *Statvfs_t, flags int) (err error) { + _, _, e1 := Syscall(SYS_FSTATVFS1, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1635,6 +1634,21 @@ func 
Stat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statvfs1(path string, buf *Statvfs_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATVFS1, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index 3e0bbc5f1..b494ccc06 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -926,6 +915,16 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatvfs1(fd int, buf *Statvfs_t, flags int) (err error) { + _, _, e1 := Syscall(SYS_FSTATVFS1, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1635,6 +1634,21 @@ func Stat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statvfs1(path string, buf *Statvfs_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATVFS1, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index cb0af13a3..729e849d6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -926,6 +915,16 @@ func Fstatat(fd int, path string, stat 
*Stat_t, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatvfs1(fd int, buf *Statvfs_t, flags int) (err error) { + _, _, e1 := Syscall(SYS_FSTATVFS1, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1635,6 +1634,21 @@ func Stat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statvfs1(path string, buf *Statvfs_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATVFS1, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 6fd48d3dc..ea8f5ffc4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -926,6 +915,16 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatvfs1(fd int, buf *Statvfs_t, flags int) (err error) { + _, _, e1 := Syscall(SYS_FSTATVFS1, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1635,6 +1634,21 @@ func Stat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statvfs1(path string, buf *Statvfs_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATVFS1, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 2938e4124..262029ab5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT 
-func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 22b79ab0e..75654e7a8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index cb921f37a..ee386e3c1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 5a7438035..c6d9f5d7f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index 86736ab6e..a89100c08 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -78,6 +78,33 @@ type Stat_t struct { type Statfs_t [0]byte +type Statvfs_t struct { + Flag uint32 + Bsize uint32 + Frsize uint32 + Iosize uint32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Bresvd uint64 + Files uint64 + Ffree uint64 + 
Favail uint64 + Fresvd uint64 + Syncreads uint64 + Syncwrites uint64 + Asyncreads uint64 + Asyncwrites uint64 + Fsidx Fsid + Fsid uint32 + Namemax uint32 + Owner uint32 + Spare [4]uint32 + Fstypename [32]byte + Mntonname [1024]byte + Mntfromname [1024]byte +} + type Flock_t struct { Start int64 Len int64 @@ -103,6 +130,11 @@ const ( PathMax = 0x400 ) +const ( + ST_WAIT = 0x1 + ST_NOWAIT = 0x2 +) + const ( FADV_NORMAL = 0x0 FADV_RANDOM = 0x1 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index 3427811f9..289184e0b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -82,6 +82,34 @@ type Stat_t struct { type Statfs_t [0]byte +type Statvfs_t struct { + Flag uint64 + Bsize uint64 + Frsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Bresvd uint64 + Files uint64 + Ffree uint64 + Favail uint64 + Fresvd uint64 + Syncreads uint64 + Syncwrites uint64 + Asyncreads uint64 + Asyncwrites uint64 + Fsidx Fsid + Fsid uint64 + Namemax uint64 + Owner uint32 + Spare [4]uint32 + Fstypename [32]byte + Mntonname [1024]byte + Mntfromname [1024]byte + _ [4]byte +} + type Flock_t struct { Start int64 Len int64 @@ -107,6 +135,11 @@ const ( PathMax = 0x400 ) +const ( + ST_WAIT = 0x1 + ST_NOWAIT = 0x2 +) + const ( FADV_NORMAL = 0x0 FADV_RANDOM = 0x1 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 399f37a43..428c450e4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -83,6 +83,33 @@ type Stat_t struct { type Statfs_t [0]byte +type Statvfs_t struct { + Flag uint32 + Bsize uint32 + Frsize uint32 + Iosize uint32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Bresvd uint64 + Files uint64 + Ffree uint64 + Favail uint64 + Fresvd uint64 + Syncreads uint64 + Syncwrites uint64 + Asyncreads uint64 + Asyncwrites uint64 + Fsidx Fsid + Fsid uint32 + Namemax uint32 + Owner uint32 + Spare [4]uint32 + Fstypename [32]byte + Mntonname [1024]byte + Mntfromname [1024]byte +} + type Flock_t struct { Start int64 Len int64 @@ -108,6 +135,11 @@ const ( PathMax = 0x400 ) +const ( + ST_WAIT = 0x1 + ST_NOWAIT = 0x2 +) + const ( FADV_NORMAL = 0x0 FADV_RANDOM = 0x1 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go index 32f0c15d9..6f1f2842c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go @@ -82,6 +82,34 @@ type Stat_t struct { type Statfs_t [0]byte +type Statvfs_t struct { + Flag uint64 + Bsize uint64 + Frsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Bresvd uint64 + Files uint64 + Ffree uint64 + Favail uint64 + Fresvd uint64 + Syncreads uint64 + Syncwrites uint64 + Asyncreads uint64 + Asyncwrites uint64 + Fsidx Fsid + Fsid uint64 + Namemax uint64 + Owner uint32 + Spare [4]uint32 + Fstypename [32]byte + Mntonname [1024]byte + Mntfromname [1024]byte + _ [4]byte +} + type Flock_t struct { Start int64 Len int64 @@ -107,6 +135,11 @@ const ( PathMax = 0x400 ) +const ( + ST_WAIT = 0x1 + ST_NOWAIT = 0x2 +) + const ( FADV_NORMAL = 0x0 FADV_RANDOM = 0x1 diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index a0cd45c14..053d664d0 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ 
b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -701,8 +701,8 @@ const socket_error = uintptr(^uint32(0)) //sys WSACleanup() (err error) [failretval==socket_error] = ws2_32.WSACleanup //sys WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) [failretval==socket_error] = ws2_32.WSAIoctl //sys socket(af int32, typ int32, protocol int32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.socket -//sys sendto(s Handle, buf []byte, flags int, to unsafe.Pointer, tolen int32) (err error) [failretval==socket_error] = ws2_32.sendto -//sys recvfrom(s Handle, buf []byte, flags int, from *RawSockaddrAny, fromlen *int) (n int, err error) [failretval==-1] = ws2_32.recvfrom +//sys sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (err error) [failretval==socket_error] = ws2_32.sendto +//sys recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *int32) (n int32, err error) [failretval==-1] = ws2_32.recvfrom //sys Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) [failretval==socket_error] = ws2_32.setsockopt //sys Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) [failretval==socket_error] = ws2_32.getsockopt //sys bind(s Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socket_error] = ws2_32.bind @@ -1134,8 +1134,9 @@ func Accept(fd Handle) (nfd Handle, sa Sockaddr, err error) { return 0, nil, sys func Recvfrom(fd Handle, p []byte, flags int) (n int, from Sockaddr, err error) { var rsa RawSockaddrAny - l := int(unsafe.Sizeof(rsa)) - n, err = recvfrom(fd, p, flags, &rsa, &l) + l := int32(unsafe.Sizeof(rsa)) + n32, err := recvfrom(fd, p, int32(flags), &rsa, &l) + n = int(n32) if err != nil { return } @@ -1148,7 +1149,7 @@ func Sendto(fd Handle, p []byte, flags int, to Sockaddr) (err error) { if err != nil { return err } - return sendto(fd, p, flags, ptr, l) + return sendto(fd, p, int32(flags), ptr, l) } func SetsockoptTimeval(fd Handle, level, opt int, tv *Timeval) (err error) { return syscall.EWINDOWS } diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 8dd95a0a6..aefb7fc39 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -690,6 +690,7 @@ const ( SOCK_SEQPACKET = 5 IPPROTO_IP = 0 + IPPROTO_ICMP = 1 IPPROTO_IPV6 = 0x29 IPPROTO_TCP = 6 IPPROTO_UDP = 17 @@ -701,6 +702,7 @@ const ( SO_BROADCAST = 32 SO_LINGER = 128 SO_RCVBUF = 0x1002 + SO_RCVTIMEO = 0x1006 SO_SNDBUF = 0x1001 SO_UPDATE_ACCEPT_CONTEXT = 0x700b SO_UPDATE_CONNECT_CONTEXT = 0x7010 diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 4563918f1..2aa4fa642 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -2875,7 +2875,7 @@ func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { return } -func sendto(s Handle, buf []byte, flags int, to unsafe.Pointer, tolen int32) (err error) { +func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (err error) { var _p0 *byte if len(buf) > 0 { _p0 = &buf[0] @@ -2891,13 +2891,13 @@ func sendto(s Handle, buf []byte, flags int, to unsafe.Pointer, tolen int32) (er return } -func recvfrom(s Handle, buf []byte, flags int, from 
*RawSockaddrAny, fromlen *int) (n int, err error) { +func recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *int32) (n int32, err error) { var _p0 *byte if len(buf) > 0 { _p0 = &buf[0] } r0, _, e1 := syscall.Syscall6(procrecvfrom.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) - n = int(r0) + n = int32(r0) if n == -1 { if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/text/encoding/charmap/maketables.go b/vendor/golang.org/x/text/encoding/charmap/maketables.go deleted file mode 100644 index f7941701e..000000000 --- a/vendor/golang.org/x/text/encoding/charmap/maketables.go +++ /dev/null @@ -1,556 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "bufio" - "fmt" - "log" - "net/http" - "sort" - "strings" - "unicode/utf8" - - "golang.org/x/text/encoding" - "golang.org/x/text/internal/gen" -) - -const ascii = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + - "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + - ` !"#$%&'()*+,-./0123456789:;<=>?` + - `@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_` + - "`abcdefghijklmnopqrstuvwxyz{|}~\u007f" - -var encodings = []struct { - name string - mib string - comment string - varName string - replacement byte - mapping string -}{ - { - "IBM Code Page 037", - "IBM037", - "", - "CodePage037", - 0x3f, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM037-2.1.2.ucm", - }, - { - "IBM Code Page 437", - "PC8CodePage437", - "", - "CodePage437", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM437-2.1.2.ucm", - }, - { - "IBM Code Page 850", - "PC850Multilingual", - "", - "CodePage850", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM850-2.1.2.ucm", - }, - { - "IBM Code Page 852", - "PCp852", - "", - "CodePage852", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM852-2.1.2.ucm", - }, - { - "IBM Code Page 855", - "IBM855", - "", - "CodePage855", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM855-2.1.2.ucm", - }, - { - "Windows Code Page 858", // PC latin1 with Euro - "IBM00858", - "", - "CodePage858", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/windows-858-2000.ucm", - }, - { - "IBM Code Page 860", - "IBM860", - "", - "CodePage860", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM860-2.1.2.ucm", - }, - { - "IBM Code Page 862", - "PC862LatinHebrew", - "", - "CodePage862", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM862-2.1.2.ucm", - }, - { - "IBM Code Page 863", - "IBM863", - "", - "CodePage863", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM863-2.1.2.ucm", - }, - { - "IBM Code Page 865", - "IBM865", - "", - "CodePage865", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM865-2.1.2.ucm", - }, - { - "IBM Code Page 866", - "IBM866", - "", - "CodePage866", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-ibm866.txt", - }, - { - 
"IBM Code Page 1047", - "IBM1047", - "", - "CodePage1047", - 0x3f, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM1047-2.1.2.ucm", - }, - { - "IBM Code Page 1140", - "IBM01140", - "", - "CodePage1140", - 0x3f, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/ibm-1140_P100-1997.ucm", - }, - { - "ISO 8859-1", - "ISOLatin1", - "", - "ISO8859_1", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/iso-8859_1-1998.ucm", - }, - { - "ISO 8859-2", - "ISOLatin2", - "", - "ISO8859_2", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-2.txt", - }, - { - "ISO 8859-3", - "ISOLatin3", - "", - "ISO8859_3", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-3.txt", - }, - { - "ISO 8859-4", - "ISOLatin4", - "", - "ISO8859_4", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-4.txt", - }, - { - "ISO 8859-5", - "ISOLatinCyrillic", - "", - "ISO8859_5", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-5.txt", - }, - { - "ISO 8859-6", - "ISOLatinArabic", - "", - "ISO8859_6,ISO8859_6E,ISO8859_6I", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-6.txt", - }, - { - "ISO 8859-7", - "ISOLatinGreek", - "", - "ISO8859_7", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-7.txt", - }, - { - "ISO 8859-8", - "ISOLatinHebrew", - "", - "ISO8859_8,ISO8859_8E,ISO8859_8I", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-8.txt", - }, - { - "ISO 8859-9", - "ISOLatin5", - "", - "ISO8859_9", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/iso-8859_9-1999.ucm", - }, - { - "ISO 8859-10", - "ISOLatin6", - "", - "ISO8859_10", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-10.txt", - }, - { - "ISO 8859-13", - "ISO885913", - "", - "ISO8859_13", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-13.txt", - }, - { - "ISO 8859-14", - "ISO885914", - "", - "ISO8859_14", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-14.txt", - }, - { - "ISO 8859-15", - "ISO885915", - "", - "ISO8859_15", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-15.txt", - }, - { - "ISO 8859-16", - "ISO885916", - "", - "ISO8859_16", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-16.txt", - }, - { - "KOI8-R", - "KOI8R", - "", - "KOI8R", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-koi8-r.txt", - }, - { - "KOI8-U", - "KOI8U", - "", - "KOI8U", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-koi8-u.txt", - }, - { - "Macintosh", - "Macintosh", - "", - "Macintosh", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-macintosh.txt", - }, - { - "Macintosh Cyrillic", - "MacintoshCyrillic", - "", - "MacintoshCyrillic", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-x-mac-cyrillic.txt", - }, - { - "Windows 874", - "Windows874", - "", - "Windows874", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-874.txt", - }, - { - "Windows 1250", - "Windows1250", - "", - "Windows1250", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1250.txt", - }, - { - "Windows 1251", - "Windows1251", - "", - "Windows1251", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1251.txt", - }, - { - "Windows 1252", - "Windows1252", - "", - "Windows1252", - encoding.ASCIISub, - 
"http://encoding.spec.whatwg.org/index-windows-1252.txt", - }, - { - "Windows 1253", - "Windows1253", - "", - "Windows1253", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1253.txt", - }, - { - "Windows 1254", - "Windows1254", - "", - "Windows1254", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1254.txt", - }, - { - "Windows 1255", - "Windows1255", - "", - "Windows1255", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1255.txt", - }, - { - "Windows 1256", - "Windows1256", - "", - "Windows1256", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1256.txt", - }, - { - "Windows 1257", - "Windows1257", - "", - "Windows1257", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1257.txt", - }, - { - "Windows 1258", - "Windows1258", - "", - "Windows1258", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1258.txt", - }, - { - "X-User-Defined", - "XUserDefined", - "It is defined at http://encoding.spec.whatwg.org/#x-user-defined", - "XUserDefined", - encoding.ASCIISub, - ascii + - "\uf780\uf781\uf782\uf783\uf784\uf785\uf786\uf787" + - "\uf788\uf789\uf78a\uf78b\uf78c\uf78d\uf78e\uf78f" + - "\uf790\uf791\uf792\uf793\uf794\uf795\uf796\uf797" + - "\uf798\uf799\uf79a\uf79b\uf79c\uf79d\uf79e\uf79f" + - "\uf7a0\uf7a1\uf7a2\uf7a3\uf7a4\uf7a5\uf7a6\uf7a7" + - "\uf7a8\uf7a9\uf7aa\uf7ab\uf7ac\uf7ad\uf7ae\uf7af" + - "\uf7b0\uf7b1\uf7b2\uf7b3\uf7b4\uf7b5\uf7b6\uf7b7" + - "\uf7b8\uf7b9\uf7ba\uf7bb\uf7bc\uf7bd\uf7be\uf7bf" + - "\uf7c0\uf7c1\uf7c2\uf7c3\uf7c4\uf7c5\uf7c6\uf7c7" + - "\uf7c8\uf7c9\uf7ca\uf7cb\uf7cc\uf7cd\uf7ce\uf7cf" + - "\uf7d0\uf7d1\uf7d2\uf7d3\uf7d4\uf7d5\uf7d6\uf7d7" + - "\uf7d8\uf7d9\uf7da\uf7db\uf7dc\uf7dd\uf7de\uf7df" + - "\uf7e0\uf7e1\uf7e2\uf7e3\uf7e4\uf7e5\uf7e6\uf7e7" + - "\uf7e8\uf7e9\uf7ea\uf7eb\uf7ec\uf7ed\uf7ee\uf7ef" + - "\uf7f0\uf7f1\uf7f2\uf7f3\uf7f4\uf7f5\uf7f6\uf7f7" + - "\uf7f8\uf7f9\uf7fa\uf7fb\uf7fc\uf7fd\uf7fe\uf7ff", - }, -} - -func getWHATWG(url string) string { - res, err := http.Get(url) - if err != nil { - log.Fatalf("%q: Get: %v", url, err) - } - defer res.Body.Close() - - mapping := make([]rune, 128) - for i := range mapping { - mapping[i] = '\ufffd' - } - - scanner := bufio.NewScanner(res.Body) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - if s == "" || s[0] == '#' { - continue - } - x, y := 0, 0 - if _, err := fmt.Sscanf(s, "%d\t0x%x", &x, &y); err != nil { - log.Fatalf("could not parse %q", s) - } - if x < 0 || 128 <= x { - log.Fatalf("code %d is out of range", x) - } - if 0x80 <= y && y < 0xa0 { - // We diverge from the WHATWG spec by mapping control characters - // in the range [0x80, 0xa0) to U+FFFD. 
- continue - } - mapping[x] = rune(y) - } - return ascii + string(mapping) -} - -func getUCM(url string) string { - res, err := http.Get(url) - if err != nil { - log.Fatalf("%q: Get: %v", url, err) - } - defer res.Body.Close() - - mapping := make([]rune, 256) - for i := range mapping { - mapping[i] = '\ufffd' - } - - charsFound := 0 - scanner := bufio.NewScanner(res.Body) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - if s == "" || s[0] == '#' { - continue - } - var c byte - var r rune - if _, err := fmt.Sscanf(s, ` \x%x |0`, &r, &c); err != nil { - continue - } - mapping[c] = r - charsFound++ - } - - if charsFound < 200 { - log.Fatalf("%q: only %d characters found (wrong page format?)", url, charsFound) - } - - return string(mapping) -} - -func main() { - mibs := map[string]bool{} - all := []string{} - - w := gen.NewCodeWriter() - defer w.WriteGoFile("tables.go", "charmap") - - printf := func(s string, a ...interface{}) { fmt.Fprintf(w, s, a...) } - - printf("import (\n") - printf("\t\"golang.org/x/text/encoding\"\n") - printf("\t\"golang.org/x/text/encoding/internal/identifier\"\n") - printf(")\n\n") - for _, e := range encodings { - varNames := strings.Split(e.varName, ",") - all = append(all, varNames...) - varName := varNames[0] - switch { - case strings.HasPrefix(e.mapping, "http://encoding.spec.whatwg.org/"): - e.mapping = getWHATWG(e.mapping) - case strings.HasPrefix(e.mapping, "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/"): - e.mapping = getUCM(e.mapping) - } - - asciiSuperset, low := strings.HasPrefix(e.mapping, ascii), 0x00 - if asciiSuperset { - low = 0x80 - } - lvn := 1 - if strings.HasPrefix(varName, "ISO") || strings.HasPrefix(varName, "KOI") { - lvn = 3 - } - lowerVarName := strings.ToLower(varName[:lvn]) + varName[lvn:] - printf("// %s is the %s encoding.\n", varName, e.name) - if e.comment != "" { - printf("//\n// %s\n", e.comment) - } - printf("var %s *Charmap = &%s\n\nvar %s = Charmap{\nname: %q,\n", - varName, lowerVarName, lowerVarName, e.name) - if mibs[e.mib] { - log.Fatalf("MIB type %q declared multiple times.", e.mib) - } - printf("mib: identifier.%s,\n", e.mib) - printf("asciiSuperset: %t,\n", asciiSuperset) - printf("low: 0x%02x,\n", low) - printf("replacement: 0x%02x,\n", e.replacement) - - printf("decode: [256]utf8Enc{\n") - i, backMapping := 0, map[rune]byte{} - for _, c := range e.mapping { - if _, ok := backMapping[c]; !ok && c != utf8.RuneError { - backMapping[c] = byte(i) - } - var buf [8]byte - n := utf8.EncodeRune(buf[:], c) - if n > 3 { - panic(fmt.Sprintf("rune %q (%U) is too long", c, c)) - } - printf("{%d,[3]byte{0x%02x,0x%02x,0x%02x}},", n, buf[0], buf[1], buf[2]) - if i%2 == 1 { - printf("\n") - } - i++ - } - printf("},\n") - - printf("encode: [256]uint32{\n") - encode := make([]uint32, 0, 256) - for c, i := range backMapping { - encode = append(encode, uint32(i)<<24|uint32(c)) - } - sort.Sort(byRune(encode)) - for len(encode) < cap(encode) { - encode = append(encode, encode[len(encode)-1]) - } - for i, enc := range encode { - printf("0x%08x,", enc) - if i%8 == 7 { - printf("\n") - } - } - printf("},\n}\n") - - // Add an estimate of the size of a single Charmap{} struct value, which - // includes two 256 elem arrays of 4 bytes and some extra fields, which - // align to 3 uint64s on 64-bit architectures. - w.Size += 2*4*256 + 3*8 - } - // TODO: add proper line breaking. 
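// Illustrative usage sketch (not taken from this change): the decode/encode
// arrays emitted by the generator above back the exported *charmap.Charmap
// values, which are normally consumed through an encoding.Decoder/Encoder.
// The input bytes below are an assumed Windows-1252 sample ("café", with 0xE9
// for the accented e).
package main

import (
	"fmt"
	"log"

	"golang.org/x/text/encoding/charmap"
)

func main() {
	dec := charmap.Windows1252.NewDecoder()
	utf8Bytes, err := dec.Bytes([]byte{'c', 'a', 'f', 0xE9})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(utf8Bytes)) // café

	// Round-trip back to the legacy encoding.
	enc := charmap.Windows1252.NewEncoder()
	raw, err := enc.Bytes(utf8Bytes)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("% X\n", raw) // 63 61 66 E9
}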
- printf("var listAll = []encoding.Encoding{\n%s,\n}\n\n", strings.Join(all, ",\n")) -} - -type byRune []uint32 - -func (b byRune) Len() int { return len(b) } -func (b byRune) Less(i, j int) bool { return b[i]&0xffffff < b[j]&0xffffff } -func (b byRune) Swap(i, j int) { b[i], b[j] = b[j], b[i] } diff --git a/vendor/golang.org/x/text/encoding/htmlindex/gen.go b/vendor/golang.org/x/text/encoding/htmlindex/gen.go deleted file mode 100644 index ac6b4a77f..000000000 --- a/vendor/golang.org/x/text/encoding/htmlindex/gen.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "bytes" - "encoding/json" - "fmt" - "log" - "strings" - - "golang.org/x/text/internal/gen" -) - -type group struct { - Encodings []struct { - Labels []string - Name string - } -} - -func main() { - gen.Init() - - r := gen.Open("https://encoding.spec.whatwg.org", "whatwg", "encodings.json") - var groups []group - if err := json.NewDecoder(r).Decode(&groups); err != nil { - log.Fatalf("Error reading encodings.json: %v", err) - } - - w := &bytes.Buffer{} - fmt.Fprintln(w, "type htmlEncoding byte") - fmt.Fprintln(w, "const (") - for i, g := range groups { - for _, e := range g.Encodings { - key := strings.ToLower(e.Name) - name := consts[key] - if name == "" { - log.Fatalf("No const defined for %s.", key) - } - if i == 0 { - fmt.Fprintf(w, "%s htmlEncoding = iota\n", name) - } else { - fmt.Fprintf(w, "%s\n", name) - } - } - } - fmt.Fprintln(w, "numEncodings") - fmt.Fprint(w, ")\n\n") - - fmt.Fprintln(w, "var canonical = [numEncodings]string{") - for _, g := range groups { - for _, e := range g.Encodings { - fmt.Fprintf(w, "%q,\n", strings.ToLower(e.Name)) - } - } - fmt.Fprint(w, "}\n\n") - - fmt.Fprintln(w, "var nameMap = map[string]htmlEncoding{") - for _, g := range groups { - for _, e := range g.Encodings { - for _, l := range e.Labels { - key := strings.ToLower(e.Name) - name := consts[key] - fmt.Fprintf(w, "%q: %s,\n", l, name) - } - } - } - fmt.Fprint(w, "}\n\n") - - var tags []string - fmt.Fprintln(w, "var localeMap = []htmlEncoding{") - for _, loc := range locales { - tags = append(tags, loc.tag) - fmt.Fprintf(w, "%s, // %s \n", consts[loc.name], loc.tag) - } - fmt.Fprint(w, "}\n\n") - - fmt.Fprintf(w, "const locales = %q\n", strings.Join(tags, " ")) - - gen.WriteGoFile("tables.go", "htmlindex", w.Bytes()) -} - -// consts maps canonical encoding name to internal constant. 
-var consts = map[string]string{ - "utf-8": "utf8", - "ibm866": "ibm866", - "iso-8859-2": "iso8859_2", - "iso-8859-3": "iso8859_3", - "iso-8859-4": "iso8859_4", - "iso-8859-5": "iso8859_5", - "iso-8859-6": "iso8859_6", - "iso-8859-7": "iso8859_7", - "iso-8859-8": "iso8859_8", - "iso-8859-8-i": "iso8859_8I", - "iso-8859-10": "iso8859_10", - "iso-8859-13": "iso8859_13", - "iso-8859-14": "iso8859_14", - "iso-8859-15": "iso8859_15", - "iso-8859-16": "iso8859_16", - "koi8-r": "koi8r", - "koi8-u": "koi8u", - "macintosh": "macintosh", - "windows-874": "windows874", - "windows-1250": "windows1250", - "windows-1251": "windows1251", - "windows-1252": "windows1252", - "windows-1253": "windows1253", - "windows-1254": "windows1254", - "windows-1255": "windows1255", - "windows-1256": "windows1256", - "windows-1257": "windows1257", - "windows-1258": "windows1258", - "x-mac-cyrillic": "macintoshCyrillic", - "gbk": "gbk", - "gb18030": "gb18030", - // "hz-gb-2312": "hzgb2312", // Was removed from WhatWG - "big5": "big5", - "euc-jp": "eucjp", - "iso-2022-jp": "iso2022jp", - "shift_jis": "shiftJIS", - "euc-kr": "euckr", - "replacement": "replacement", - "utf-16be": "utf16be", - "utf-16le": "utf16le", - "x-user-defined": "xUserDefined", -} - -// locales is taken from -// https://html.spec.whatwg.org/multipage/syntax.html#encoding-sniffing-algorithm. -var locales = []struct{ tag, name string }{ - // The default value. Explicitly state latin to benefit from the exact - // script option, while still making 1252 the default encoding for languages - // written in Latin script. - {"und_Latn", "windows-1252"}, - {"ar", "windows-1256"}, - {"ba", "windows-1251"}, - {"be", "windows-1251"}, - {"bg", "windows-1251"}, - {"cs", "windows-1250"}, - {"el", "iso-8859-7"}, - {"et", "windows-1257"}, - {"fa", "windows-1256"}, - {"he", "windows-1255"}, - {"hr", "windows-1250"}, - {"hu", "iso-8859-2"}, - {"ja", "shift_jis"}, - {"kk", "windows-1251"}, - {"ko", "euc-kr"}, - {"ku", "windows-1254"}, - {"ky", "windows-1251"}, - {"lt", "windows-1257"}, - {"lv", "windows-1257"}, - {"mk", "windows-1251"}, - {"pl", "iso-8859-2"}, - {"ru", "windows-1251"}, - {"sah", "windows-1251"}, - {"sk", "windows-1250"}, - {"sl", "iso-8859-2"}, - {"sr", "windows-1251"}, - {"tg", "windows-1251"}, - {"th", "windows-874"}, - {"tr", "windows-1254"}, - {"tt", "windows-1251"}, - {"uk", "windows-1251"}, - {"vi", "windows-1258"}, - {"zh-hans", "gb18030"}, - {"zh-hant", "big5"}, -} diff --git a/vendor/golang.org/x/text/encoding/internal/identifier/gen.go b/vendor/golang.org/x/text/encoding/internal/identifier/gen.go deleted file mode 100644 index 26cfef9c6..000000000 --- a/vendor/golang.org/x/text/encoding/internal/identifier/gen.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
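// Illustrative usage sketch: the nameMap/localeMap tables emitted by gen.go
// above back the public htmlindex package. Resolving a WHATWG label and the
// per-language sniffing default looks roughly like this; "latin1" and
// language.Russian are example inputs chosen here for illustration.
package main

import (
	"fmt"
	"log"

	"golang.org/x/text/encoding/htmlindex"
	"golang.org/x/text/language"
)

func main() {
	// WHATWG labels such as "latin1" resolve to their canonical encoding.
	enc, err := htmlindex.Get("latin1")
	if err != nil {
		log.Fatal(err)
	}
	name, err := htmlindex.Name(enc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(name) // windows-1252

	// The locale table feeds the default encoding per language.
	fmt.Println(htmlindex.LanguageDefault(language.Russian)) // windows-1251
}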
- -// +build ignore - -package main - -import ( - "bytes" - "encoding/xml" - "fmt" - "io" - "log" - "strings" - - "golang.org/x/text/internal/gen" -) - -type registry struct { - XMLName xml.Name `xml:"registry"` - Updated string `xml:"updated"` - Registry []struct { - ID string `xml:"id,attr"` - Record []struct { - Name string `xml:"name"` - Xref []struct { - Type string `xml:"type,attr"` - Data string `xml:"data,attr"` - } `xml:"xref"` - Desc struct { - Data string `xml:",innerxml"` - // Any []struct { - // Data string `xml:",chardata"` - // } `xml:",any"` - // Data string `xml:",chardata"` - } `xml:"description,"` - MIB string `xml:"value"` - Alias []string `xml:"alias"` - MIME string `xml:"preferred_alias"` - } `xml:"record"` - } `xml:"registry"` -} - -func main() { - r := gen.OpenIANAFile("assignments/character-sets/character-sets.xml") - reg := ®istry{} - if err := xml.NewDecoder(r).Decode(®); err != nil && err != io.EOF { - log.Fatalf("Error decoding charset registry: %v", err) - } - if len(reg.Registry) == 0 || reg.Registry[0].ID != "character-sets-1" { - log.Fatalf("Unexpected ID %s", reg.Registry[0].ID) - } - - w := &bytes.Buffer{} - fmt.Fprintf(w, "const (\n") - for _, rec := range reg.Registry[0].Record { - constName := "" - for _, a := range rec.Alias { - if strings.HasPrefix(a, "cs") && strings.IndexByte(a, '-') == -1 { - // Some of the constant definitions have comments in them. Strip those. - constName = strings.Title(strings.SplitN(a[2:], "\n", 2)[0]) - } - } - if constName == "" { - switch rec.MIB { - case "2085": - constName = "HZGB2312" // Not listed as alias for some reason. - default: - log.Fatalf("No cs alias defined for %s.", rec.MIB) - } - } - if rec.MIME != "" { - rec.MIME = fmt.Sprintf(" (MIME: %s)", rec.MIME) - } - fmt.Fprintf(w, "// %s is the MIB identifier with IANA name %s%s.\n//\n", constName, rec.Name, rec.MIME) - if len(rec.Desc.Data) > 0 { - fmt.Fprint(w, "// ") - d := xml.NewDecoder(strings.NewReader(rec.Desc.Data)) - inElem := true - attr := "" - for { - t, err := d.Token() - if err != nil { - if err != io.EOF { - log.Fatal(err) - } - break - } - switch x := t.(type) { - case xml.CharData: - attr = "" // Don't need attribute info. - a := bytes.Split([]byte(x), []byte("\n")) - for i, b := range a { - if b = bytes.TrimSpace(b); len(b) != 0 { - if !inElem && i > 0 { - fmt.Fprint(w, "\n// ") - } - inElem = false - fmt.Fprintf(w, "%s ", string(b)) - } - } - case xml.StartElement: - if x.Name.Local == "xref" { - inElem = true - use := false - for _, a := range x.Attr { - if a.Name.Local == "type" { - use = use || a.Value != "person" - } - if a.Name.Local == "data" && use { - // Patch up URLs to use https. From some links, the - // https version is different from the http one. 
- s := a.Value - s = strings.Replace(s, "http://", "https://", -1) - s = strings.Replace(s, "/unicode/", "/", -1) - attr = s + " " - } - } - } - case xml.EndElement: - inElem = false - fmt.Fprint(w, attr) - } - } - fmt.Fprint(w, "\n") - } - for _, x := range rec.Xref { - switch x.Type { - case "rfc": - fmt.Fprintf(w, "// Reference: %s\n", strings.ToUpper(x.Data)) - case "uri": - fmt.Fprintf(w, "// Reference: %s\n", x.Data) - } - } - fmt.Fprintf(w, "%s MIB = %s\n", constName, rec.MIB) - fmt.Fprintln(w) - } - fmt.Fprintln(w, ")") - - gen.WriteGoFile("mib.go", "identifier", w.Bytes()) -} diff --git a/vendor/golang.org/x/text/encoding/japanese/maketables.go b/vendor/golang.org/x/text/encoding/japanese/maketables.go deleted file mode 100644 index 023957a67..000000000 --- a/vendor/golang.org/x/text/encoding/japanese/maketables.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This program generates tables.go: -// go run maketables.go | gofmt > tables.go - -// TODO: Emoji extensions? -// https://www.unicode.org/faq/emoji_dingbats.html -// https://www.unicode.org/Public/UNIDATA/EmojiSources.txt - -import ( - "bufio" - "fmt" - "log" - "net/http" - "sort" - "strings" -) - -type entry struct { - jisCode, table int -} - -func main() { - fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n") - fmt.Printf("// Package japanese provides Japanese encodings such as EUC-JP and Shift JIS.\n") - fmt.Printf(`package japanese // import "golang.org/x/text/encoding/japanese"` + "\n\n") - - reverse := [65536]entry{} - for i := range reverse { - reverse[i].table = -1 - } - - tables := []struct { - url string - name string - }{ - {"http://encoding.spec.whatwg.org/index-jis0208.txt", "0208"}, - {"http://encoding.spec.whatwg.org/index-jis0212.txt", "0212"}, - } - for i, table := range tables { - res, err := http.Get(table.url) - if err != nil { - log.Fatalf("%q: Get: %v", table.url, err) - } - defer res.Body.Close() - - mapping := [65536]uint16{} - - scanner := bufio.NewScanner(res.Body) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - if s == "" || s[0] == '#' { - continue - } - x, y := 0, uint16(0) - if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { - log.Fatalf("%q: could not parse %q", table.url, s) - } - if x < 0 || 120*94 <= x { - log.Fatalf("%q: JIS code %d is out of range", table.url, x) - } - mapping[x] = y - if reverse[y].table == -1 { - reverse[y] = entry{jisCode: x, table: i} - } - } - if err := scanner.Err(); err != nil { - log.Fatalf("%q: scanner error: %v", table.url, err) - } - - fmt.Printf("// jis%sDecode is the decoding table from JIS %s code to Unicode.\n// It is defined at %s\n", - table.name, table.name, table.url) - fmt.Printf("var jis%sDecode = [...]uint16{\n", table.name) - for i, m := range mapping { - if m != 0 { - fmt.Printf("\t%d: 0x%04X,\n", i, m) - } - } - fmt.Printf("}\n\n") - } - - // Any run of at least separation continuous zero entries in the reverse map will - // be a separate encode table. 
- const separation = 1024 - - intervals := []interval(nil) - low, high := -1, -1 - for i, v := range reverse { - if v.table == -1 { - continue - } - if low < 0 { - low = i - } else if i-high >= separation { - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - low = i - } - high = i + 1 - } - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - sort.Sort(byDecreasingLength(intervals)) - - fmt.Printf("const (\n") - fmt.Printf("\tjis0208 = 1\n") - fmt.Printf("\tjis0212 = 2\n") - fmt.Printf("\tcodeMask = 0x7f\n") - fmt.Printf("\tcodeShift = 7\n") - fmt.Printf("\ttableShift = 14\n") - fmt.Printf(")\n\n") - - fmt.Printf("const numEncodeTables = %d\n\n", len(intervals)) - fmt.Printf("// encodeX are the encoding tables from Unicode to JIS code,\n") - fmt.Printf("// sorted by decreasing length.\n") - for i, v := range intervals { - fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high) - } - fmt.Printf("//\n") - fmt.Printf("// The high two bits of the value record whether the JIS code comes from the\n") - fmt.Printf("// JIS0208 table (high bits == 1) or the JIS0212 table (high bits == 2).\n") - fmt.Printf("// The low 14 bits are two 7-bit unsigned integers j1 and j2 that form the\n") - fmt.Printf("// JIS code (94*j1 + j2) within that table.\n") - fmt.Printf("\n") - - for i, v := range intervals { - fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high) - fmt.Printf("var encode%d = [...]uint16{\n", i) - for j := v.low; j < v.high; j++ { - x := reverse[j] - if x.table == -1 { - continue - } - fmt.Printf("\t%d - %d: jis%s<<14 | 0x%02X<<7 | 0x%02X,\n", - j, v.low, tables[x.table].name, x.jisCode/94, x.jisCode%94) - } - fmt.Printf("}\n\n") - } -} - -// interval is a half-open interval [low, high). -type interval struct { - low, high int -} - -func (i interval) len() int { return i.high - i.low } - -// byDecreasingLength sorts intervals by decreasing length. -type byDecreasingLength []interval - -func (b byDecreasingLength) Len() int { return len(b) } -func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() } -func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] } diff --git a/vendor/golang.org/x/text/encoding/korean/maketables.go b/vendor/golang.org/x/text/encoding/korean/maketables.go deleted file mode 100644 index c84034fb6..000000000 --- a/vendor/golang.org/x/text/encoding/korean/maketables.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
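// Sketch of the interval-splitting idea shared by the deleted maketables
// generators above: runs of non-zero entries in a sparse reverse map are
// grouped into half-open intervals, and a new interval starts whenever a gap
// of at least `separation` zero entries is seen; the longest tables are
// emitted first. The sample data below is made up for illustration.
package main

import (
	"fmt"
	"sort"
)

type interval struct{ low, high int }

func (i interval) len() int { return i.high - i.low }

func splitIntervals(reverse []uint16, separation int) []interval {
	var intervals []interval
	low, high := -1, -1
	for i, v := range reverse {
		if v == 0 {
			continue
		}
		if low < 0 {
			low = i
		} else if i-high >= separation {
			intervals = append(intervals, interval{low, high})
			low = i
		}
		high = i + 1
	}
	if high >= 0 {
		intervals = append(intervals, interval{low, high})
	}
	sort.Slice(intervals, func(i, j int) bool { return intervals[i].len() > intervals[j].len() })
	return intervals
}

func main() {
	reverse := make([]uint16, 5000)
	for _, i := range []int{10, 11, 40, 2000, 2001, 2002, 4500} {
		reverse[i] = 1
	}
	fmt.Println(splitIntervals(reverse, 1024)) // [{10 41} {2000 2003} {4500 4501}]
}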
- -// +build ignore - -package main - -// This program generates tables.go: -// go run maketables.go | gofmt > tables.go - -import ( - "bufio" - "fmt" - "log" - "net/http" - "sort" - "strings" -) - -func main() { - fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n") - fmt.Printf("// Package korean provides Korean encodings such as EUC-KR.\n") - fmt.Printf(`package korean // import "golang.org/x/text/encoding/korean"` + "\n\n") - - res, err := http.Get("http://encoding.spec.whatwg.org/index-euc-kr.txt") - if err != nil { - log.Fatalf("Get: %v", err) - } - defer res.Body.Close() - - mapping := [65536]uint16{} - reverse := [65536]uint16{} - - scanner := bufio.NewScanner(res.Body) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - if s == "" || s[0] == '#' { - continue - } - x, y := uint16(0), uint16(0) - if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { - log.Fatalf("could not parse %q", s) - } - if x < 0 || 178*(0xc7-0x81)+(0xfe-0xc7)*94+(0xff-0xa1) <= x { - log.Fatalf("EUC-KR code %d is out of range", x) - } - mapping[x] = y - if reverse[y] == 0 { - c0, c1 := uint16(0), uint16(0) - if x < 178*(0xc7-0x81) { - c0 = uint16(x/178) + 0x81 - c1 = uint16(x % 178) - switch { - case c1 < 1*26: - c1 += 0x41 - case c1 < 2*26: - c1 += 0x47 - default: - c1 += 0x4d - } - } else { - x -= 178 * (0xc7 - 0x81) - c0 = uint16(x/94) + 0xc7 - c1 = uint16(x%94) + 0xa1 - } - reverse[y] = c0<<8 | c1 - } - } - if err := scanner.Err(); err != nil { - log.Fatalf("scanner error: %v", err) - } - - fmt.Printf("// decode is the decoding table from EUC-KR code to Unicode.\n") - fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-euc-kr.txt\n") - fmt.Printf("var decode = [...]uint16{\n") - for i, v := range mapping { - if v != 0 { - fmt.Printf("\t%d: 0x%04X,\n", i, v) - } - } - fmt.Printf("}\n\n") - - // Any run of at least separation continuous zero entries in the reverse map will - // be a separate encode table. - const separation = 1024 - - intervals := []interval(nil) - low, high := -1, -1 - for i, v := range reverse { - if v == 0 { - continue - } - if low < 0 { - low = i - } else if i-high >= separation { - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - low = i - } - high = i + 1 - } - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - sort.Sort(byDecreasingLength(intervals)) - - fmt.Printf("const numEncodeTables = %d\n\n", len(intervals)) - fmt.Printf("// encodeX are the encoding tables from Unicode to EUC-KR code,\n") - fmt.Printf("// sorted by decreasing length.\n") - for i, v := range intervals { - fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high) - } - fmt.Printf("\n") - - for i, v := range intervals { - fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high) - fmt.Printf("var encode%d = [...]uint16{\n", i) - for j := v.low; j < v.high; j++ { - x := reverse[j] - if x == 0 { - continue - } - fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x) - } - fmt.Printf("}\n\n") - } -} - -// interval is a half-open interval [low, high). -type interval struct { - low, high int -} - -func (i interval) len() int { return i.high - i.low } - -// byDecreasingLength sorts intervals by decreasing length. 
-type byDecreasingLength []interval - -func (b byDecreasingLength) Len() int { return len(b) } -func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() } -func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] } diff --git a/vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go b/vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go deleted file mode 100644 index 55016c786..000000000 --- a/vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This program generates tables.go: -// go run maketables.go | gofmt > tables.go - -import ( - "bufio" - "fmt" - "log" - "net/http" - "sort" - "strings" -) - -func main() { - fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n") - fmt.Printf("// Package simplifiedchinese provides Simplified Chinese encodings such as GBK.\n") - fmt.Printf(`package simplifiedchinese // import "golang.org/x/text/encoding/simplifiedchinese"` + "\n\n") - - printGB18030() - printGBK() -} - -func printGB18030() { - res, err := http.Get("http://encoding.spec.whatwg.org/index-gb18030.txt") - if err != nil { - log.Fatalf("Get: %v", err) - } - defer res.Body.Close() - - fmt.Printf("// gb18030 is the table from http://encoding.spec.whatwg.org/index-gb18030.txt\n") - fmt.Printf("var gb18030 = [...][2]uint16{\n") - scanner := bufio.NewScanner(res.Body) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - if s == "" || s[0] == '#' { - continue - } - x, y := uint32(0), uint32(0) - if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { - log.Fatalf("could not parse %q", s) - } - if x < 0x10000 && y < 0x10000 { - fmt.Printf("\t{0x%04x, 0x%04x},\n", x, y) - } - } - fmt.Printf("}\n\n") -} - -func printGBK() { - res, err := http.Get("http://encoding.spec.whatwg.org/index-gbk.txt") - if err != nil { - log.Fatalf("Get: %v", err) - } - defer res.Body.Close() - - mapping := [65536]uint16{} - reverse := [65536]uint16{} - - scanner := bufio.NewScanner(res.Body) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - if s == "" || s[0] == '#' { - continue - } - x, y := uint16(0), uint16(0) - if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { - log.Fatalf("could not parse %q", s) - } - if x < 0 || 126*190 <= x { - log.Fatalf("GBK code %d is out of range", x) - } - mapping[x] = y - if reverse[y] == 0 { - c0, c1 := x/190, x%190 - if c1 >= 0x3f { - c1++ - } - reverse[y] = (0x81+c0)<<8 | (0x40 + c1) - } - } - if err := scanner.Err(); err != nil { - log.Fatalf("scanner error: %v", err) - } - - fmt.Printf("// decode is the decoding table from GBK code to Unicode.\n") - fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-gbk.txt\n") - fmt.Printf("var decode = [...]uint16{\n") - for i, v := range mapping { - if v != 0 { - fmt.Printf("\t%d: 0x%04X,\n", i, v) - } - } - fmt.Printf("}\n\n") - - // Any run of at least separation continuous zero entries in the reverse map will - // be a separate encode table. 
- const separation = 1024 - - intervals := []interval(nil) - low, high := -1, -1 - for i, v := range reverse { - if v == 0 { - continue - } - if low < 0 { - low = i - } else if i-high >= separation { - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - low = i - } - high = i + 1 - } - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - sort.Sort(byDecreasingLength(intervals)) - - fmt.Printf("const numEncodeTables = %d\n\n", len(intervals)) - fmt.Printf("// encodeX are the encoding tables from Unicode to GBK code,\n") - fmt.Printf("// sorted by decreasing length.\n") - for i, v := range intervals { - fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high) - } - fmt.Printf("\n") - - for i, v := range intervals { - fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high) - fmt.Printf("var encode%d = [...]uint16{\n", i) - for j := v.low; j < v.high; j++ { - x := reverse[j] - if x == 0 { - continue - } - fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x) - } - fmt.Printf("}\n\n") - } -} - -// interval is a half-open interval [low, high). -type interval struct { - low, high int -} - -func (i interval) len() int { return i.high - i.low } - -// byDecreasingLength sorts intervals by decreasing length. -type byDecreasingLength []interval - -func (b byDecreasingLength) Len() int { return len(b) } -func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() } -func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] } diff --git a/vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go b/vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go deleted file mode 100644 index cf7fdb31a..000000000 --- a/vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This program generates tables.go: -// go run maketables.go | gofmt > tables.go - -import ( - "bufio" - "fmt" - "log" - "net/http" - "sort" - "strings" -) - -func main() { - fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n") - fmt.Printf("// Package traditionalchinese provides Traditional Chinese encodings such as Big5.\n") - fmt.Printf(`package traditionalchinese // import "golang.org/x/text/encoding/traditionalchinese"` + "\n\n") - - res, err := http.Get("http://encoding.spec.whatwg.org/index-big5.txt") - if err != nil { - log.Fatalf("Get: %v", err) - } - defer res.Body.Close() - - mapping := [65536]uint32{} - reverse := [65536 * 4]uint16{} - - scanner := bufio.NewScanner(res.Body) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - if s == "" || s[0] == '#' { - continue - } - x, y := uint16(0), uint32(0) - if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { - log.Fatalf("could not parse %q", s) - } - if x < 0 || 126*157 <= x { - log.Fatalf("Big5 code %d is out of range", x) - } - mapping[x] = y - - // The WHATWG spec http://encoding.spec.whatwg.org/#indexes says that - // "The index pointer for code point in index is the first pointer - // corresponding to code point in index", which would normally mean - // that the code below should be guarded by "if reverse[y] == 0", but - // last instead of first seems to match the behavior of - // "iconv -f UTF-8 -t BIG5". 
For example, U+8005 者 occurs twice in - // http://encoding.spec.whatwg.org/index-big5.txt, as index 2148 - // (encoded as "\x8e\xcd") and index 6543 (encoded as "\xaa\xcc") - // and "echo 者 | iconv -f UTF-8 -t BIG5 | xxd" gives "\xaa\xcc". - c0, c1 := x/157, x%157 - if c1 < 0x3f { - c1 += 0x40 - } else { - c1 += 0x62 - } - reverse[y] = (0x81+c0)<<8 | c1 - } - if err := scanner.Err(); err != nil { - log.Fatalf("scanner error: %v", err) - } - - fmt.Printf("// decode is the decoding table from Big5 code to Unicode.\n") - fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-big5.txt\n") - fmt.Printf("var decode = [...]uint32{\n") - for i, v := range mapping { - if v != 0 { - fmt.Printf("\t%d: 0x%08X,\n", i, v) - } - } - fmt.Printf("}\n\n") - - // Any run of at least separation continuous zero entries in the reverse map will - // be a separate encode table. - const separation = 1024 - - intervals := []interval(nil) - low, high := -1, -1 - for i, v := range reverse { - if v == 0 { - continue - } - if low < 0 { - low = i - } else if i-high >= separation { - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - low = i - } - high = i + 1 - } - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - sort.Sort(byDecreasingLength(intervals)) - - fmt.Printf("const numEncodeTables = %d\n\n", len(intervals)) - fmt.Printf("// encodeX are the encoding tables from Unicode to Big5 code,\n") - fmt.Printf("// sorted by decreasing length.\n") - for i, v := range intervals { - fmt.Printf("// encode%d: %5d entries for runes in [%6d, %6d).\n", i, v.len(), v.low, v.high) - } - fmt.Printf("\n") - - for i, v := range intervals { - fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high) - fmt.Printf("var encode%d = [...]uint16{\n", i) - for j := v.low; j < v.high; j++ { - x := reverse[j] - if x == 0 { - continue - } - fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x) - } - fmt.Printf("}\n\n") - } -} - -// interval is a half-open interval [low, high). -type interval struct { - low, high int -} - -func (i interval) len() int { return i.high - i.low } - -// byDecreasingLength sorts intervals by decreasing length. -type byDecreasingLength []interval - -func (b byDecreasingLength) Len() int { return len(b) } -func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() } -func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] } diff --git a/vendor/golang.org/x/text/internal/language/compact/gen.go b/vendor/golang.org/x/text/internal/language/compact/gen.go deleted file mode 100644 index 0c36a052f..000000000 --- a/vendor/golang.org/x/text/internal/language/compact/gen.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Language tag table generator. -// Data read from the web. 
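// Small usage sketch: the Big5 tables generated above back the public
// traditionalchinese package. This checks the mapping discussed in the
// comment above (U+8005 者 encodes to AA CC rather than 8E CD).
package main

import (
	"fmt"
	"log"

	"golang.org/x/text/encoding/traditionalchinese"
)

func main() {
	enc := traditionalchinese.Big5.NewEncoder()
	b, err := enc.Bytes([]byte("者"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("% X\n", b) // AA CC

	dec := traditionalchinese.Big5.NewDecoder()
	s, err := dec.String(string(b))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(s) // 者
}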
- -package main - -import ( - "flag" - "fmt" - "log" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/unicode/cldr" -) - -var ( - test = flag.Bool("test", - false, - "test existing tables; can be used to compare web data with package data.") - outputFile = flag.String("output", - "tables.go", - "output file for generated tables") -) - -func main() { - gen.Init() - - w := gen.NewCodeWriter() - defer w.WriteGoFile("tables.go", "compact") - - fmt.Fprintln(w, `import "golang.org/x/text/internal/language"`) - - b := newBuilder(w) - gen.WriteCLDRVersion(w) - - b.writeCompactIndex() -} - -type builder struct { - w *gen.CodeWriter - data *cldr.CLDR - supp *cldr.SupplementalData -} - -func newBuilder(w *gen.CodeWriter) *builder { - r := gen.OpenCLDRCoreZip() - defer r.Close() - d := &cldr.Decoder{} - data, err := d.DecodeZip(r) - if err != nil { - log.Fatal(err) - } - b := builder{ - w: w, - data: data, - supp: data.Supplemental(), - } - return &b -} diff --git a/vendor/golang.org/x/text/internal/language/compact/gen_index.go b/vendor/golang.org/x/text/internal/language/compact/gen_index.go deleted file mode 100644 index 136cefaf0..000000000 --- a/vendor/golang.org/x/text/internal/language/compact/gen_index.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This file generates derivative tables based on the language package itself. - -import ( - "fmt" - "log" - "sort" - "strings" - - "golang.org/x/text/internal/language" -) - -// Compact indices: -// Note -va-X variants only apply to localization variants. -// BCP variants only ever apply to language. -// The only ambiguity between tags is with regions. - -func (b *builder) writeCompactIndex() { - // Collect all language tags for which we have any data in CLDR. - m := map[language.Tag]bool{} - for _, lang := range b.data.Locales() { - // We include all locales unconditionally to be consistent with en_US. - // We want en_US, even though it has no data associated with it. - - // TODO: put any of the languages for which no data exists at the end - // of the index. This allows all components based on ICU to use that - // as the cutoff point. - // if x := data.RawLDML(lang); false || - // x.LocaleDisplayNames != nil || - // x.Characters != nil || - // x.Delimiters != nil || - // x.Measurement != nil || - // x.Dates != nil || - // x.Numbers != nil || - // x.Units != nil || - // x.ListPatterns != nil || - // x.Collations != nil || - // x.Segmentations != nil || - // x.Rbnf != nil || - // x.Annotations != nil || - // x.Metadata != nil { - - // TODO: support POSIX natively, albeit non-standard. - tag := language.Make(strings.Replace(lang, "_POSIX", "-u-va-posix", 1)) - m[tag] = true - // } - } - - // TODO: plural rules are also defined for the deprecated tags: - // iw mo sh tl - // Consider removing these as compact tags. - - // Include locales for plural rules, which uses a different structure. 
- for _, plurals := range b.supp.Plurals { - for _, rules := range plurals.PluralRules { - for _, lang := range strings.Split(rules.Locales, " ") { - m[language.Make(lang)] = true - } - } - } - - var coreTags []language.CompactCoreInfo - var special []string - - for t := range m { - if x := t.Extensions(); len(x) != 0 && fmt.Sprint(x) != "[u-va-posix]" { - log.Fatalf("Unexpected extension %v in %v", x, t) - } - if len(t.Variants()) == 0 && len(t.Extensions()) == 0 { - cci, ok := language.GetCompactCore(t) - if !ok { - log.Fatalf("Locale for non-basic language %q", t) - } - coreTags = append(coreTags, cci) - } else { - special = append(special, t.String()) - } - } - - w := b.w - - sort.Slice(coreTags, func(i, j int) bool { return coreTags[i] < coreTags[j] }) - sort.Strings(special) - - w.WriteComment(` - NumCompactTags is the number of common tags. The maximum tag is - NumCompactTags-1.`) - w.WriteConst("NumCompactTags", len(m)) - - fmt.Fprintln(w, "const (") - for i, t := range coreTags { - fmt.Fprintf(w, "%s ID = %d\n", ident(t.Tag().String()), i) - } - for i, t := range special { - fmt.Fprintf(w, "%s ID = %d\n", ident(t), i+len(coreTags)) - } - fmt.Fprintln(w, ")") - - w.WriteVar("coreTags", coreTags) - - w.WriteConst("specialTagsStr", strings.Join(special, " ")) -} - -func ident(s string) string { - return strings.Replace(s, "-", "", -1) + "Index" -} diff --git a/vendor/golang.org/x/text/internal/language/compact/gen_parents.go b/vendor/golang.org/x/text/internal/language/compact/gen_parents.go deleted file mode 100644 index 9543d5832..000000000 --- a/vendor/golang.org/x/text/internal/language/compact/gen_parents.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "log" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/language" - "golang.org/x/text/internal/language/compact" - "golang.org/x/text/unicode/cldr" -) - -func main() { - r := gen.OpenCLDRCoreZip() - defer r.Close() - - d := &cldr.Decoder{} - data, err := d.DecodeZip(r) - if err != nil { - log.Fatalf("DecodeZip: %v", err) - } - - w := gen.NewCodeWriter() - defer w.WriteGoFile("parents.go", "compact") - - // Create parents table. - type ID uint16 - parents := make([]ID, compact.NumCompactTags) - for _, loc := range data.Locales() { - tag := language.MustParse(loc) - index, ok := compact.FromTag(tag) - if !ok { - continue - } - parentIndex := compact.ID(0) // und - for p := tag.Parent(); p != language.Und; p = p.Parent() { - if x, ok := compact.FromTag(p); ok { - parentIndex = x - break - } - } - parents[index] = ID(parentIndex) - } - - w.WriteComment(` - parents maps a compact index of a tag to the compact index of the parent of - this tag.`) - w.WriteVar("parents", parents) -} diff --git a/vendor/golang.org/x/text/internal/language/gen.go b/vendor/golang.org/x/text/internal/language/gen.go deleted file mode 100644 index cdcc7febc..000000000 --- a/vendor/golang.org/x/text/internal/language/gen.go +++ /dev/null @@ -1,1520 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Language tag table generator. -// Data read from the web. 
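// Sketch of the relationship gen_parents.go (above) precomputes: for every
// compact tag it walks Tag.Parent() until it reaches another tag with a
// compact index, falling back to "und". The same chain can be walked with the
// public language package; "pt-BR" is just an example tag.
package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	t := language.MustParse("pt-BR")
	fmt.Println(t)                   // pt-BR
	fmt.Println(t.Parent())          // pt
	fmt.Println(t.Parent().Parent()) // und
}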
- -package main - -import ( - "bufio" - "flag" - "fmt" - "io" - "io/ioutil" - "log" - "math" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/tag" - "golang.org/x/text/unicode/cldr" -) - -var ( - test = flag.Bool("test", - false, - "test existing tables; can be used to compare web data with package data.") - outputFile = flag.String("output", - "tables.go", - "output file for generated tables") -) - -var comment = []string{ - ` -lang holds an alphabetically sorted list of ISO-639 language identifiers. -All entries are 4 bytes. The index of the identifier (divided by 4) is the language tag. -For 2-byte language identifiers, the two successive bytes have the following meaning: - - if the first letter of the 2- and 3-letter ISO codes are the same: - the second and third letter of the 3-letter ISO code. - - otherwise: a 0 and a by 2 bits right-shifted index into altLangISO3. -For 3-byte language identifiers the 4th byte is 0.`, - ` -langNoIndex is a bit vector of all 3-letter language codes that are not used as an index -in lookup tables. The language ids for these language codes are derived directly -from the letters and are not consecutive.`, - ` -altLangISO3 holds an alphabetically sorted list of 3-letter language code alternatives -to 2-letter language codes that cannot be derived using the method described above. -Each 3-letter code is followed by its 1-byte langID.`, - ` -altLangIndex is used to convert indexes in altLangISO3 to langIDs.`, - ` -AliasMap maps langIDs to their suggested replacements.`, - ` -script is an alphabetically sorted list of ISO 15924 codes. The index -of the script in the string, divided by 4, is the internal scriptID.`, - ` -isoRegionOffset needs to be added to the index of regionISO to obtain the regionID -for 2-letter ISO codes. (The first isoRegionOffset regionIDs are reserved for -the UN.M49 codes used for groups.)`, - ` -regionISO holds a list of alphabetically sorted 2-letter ISO region codes. -Each 2-letter codes is followed by two bytes with the following meaning: - - [A-Z}{2}: the first letter of the 2-letter code plus these two - letters form the 3-letter ISO code. - - 0, n: index into altRegionISO3.`, - ` -regionTypes defines the status of a region for various standards.`, - ` -m49 maps regionIDs to UN.M49 codes. The first isoRegionOffset entries are -codes indicating collections of regions.`, - ` -m49Index gives indexes into fromM49 based on the three most significant bits -of a 10-bit UN.M49 code. To search an UN.M49 code in fromM49, search in - fromM49[m49Index[msb39(code)]:m49Index[msb3(code)+1]] -for an entry where the first 7 bits match the 7 lsb of the UN.M49 code. -The region code is stored in the 9 lsb of the indexed value.`, - ` -fromM49 contains entries to map UN.M49 codes to regions. See m49Index for details.`, - ` -altRegionISO3 holds a list of 3-letter region codes that cannot be -mapped to 2-letter codes using the default algorithm. This is a short list.`, - ` -altRegionIDs holds a list of regionIDs the positions of which match those -of the 3-letter ISO codes in altRegionISO3.`, - ` -variantNumSpecialized is the number of specialized variants in variants.`, - ` -suppressScript is an index from langID to the dominant script for that language, -if it exists. If a script is given, it should be suppressed from the language tag.`, - ` -likelyLang is a lookup table, indexed by langID, for the most likely -scripts and regions given incomplete information. 
If more entries exist for a -given language, region and script are the index and size respectively -of the list in likelyLangList.`, - ` -likelyLangList holds lists info associated with likelyLang.`, - ` -likelyRegion is a lookup table, indexed by regionID, for the most likely -languages and scripts given incomplete information. If more entries exist -for a given regionID, lang and script are the index and size respectively -of the list in likelyRegionList. -TODO: exclude containers and user-definable regions from the list.`, - ` -likelyRegionList holds lists info associated with likelyRegion.`, - ` -likelyScript is a lookup table, indexed by scriptID, for the most likely -languages and regions given a script.`, - ` -nRegionGroups is the number of region groups.`, - ` -regionInclusion maps region identifiers to sets of regions in regionInclusionBits, -where each set holds all groupings that are directly connected in a region -containment graph.`, - ` -regionInclusionBits is an array of bit vectors where every vector represents -a set of region groupings. These sets are used to compute the distance -between two regions for the purpose of language matching.`, - ` -regionInclusionNext marks, for each entry in regionInclusionBits, the set of -all groups that are reachable from the groups set in the respective entry.`, -} - -// TODO: consider changing some of these structures to tries. This can reduce -// memory, but may increase the need for memory allocations. This could be -// mitigated if we can piggyback on language tags for common cases. - -func failOnError(e error) { - if e != nil { - log.Panic(e) - } -} - -type setType int - -const ( - Indexed setType = 1 + iota // all elements must be of same size - Linear -) - -type stringSet struct { - s []string - sorted, frozen bool - - // We often need to update values after the creation of an index is completed. - // We include a convenience map for keeping track of this. - update map[string]string - typ setType // used for checking. -} - -func (ss *stringSet) clone() stringSet { - c := *ss - c.s = append([]string(nil), c.s...) - return c -} - -func (ss *stringSet) setType(t setType) { - if ss.typ != t && ss.typ != 0 { - log.Panicf("type %d cannot be assigned as it was already %d", t, ss.typ) - } -} - -// parse parses a whitespace-separated string and initializes ss with its -// components. 
-func (ss *stringSet) parse(s string) { - scan := bufio.NewScanner(strings.NewReader(s)) - scan.Split(bufio.ScanWords) - for scan.Scan() { - ss.add(scan.Text()) - } -} - -func (ss *stringSet) assertChangeable() { - if ss.frozen { - log.Panic("attempt to modify a frozen stringSet") - } -} - -func (ss *stringSet) add(s string) { - ss.assertChangeable() - ss.s = append(ss.s, s) - ss.sorted = ss.frozen -} - -func (ss *stringSet) freeze() { - ss.compact() - ss.frozen = true -} - -func (ss *stringSet) compact() { - if ss.sorted { - return - } - a := ss.s - sort.Strings(a) - k := 0 - for i := 1; i < len(a); i++ { - if a[k] != a[i] { - a[k+1] = a[i] - k++ - } - } - ss.s = a[:k+1] - ss.sorted = ss.frozen -} - -type funcSorter struct { - fn func(a, b string) bool - sort.StringSlice -} - -func (s funcSorter) Less(i, j int) bool { - return s.fn(s.StringSlice[i], s.StringSlice[j]) -} - -func (ss *stringSet) sortFunc(f func(a, b string) bool) { - ss.compact() - sort.Sort(funcSorter{f, sort.StringSlice(ss.s)}) -} - -func (ss *stringSet) remove(s string) { - ss.assertChangeable() - if i, ok := ss.find(s); ok { - copy(ss.s[i:], ss.s[i+1:]) - ss.s = ss.s[:len(ss.s)-1] - } -} - -func (ss *stringSet) replace(ol, nu string) { - ss.s[ss.index(ol)] = nu - ss.sorted = ss.frozen -} - -func (ss *stringSet) index(s string) int { - ss.setType(Indexed) - i, ok := ss.find(s) - if !ok { - if i < len(ss.s) { - log.Panicf("find: item %q is not in list. Closest match is %q.", s, ss.s[i]) - } - log.Panicf("find: item %q is not in list", s) - - } - return i -} - -func (ss *stringSet) find(s string) (int, bool) { - ss.compact() - i := sort.SearchStrings(ss.s, s) - return i, i != len(ss.s) && ss.s[i] == s -} - -func (ss *stringSet) slice() []string { - ss.compact() - return ss.s -} - -func (ss *stringSet) updateLater(v, key string) { - if ss.update == nil { - ss.update = map[string]string{} - } - ss.update[v] = key -} - -// join joins the string and ensures that all entries are of the same length. -func (ss *stringSet) join() string { - ss.setType(Indexed) - n := len(ss.s[0]) - for _, s := range ss.s { - if len(s) != n { - log.Panicf("join: not all entries are of the same length: %q", s) - } - } - ss.s = append(ss.s, strings.Repeat("\xff", n)) - return strings.Join(ss.s, "") -} - -// ianaEntry holds information for an entry in the IANA Language Subtag Repository. -// All types use the same entry. -// See http://tools.ietf.org/html/bcp47#section-5.1 for a description of the various -// fields. -type ianaEntry struct { - typ string - description []string - scope string - added string - preferred string - deprecated string - suppressScript string - macro string - prefix []string -} - -type builder struct { - w *gen.CodeWriter - hw io.Writer // MultiWriter for w and w.Hash - data *cldr.CLDR - supp *cldr.SupplementalData - - // indices - locale stringSet // common locales - lang stringSet // canonical language ids (2 or 3 letter ISO codes) with data - langNoIndex stringSet // 3-letter ISO codes with no associated data - script stringSet // 4-letter ISO codes - region stringSet // 2-letter ISO or 3-digit UN M49 codes - variant stringSet // 4-8-alphanumeric variant code. - - // Region codes that are groups with their corresponding group IDs. 
- groups map[int]index - - // langInfo - registry map[string]*ianaEntry -} - -type index uint - -func newBuilder(w *gen.CodeWriter) *builder { - r := gen.OpenCLDRCoreZip() - defer r.Close() - d := &cldr.Decoder{} - data, err := d.DecodeZip(r) - failOnError(err) - b := builder{ - w: w, - hw: io.MultiWriter(w, w.Hash), - data: data, - supp: data.Supplemental(), - } - b.parseRegistry() - return &b -} - -func (b *builder) parseRegistry() { - r := gen.OpenIANAFile("assignments/language-subtag-registry") - defer r.Close() - b.registry = make(map[string]*ianaEntry) - - scan := bufio.NewScanner(r) - scan.Split(bufio.ScanWords) - var record *ianaEntry - for more := scan.Scan(); more; { - key := scan.Text() - more = scan.Scan() - value := scan.Text() - switch key { - case "Type:": - record = &ianaEntry{typ: value} - case "Subtag:", "Tag:": - if s := strings.SplitN(value, "..", 2); len(s) > 1 { - for a := s[0]; a <= s[1]; a = inc(a) { - b.addToRegistry(a, record) - } - } else { - b.addToRegistry(value, record) - } - case "Suppress-Script:": - record.suppressScript = value - case "Added:": - record.added = value - case "Deprecated:": - record.deprecated = value - case "Macrolanguage:": - record.macro = value - case "Preferred-Value:": - record.preferred = value - case "Prefix:": - record.prefix = append(record.prefix, value) - case "Scope:": - record.scope = value - case "Description:": - buf := []byte(value) - for more = scan.Scan(); more; more = scan.Scan() { - b := scan.Bytes() - if b[0] == '%' || b[len(b)-1] == ':' { - break - } - buf = append(buf, ' ') - buf = append(buf, b...) - } - record.description = append(record.description, string(buf)) - continue - default: - continue - } - more = scan.Scan() - } - if scan.Err() != nil { - log.Panic(scan.Err()) - } -} - -func (b *builder) addToRegistry(key string, entry *ianaEntry) { - if info, ok := b.registry[key]; ok { - if info.typ != "language" || entry.typ != "extlang" { - log.Fatalf("parseRegistry: tag %q already exists", key) - } - } else { - b.registry[key] = entry - } -} - -var commentIndex = make(map[string]string) - -func init() { - for _, s := range comment { - key := strings.TrimSpace(strings.SplitN(s, " ", 2)[0]) - commentIndex[key] = s - } -} - -func (b *builder) comment(name string) { - if s := commentIndex[name]; len(s) > 0 { - b.w.WriteComment(s) - } else { - fmt.Fprintln(b.w) - } -} - -func (b *builder) pf(f string, x ...interface{}) { - fmt.Fprintf(b.hw, f, x...) - fmt.Fprint(b.hw, "\n") -} - -func (b *builder) p(x ...interface{}) { - fmt.Fprintln(b.hw, x...) -} - -func (b *builder) addSize(s int) { - b.w.Size += s - b.pf("// Size: %d bytes", s) -} - -func (b *builder) writeConst(name string, x interface{}) { - b.comment(name) - b.w.WriteConst(name, x) -} - -// writeConsts computes f(v) for all v in values and writes the results -// as constants named _v to a single constant block. -func (b *builder) writeConsts(f func(string) int, values ...string) { - b.pf("const (") - for _, v := range values { - b.pf("\t_%s = %v", v, f(v)) - } - b.pf(")") -} - -// writeType writes the type of the given value, which must be a struct. 
-func (b *builder) writeType(value interface{}) { - b.comment(reflect.TypeOf(value).Name()) - b.w.WriteType(value) -} - -func (b *builder) writeSlice(name string, ss interface{}) { - b.writeSliceAddSize(name, 0, ss) -} - -func (b *builder) writeSliceAddSize(name string, extraSize int, ss interface{}) { - b.comment(name) - b.w.Size += extraSize - v := reflect.ValueOf(ss) - t := v.Type().Elem() - b.pf("// Size: %d bytes, %d elements", v.Len()*int(t.Size())+extraSize, v.Len()) - - fmt.Fprintf(b.w, "var %s = ", name) - b.w.WriteArray(ss) - b.p() -} - -type FromTo struct { - From, To uint16 -} - -func (b *builder) writeSortedMap(name string, ss *stringSet, index func(s string) uint16) { - ss.sortFunc(func(a, b string) bool { - return index(a) < index(b) - }) - m := []FromTo{} - for _, s := range ss.s { - m = append(m, FromTo{index(s), index(ss.update[s])}) - } - b.writeSlice(name, m) -} - -const base = 'z' - 'a' + 1 - -func strToInt(s string) uint { - v := uint(0) - for i := 0; i < len(s); i++ { - v *= base - v += uint(s[i] - 'a') - } - return v -} - -// converts the given integer to the original ASCII string passed to strToInt. -// len(s) must match the number of characters obtained. -func intToStr(v uint, s []byte) { - for i := len(s) - 1; i >= 0; i-- { - s[i] = byte(v%base) + 'a' - v /= base - } -} - -func (b *builder) writeBitVector(name string, ss []string) { - vec := make([]uint8, int(math.Ceil(math.Pow(base, float64(len(ss[0])))/8))) - for _, s := range ss { - v := strToInt(s) - vec[v/8] |= 1 << (v % 8) - } - b.writeSlice(name, vec) -} - -// TODO: convert this type into a list or two-stage trie. -func (b *builder) writeMapFunc(name string, m map[string]string, f func(string) uint16) { - b.comment(name) - v := reflect.ValueOf(m) - sz := v.Len() * (2 + int(v.Type().Key().Size())) - for _, k := range m { - sz += len(k) - } - b.addSize(sz) - keys := []string{} - b.pf(`var %s = map[string]uint16{`, name) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - b.pf("\t%q: %v,", k, f(m[k])) - } - b.p("}") -} - -func (b *builder) writeMap(name string, m interface{}) { - b.comment(name) - v := reflect.ValueOf(m) - sz := v.Len() * (2 + int(v.Type().Key().Size()) + int(v.Type().Elem().Size())) - b.addSize(sz) - f := strings.FieldsFunc(fmt.Sprintf("%#v", m), func(r rune) bool { - return strings.IndexRune("{}, ", r) != -1 - }) - sort.Strings(f[1:]) - b.pf(`var %s = %s{`, name, f[0]) - for _, kv := range f[1:] { - b.pf("\t%s,", kv) - } - b.p("}") -} - -func (b *builder) langIndex(s string) uint16 { - if s == "und" { - return 0 - } - if i, ok := b.lang.find(s); ok { - return uint16(i) - } - return uint16(strToInt(s)) + uint16(len(b.lang.s)) -} - -// inc advances the string to its lexicographical successor. -func inc(s string) string { - const maxTagLength = 4 - var buf [maxTagLength]byte - intToStr(strToInt(strings.ToLower(s))+1, buf[:len(s)]) - for i := 0; i < len(s); i++ { - if s[i] <= 'Z' { - buf[i] -= 'a' - 'A' - } - } - return string(buf[:len(s)]) -} - -func (b *builder) parseIndices() { - meta := b.supp.Metadata - - for k, v := range b.registry { - var ss *stringSet - switch v.typ { - case "language": - if len(k) == 2 || v.suppressScript != "" || v.scope == "special" { - b.lang.add(k) - continue - } else { - ss = &b.langNoIndex - } - case "region": - ss = &b.region - case "script": - ss = &b.script - case "variant": - ss = &b.variant - default: - continue - } - ss.add(k) - } - // Include any language for which there is data. 
- for _, lang := range b.data.Locales() { - if x := b.data.RawLDML(lang); false || - x.LocaleDisplayNames != nil || - x.Characters != nil || - x.Delimiters != nil || - x.Measurement != nil || - x.Dates != nil || - x.Numbers != nil || - x.Units != nil || - x.ListPatterns != nil || - x.Collations != nil || - x.Segmentations != nil || - x.Rbnf != nil || - x.Annotations != nil || - x.Metadata != nil { - - from := strings.Split(lang, "_") - if lang := from[0]; lang != "root" { - b.lang.add(lang) - } - } - } - // Include locales for plural rules, which uses a different structure. - for _, plurals := range b.data.Supplemental().Plurals { - for _, rules := range plurals.PluralRules { - for _, lang := range strings.Split(rules.Locales, " ") { - if lang = strings.Split(lang, "_")[0]; lang != "root" { - b.lang.add(lang) - } - } - } - } - // Include languages in likely subtags. - for _, m := range b.supp.LikelySubtags.LikelySubtag { - from := strings.Split(m.From, "_") - b.lang.add(from[0]) - } - // Include ISO-639 alpha-3 bibliographic entries. - for _, a := range meta.Alias.LanguageAlias { - if a.Reason == "bibliographic" { - b.langNoIndex.add(a.Type) - } - } - // Include regions in territoryAlias (not all are in the IANA registry!) - for _, reg := range b.supp.Metadata.Alias.TerritoryAlias { - if len(reg.Type) == 2 { - b.region.add(reg.Type) - } - } - - for _, s := range b.lang.s { - if len(s) == 3 { - b.langNoIndex.remove(s) - } - } - b.writeConst("NumLanguages", len(b.lang.slice())+len(b.langNoIndex.slice())) - b.writeConst("NumScripts", len(b.script.slice())) - b.writeConst("NumRegions", len(b.region.slice())) - - // Add dummy codes at the start of each list to represent "unspecified". - b.lang.add("---") - b.script.add("----") - b.region.add("---") - - // common locales - b.locale.parse(meta.DefaultContent.Locales) -} - -// TODO: region inclusion data will probably not be use used in future matchers. - -func (b *builder) computeRegionGroups() { - b.groups = make(map[int]index) - - // Create group indices. - for i := 1; b.region.s[i][0] < 'A'; i++ { // Base M49 indices on regionID. - b.groups[i] = index(len(b.groups)) - } - for _, g := range b.supp.TerritoryContainment.Group { - // Skip UN and EURO zone as they are flattening the containment - // relationship. - if g.Type == "EZ" || g.Type == "UN" { - continue - } - group := b.region.index(g.Type) - if _, ok := b.groups[group]; !ok { - b.groups[group] = index(len(b.groups)) - } - } - if len(b.groups) > 64 { - log.Fatalf("only 64 groups supported, found %d", len(b.groups)) - } - b.writeConst("nRegionGroups", len(b.groups)) -} - -var langConsts = []string{ - "af", "am", "ar", "az", "bg", "bn", "ca", "cs", "da", "de", "el", "en", "es", - "et", "fa", "fi", "fil", "fr", "gu", "he", "hi", "hr", "hu", "hy", "id", "is", - "it", "ja", "ka", "kk", "km", "kn", "ko", "ky", "lo", "lt", "lv", "mk", "ml", - "mn", "mo", "mr", "ms", "mul", "my", "nb", "ne", "nl", "no", "pa", "pl", "pt", - "ro", "ru", "sh", "si", "sk", "sl", "sq", "sr", "sv", "sw", "ta", "te", "th", - "tl", "tn", "tr", "uk", "ur", "uz", "vi", "zh", "zu", - - // constants for grandfathered tags (if not already defined) - "jbo", "ami", "bnn", "hak", "tlh", "lb", "nv", "pwn", "tao", "tay", "tsu", - "nn", "sfb", "vgt", "sgg", "cmn", "nan", "hsn", -} - -// writeLanguage generates all tables needed for language canonicalization. 
-func (b *builder) writeLanguage() { - meta := b.supp.Metadata - - b.writeConst("nonCanonicalUnd", b.lang.index("und")) - b.writeConsts(func(s string) int { return int(b.langIndex(s)) }, langConsts...) - b.writeConst("langPrivateStart", b.langIndex("qaa")) - b.writeConst("langPrivateEnd", b.langIndex("qtz")) - - // Get language codes that need to be mapped (overlong 3-letter codes, - // deprecated 2-letter codes, legacy and grandfathered tags.) - langAliasMap := stringSet{} - aliasTypeMap := map[string]AliasType{} - - // altLangISO3 get the alternative ISO3 names that need to be mapped. - altLangISO3 := stringSet{} - // Add dummy start to avoid the use of index 0. - altLangISO3.add("---") - altLangISO3.updateLater("---", "aa") - - lang := b.lang.clone() - for _, a := range meta.Alias.LanguageAlias { - if a.Replacement == "" { - a.Replacement = "und" - } - // TODO: support mapping to tags - repl := strings.SplitN(a.Replacement, "_", 2)[0] - if a.Reason == "overlong" { - if len(a.Replacement) == 2 && len(a.Type) == 3 { - lang.updateLater(a.Replacement, a.Type) - } - } else if len(a.Type) <= 3 { - switch a.Reason { - case "macrolanguage": - aliasTypeMap[a.Type] = Macro - case "deprecated": - // handled elsewhere - continue - case "bibliographic", "legacy": - if a.Type == "no" { - continue - } - aliasTypeMap[a.Type] = Legacy - default: - log.Fatalf("new %s alias: %s", a.Reason, a.Type) - } - langAliasMap.add(a.Type) - langAliasMap.updateLater(a.Type, repl) - } - } - // Manually add the mapping of "nb" (Norwegian) to its macro language. - // This can be removed if CLDR adopts this change. - langAliasMap.add("nb") - langAliasMap.updateLater("nb", "no") - aliasTypeMap["nb"] = Macro - - for k, v := range b.registry { - // Also add deprecated values for 3-letter ISO codes, which CLDR omits. - if v.typ == "language" && v.deprecated != "" && v.preferred != "" { - langAliasMap.add(k) - langAliasMap.updateLater(k, v.preferred) - aliasTypeMap[k] = Deprecated - } - } - // Fix CLDR mappings. - lang.updateLater("tl", "tgl") - lang.updateLater("sh", "hbs") - lang.updateLater("mo", "mol") - lang.updateLater("no", "nor") - lang.updateLater("tw", "twi") - lang.updateLater("nb", "nob") - lang.updateLater("ak", "aka") - lang.updateLater("bh", "bih") - - // Ensure that each 2-letter code is matched with a 3-letter code. - for _, v := range lang.s[1:] { - s, ok := lang.update[v] - if !ok { - if s, ok = lang.update[langAliasMap.update[v]]; !ok { - continue - } - lang.update[v] = s - } - if v[0] != s[0] { - altLangISO3.add(s) - altLangISO3.updateLater(s, v) - } - } - - // Complete canonicalized language tags. - lang.freeze() - for i, v := range lang.s { - // We can avoid these manual entries by using the IANA registry directly. - // Seems easier to update the list manually, as changes are rare. - // The panic in this loop will trigger if we miss an entry. - add := "" - if s, ok := lang.update[v]; ok { - if s[0] == v[0] { - add = s[1:] - } else { - add = string([]byte{0, byte(altLangISO3.index(s))}) - } - } else if len(v) == 3 { - add = "\x00" - } else { - log.Panicf("no data for long form of %q", v) - } - lang.s[i] += add - } - b.writeConst("lang", tag.Index(lang.join())) - - b.writeConst("langNoIndexOffset", len(b.lang.s)) - - // space of all valid 3-letter language identifiers. 
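Codes that get no index are instead recorded in a bit vector keyed by the base-26 value produced by strToInt above: bit v of langNoIndex is set when the 3-letter code packing to v exists. A minimal, self-contained sketch of that packing and the membership test it enables (illustrative only, not part of the deleted file; the code values used are arbitrary):

package main

import "fmt"

const base = 'z' - 'a' + 1 // 26

// strToInt packs a lowercase ASCII string into a base-26 integer,
// mirroring the helper defined earlier in this file.
func strToInt(s string) uint {
	v := uint(0)
	for i := 0; i < len(s); i++ {
		v = v*base + uint(s[i]-'a')
	}
	return v
}

func main() {
	// One bit per possible 3-letter code: 26^3 bits, rounded up to bytes.
	vec := make([]uint8, (base*base*base+7)/8)

	set := func(code string) {
		v := strToInt(code)
		vec[v/8] |= 1 << (v % 8)
	}
	has := func(code string) bool {
		v := strToInt(code)
		return vec[v/8]&(1<<(v%8)) != 0
	}

	set("haw")
	fmt.Println(has("haw"), has("haz")) // true false
}

The writeBitVector call below emits exactly this representation for langNoIndex.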
- b.writeBitVector("langNoIndex", b.langNoIndex.slice()) - - altLangIndex := []uint16{} - for i, s := range altLangISO3.slice() { - altLangISO3.s[i] += string([]byte{byte(len(altLangIndex))}) - if i > 0 { - idx := b.lang.index(altLangISO3.update[s]) - altLangIndex = append(altLangIndex, uint16(idx)) - } - } - b.writeConst("altLangISO3", tag.Index(altLangISO3.join())) - b.writeSlice("altLangIndex", altLangIndex) - - b.writeSortedMap("AliasMap", &langAliasMap, b.langIndex) - types := make([]AliasType, len(langAliasMap.s)) - for i, s := range langAliasMap.s { - types[i] = aliasTypeMap[s] - } - b.writeSlice("AliasTypes", types) -} - -var scriptConsts = []string{ - "Latn", "Hani", "Hans", "Hant", "Qaaa", "Qaai", "Qabx", "Zinh", "Zyyy", - "Zzzz", -} - -func (b *builder) writeScript() { - b.writeConsts(b.script.index, scriptConsts...) - b.writeConst("script", tag.Index(b.script.join())) - - supp := make([]uint8, len(b.lang.slice())) - for i, v := range b.lang.slice()[1:] { - if sc := b.registry[v].suppressScript; sc != "" { - supp[i+1] = uint8(b.script.index(sc)) - } - } - b.writeSlice("suppressScript", supp) - - // There is only one deprecated script in CLDR. This value is hard-coded. - // We check here if the code must be updated. - for _, a := range b.supp.Metadata.Alias.ScriptAlias { - if a.Type != "Qaai" { - log.Panicf("unexpected deprecated stript %q", a.Type) - } - } -} - -func parseM49(s string) int16 { - if len(s) == 0 { - return 0 - } - v, err := strconv.ParseUint(s, 10, 10) - failOnError(err) - return int16(v) -} - -var regionConsts = []string{ - "001", "419", "BR", "CA", "ES", "GB", "MD", "PT", "UK", "US", - "ZZ", "XA", "XC", "XK", // Unofficial tag for Kosovo. -} - -func (b *builder) writeRegion() { - b.writeConsts(b.region.index, regionConsts...) - - isoOffset := b.region.index("AA") - m49map := make([]int16, len(b.region.slice())) - fromM49map := make(map[int16]int) - altRegionISO3 := "" - altRegionIDs := []uint16{} - - b.writeConst("isoRegionOffset", isoOffset) - - // 2-letter region lookup and mapping to numeric codes. - regionISO := b.region.clone() - regionISO.s = regionISO.s[isoOffset:] - regionISO.sorted = false - - regionTypes := make([]byte, len(b.region.s)) - - // Is the region valid BCP 47? - for s, e := range b.registry { - if len(s) == 2 && s == strings.ToUpper(s) { - i := b.region.index(s) - for _, d := range e.description { - if strings.Contains(d, "Private use") { - regionTypes[i] = iso3166UserAssigned - } - } - regionTypes[i] |= bcp47Region - } - } - - // Is the region a valid ccTLD? 
- r := gen.OpenIANAFile("domains/root/db")
- defer r.Close()
-
- buf, err := ioutil.ReadAll(r)
- failOnError(err)
- re := regexp.MustCompile(`"/domains/root/db/([a-z]{2}).html"`)
- for _, m := range re.FindAllSubmatch(buf, -1) {
- i := b.region.index(strings.ToUpper(string(m[1])))
- regionTypes[i] |= ccTLD
- }
-
- b.writeSlice("regionTypes", regionTypes)
-
- iso3Set := make(map[string]int)
- update := func(iso2, iso3 string) {
- i := regionISO.index(iso2)
- if j, ok := iso3Set[iso3]; !ok && iso3[0] == iso2[0] {
- regionISO.s[i] += iso3[1:]
- iso3Set[iso3] = -1
- } else {
- if ok && j >= 0 {
- regionISO.s[i] += string([]byte{0, byte(j)})
- } else {
- iso3Set[iso3] = len(altRegionISO3)
- regionISO.s[i] += string([]byte{0, byte(len(altRegionISO3))})
- altRegionISO3 += iso3
- altRegionIDs = append(altRegionIDs, uint16(isoOffset+i))
- }
- }
- }
- for _, tc := range b.supp.CodeMappings.TerritoryCodes {
- i := regionISO.index(tc.Type) + isoOffset
- if d := m49map[i]; d != 0 {
- log.Panicf("%s found as a duplicate UN.M49 code of %03d", tc.Numeric, d)
- }
- m49 := parseM49(tc.Numeric)
- m49map[i] = m49
- if r := fromM49map[m49]; r == 0 {
- fromM49map[m49] = i
- } else if r != i {
- dep := b.registry[regionISO.s[r-isoOffset]].deprecated
- if t := b.registry[tc.Type]; t != nil && dep != "" && (t.deprecated == "" || t.deprecated > dep) {
- fromM49map[m49] = i
- }
- }
- }
- for _, ta := range b.supp.Metadata.Alias.TerritoryAlias {
- if len(ta.Type) == 3 && ta.Type[0] <= '9' && len(ta.Replacement) == 2 {
- from := parseM49(ta.Type)
- if r := fromM49map[from]; r == 0 {
- fromM49map[from] = regionISO.index(ta.Replacement) + isoOffset
- }
- }
- }
- for _, tc := range b.supp.CodeMappings.TerritoryCodes {
- if len(tc.Alpha3) == 3 {
- update(tc.Type, tc.Alpha3)
- }
- }
- // This entries are not included in territoryCodes. Mostly 3-letter variants
- // of deleted codes and an entry for QU.
- for _, m := range []struct{ iso2, iso3 string }{
- {"CT", "CTE"},
- {"DY", "DHY"},
- {"HV", "HVO"},
- {"JT", "JTN"},
- {"MI", "MID"},
- {"NH", "NHB"},
- {"NQ", "ATN"},
- {"PC", "PCI"},
- {"PU", "PUS"},
- {"PZ", "PCZ"},
- {"RH", "RHO"},
- {"VD", "VDR"},
- {"WK", "WAK"},
- // These three-letter codes are used for others as well.
- {"FQ", "ATF"},
- } {
- update(m.iso2, m.iso3)
- }
- for i, s := range regionISO.s {
- if len(s) != 4 {
- regionISO.s[i] = s + " "
- }
- }
- b.writeConst("regionISO", tag.Index(regionISO.join()))
- b.writeConst("altRegionISO3", altRegionISO3)
- b.writeSlice("altRegionIDs", altRegionIDs)
-
- // Create list of deprecated regions.
- // TODO: consider inserting SF -> FI. Not included by CLDR, but is the only
- // Transitionally-reserved mapping not included.
- regionOldMap := stringSet{}
- // Include regions in territoryAlias (not all are in the IANA registry!)
- for _, reg := range b.supp.Metadata.Alias.TerritoryAlias {
- if len(reg.Type) == 2 && reg.Reason == "deprecated" && len(reg.Replacement) == 2 {
- regionOldMap.add(reg.Type)
- regionOldMap.updateLater(reg.Type, reg.Replacement)
- i, _ := regionISO.find(reg.Type)
- j, _ := regionISO.find(reg.Replacement)
- if k := m49map[i+isoOffset]; k == 0 {
- m49map[i+isoOffset] = m49map[j+isoOffset]
- }
- }
- }
- b.writeSortedMap("regionOldMap", &regionOldMap, func(s string) uint16 {
- return uint16(b.region.index(s))
- })
- // 3-digit region lookup, groupings.
- for i := 1; i < isoOffset; i++ { - m := parseM49(b.region.s[i]) - m49map[i] = m - fromM49map[m] = i - } - b.writeSlice("m49", m49map) - - const ( - searchBits = 7 - regionBits = 9 - ) - if len(m49map) >= 1< %d", len(m49map), 1<>searchBits] = int16(len(fromM49)) - } - b.writeSlice("m49Index", m49Index) - b.writeSlice("fromM49", fromM49) -} - -const ( - // TODO: put these lists in regionTypes as user data? Could be used for - // various optimizations and refinements and could be exposed in the API. - iso3166Except = "AC CP DG EA EU FX IC SU TA UK" - iso3166Trans = "AN BU CS NT TP YU ZR" // SF is not in our set of Regions. - // DY and RH are actually not deleted, but indeterminately reserved. - iso3166DelCLDR = "CT DD DY FQ HV JT MI NH NQ PC PU PZ RH VD WK YD" -) - -const ( - iso3166UserAssigned = 1 << iota - ccTLD - bcp47Region -) - -func find(list []string, s string) int { - for i, t := range list { - if t == s { - return i - } - } - return -1 -} - -// writeVariants generates per-variant information and creates a map from variant -// name to index value. We assign index values such that sorting multiple -// variants by index value will result in the correct order. -// There are two types of variants: specialized and general. Specialized variants -// are only applicable to certain language or language-script pairs. Generalized -// variants apply to any language. Generalized variants always sort after -// specialized variants. We will therefore always assign a higher index value -// to a generalized variant than any other variant. Generalized variants are -// sorted alphabetically among themselves. -// Specialized variants may also sort after other specialized variants. Such -// variants will be ordered after any of the variants they may follow. -// We assume that if a variant x is followed by a variant y, then for any prefix -// p of x, p-x is a prefix of y. This allows us to order tags based on the -// maximum of the length of any of its prefixes. -// TODO: it is possible to define a set of Prefix values on variants such that -// a total order cannot be defined to the point that this algorithm breaks. -// In other words, we cannot guarantee the same order of variants for the -// future using the same algorithm or for non-compliant combinations of -// variants. For this reason, consider using simple alphabetic sorting -// of variants and ignore Prefix restrictions altogether. -func (b *builder) writeVariant() { - generalized := stringSet{} - specialized := stringSet{} - specializedExtend := stringSet{} - // Collate the variants by type and check assumptions. - for _, v := range b.variant.slice() { - e := b.registry[v] - if len(e.prefix) == 0 { - generalized.add(v) - continue - } - c := strings.Split(e.prefix[0], "-") - hasScriptOrRegion := false - if len(c) > 1 { - _, hasScriptOrRegion = b.script.find(c[1]) - if !hasScriptOrRegion { - _, hasScriptOrRegion = b.region.find(c[1]) - - } - } - if len(c) == 1 || len(c) == 2 && hasScriptOrRegion { - // Variant is preceded by a language. - specialized.add(v) - continue - } - // Variant is preceded by another variant. - specializedExtend.add(v) - prefix := c[0] + "-" - if hasScriptOrRegion { - prefix += c[1] - } - for _, p := range e.prefix { - // Verify that the prefix minus the last element is a prefix of the - // predecessor element. 
- i := strings.LastIndex(p, "-") - pred := b.registry[p[i+1:]] - if find(pred.prefix, p[:i]) < 0 { - log.Fatalf("prefix %q for variant %q not consistent with predecessor spec", p, v) - } - // The sorting used below does not work in the general case. It works - // if we assume that variants that may be followed by others only have - // prefixes of the same length. Verify this. - count := strings.Count(p[:i], "-") - for _, q := range pred.prefix { - if c := strings.Count(q, "-"); c != count { - log.Fatalf("variant %q preceding %q has a prefix %q of size %d; want %d", p[i+1:], v, q, c, count) - } - } - if !strings.HasPrefix(p, prefix) { - log.Fatalf("prefix %q of variant %q should start with %q", p, v, prefix) - } - } - } - - // Sort extended variants. - a := specializedExtend.s - less := func(v, w string) bool { - // Sort by the maximum number of elements. - maxCount := func(s string) (max int) { - for _, p := range b.registry[s].prefix { - if c := strings.Count(p, "-"); c > max { - max = c - } - } - return - } - if cv, cw := maxCount(v), maxCount(w); cv != cw { - return cv < cw - } - // Sort by name as tie breaker. - return v < w - } - sort.Sort(funcSorter{less, sort.StringSlice(a)}) - specializedExtend.frozen = true - - // Create index from variant name to index. - variantIndex := make(map[string]uint8) - add := func(s []string) { - for _, v := range s { - variantIndex[v] = uint8(len(variantIndex)) - } - } - add(specialized.slice()) - add(specializedExtend.s) - numSpecialized := len(variantIndex) - add(generalized.slice()) - if n := len(variantIndex); n > 255 { - log.Fatalf("maximum number of variants exceeded: was %d; want <= 255", n) - } - b.writeMap("variantIndex", variantIndex) - b.writeConst("variantNumSpecialized", numSpecialized) -} - -func (b *builder) writeLanguageInfo() { -} - -// writeLikelyData writes tables that are used both for finding parent relations and for -// language matching. Each entry contains additional bits to indicate the status of the -// data to know when it cannot be used for parent relations. -func (b *builder) writeLikelyData() { - const ( - isList = 1 << iota - scriptInFrom - regionInFrom - ) - type ( // generated types - likelyScriptRegion struct { - region uint16 - script uint8 - flags uint8 - } - likelyLangScript struct { - lang uint16 - script uint8 - flags uint8 - } - likelyLangRegion struct { - lang uint16 - region uint16 - } - // likelyTag is used for getting likely tags for group regions, where - // the likely region might be a region contained in the group. 
- likelyTag struct { - lang uint16 - region uint16 - script uint8 - } - ) - var ( // generated variables - likelyRegionGroup = make([]likelyTag, len(b.groups)) - likelyLang = make([]likelyScriptRegion, len(b.lang.s)) - likelyRegion = make([]likelyLangScript, len(b.region.s)) - likelyScript = make([]likelyLangRegion, len(b.script.s)) - likelyLangList = []likelyScriptRegion{} - likelyRegionList = []likelyLangScript{} - ) - type fromTo struct { - from, to []string - } - langToOther := map[int][]fromTo{} - regionToOther := map[int][]fromTo{} - for _, m := range b.supp.LikelySubtags.LikelySubtag { - from := strings.Split(m.From, "_") - to := strings.Split(m.To, "_") - if len(to) != 3 { - log.Fatalf("invalid number of subtags in %q: found %d, want 3", m.To, len(to)) - } - if len(from) > 3 { - log.Fatalf("invalid number of subtags: found %d, want 1-3", len(from)) - } - if from[0] != to[0] && from[0] != "und" { - log.Fatalf("unexpected language change in expansion: %s -> %s", from, to) - } - if len(from) == 3 { - if from[2] != to[2] { - log.Fatalf("unexpected region change in expansion: %s -> %s", from, to) - } - if from[0] != "und" { - log.Fatalf("unexpected fully specified from tag: %s -> %s", from, to) - } - } - if len(from) == 1 || from[0] != "und" { - id := 0 - if from[0] != "und" { - id = b.lang.index(from[0]) - } - langToOther[id] = append(langToOther[id], fromTo{from, to}) - } else if len(from) == 2 && len(from[1]) == 4 { - sid := b.script.index(from[1]) - likelyScript[sid].lang = uint16(b.langIndex(to[0])) - likelyScript[sid].region = uint16(b.region.index(to[2])) - } else { - r := b.region.index(from[len(from)-1]) - if id, ok := b.groups[r]; ok { - if from[0] != "und" { - log.Fatalf("region changed unexpectedly: %s -> %s", from, to) - } - likelyRegionGroup[id].lang = uint16(b.langIndex(to[0])) - likelyRegionGroup[id].script = uint8(b.script.index(to[1])) - likelyRegionGroup[id].region = uint16(b.region.index(to[2])) - } else { - regionToOther[r] = append(regionToOther[r], fromTo{from, to}) - } - } - } - b.writeType(likelyLangRegion{}) - b.writeSlice("likelyScript", likelyScript) - - for id := range b.lang.s { - list := langToOther[id] - if len(list) == 1 { - likelyLang[id].region = uint16(b.region.index(list[0].to[2])) - likelyLang[id].script = uint8(b.script.index(list[0].to[1])) - } else if len(list) > 1 { - likelyLang[id].flags = isList - likelyLang[id].region = uint16(len(likelyLangList)) - likelyLang[id].script = uint8(len(list)) - for _, x := range list { - flags := uint8(0) - if len(x.from) > 1 { - if x.from[1] == x.to[2] { - flags = regionInFrom - } else { - flags = scriptInFrom - } - } - likelyLangList = append(likelyLangList, likelyScriptRegion{ - region: uint16(b.region.index(x.to[2])), - script: uint8(b.script.index(x.to[1])), - flags: flags, - }) - } - } - } - // TODO: merge suppressScript data with this table. 
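The likelyLang entries filled in by the loop above use two layouts: an entry with no flags holds the likely script and region directly, while an entry with the isList flag stores an offset into likelyLangList in its region field and the number of alternatives in its script field. A small decoding sketch under that reading (types redefined locally; illustrative only, not part of the deleted file):

package main

import "fmt"

// likelyScriptRegion mirrors the generated type used above.
type likelyScriptRegion struct {
	region uint16
	script uint8
	flags  uint8
}

const isList = 1 // lowest flag bit, as defined in writeLikelyData above

// expansionsFor resolves one likelyLang entry: either a direct script/region
// pair, or (when isList is set) a slice of list starting at entry.region
// with entry.script elements.
func expansionsFor(entry likelyScriptRegion, list []likelyScriptRegion) []likelyScriptRegion {
	if entry.flags&isList == 0 {
		return []likelyScriptRegion{entry}
	}
	start, n := int(entry.region), int(entry.script)
	return list[start : start+n]
}

func main() {
	list := []likelyScriptRegion{{region: 10, script: 3}, {region: 11, script: 4}}
	single := likelyScriptRegion{region: 42, script: 7}
	multi := likelyScriptRegion{region: 0, script: 2, flags: isList} // two entries at offset 0
	fmt.Println(expansionsFor(single, list))
	fmt.Println(expansionsFor(multi, list))
}

The writeType and writeSlice calls that follow emit these structures into the generated tables.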
- b.writeType(likelyScriptRegion{}) - b.writeSlice("likelyLang", likelyLang) - b.writeSlice("likelyLangList", likelyLangList) - - for id := range b.region.s { - list := regionToOther[id] - if len(list) == 1 { - likelyRegion[id].lang = uint16(b.langIndex(list[0].to[0])) - likelyRegion[id].script = uint8(b.script.index(list[0].to[1])) - if len(list[0].from) > 2 { - likelyRegion[id].flags = scriptInFrom - } - } else if len(list) > 1 { - likelyRegion[id].flags = isList - likelyRegion[id].lang = uint16(len(likelyRegionList)) - likelyRegion[id].script = uint8(len(list)) - for i, x := range list { - if len(x.from) == 2 && i != 0 || i > 0 && len(x.from) != 3 { - log.Fatalf("unspecified script must be first in list: %v at %d", x.from, i) - } - x := likelyLangScript{ - lang: uint16(b.langIndex(x.to[0])), - script: uint8(b.script.index(x.to[1])), - } - if len(list[0].from) > 2 { - x.flags = scriptInFrom - } - likelyRegionList = append(likelyRegionList, x) - } - } - } - b.writeType(likelyLangScript{}) - b.writeSlice("likelyRegion", likelyRegion) - b.writeSlice("likelyRegionList", likelyRegionList) - - b.writeType(likelyTag{}) - b.writeSlice("likelyRegionGroup", likelyRegionGroup) -} - -func (b *builder) writeRegionInclusionData() { - var ( - // mm holds for each group the set of groups with a distance of 1. - mm = make(map[int][]index) - - // containment holds for each group the transitive closure of - // containment of other groups. - containment = make(map[index][]index) - ) - for _, g := range b.supp.TerritoryContainment.Group { - // Skip UN and EURO zone as they are flattening the containment - // relationship. - if g.Type == "EZ" || g.Type == "UN" { - continue - } - group := b.region.index(g.Type) - groupIdx := b.groups[group] - for _, mem := range strings.Split(g.Contains, " ") { - r := b.region.index(mem) - mm[r] = append(mm[r], groupIdx) - if g, ok := b.groups[r]; ok { - mm[group] = append(mm[group], g) - containment[groupIdx] = append(containment[groupIdx], g) - } - } - } - - regionContainment := make([]uint64, len(b.groups)) - for _, g := range b.groups { - l := containment[g] - - // Compute the transitive closure of containment. - for i := 0; i < len(l); i++ { - l = append(l, containment[l[i]]...) - } - - // Compute the bitmask. - regionContainment[g] = 1 << g - for _, v := range l { - regionContainment[g] |= 1 << v - } - } - b.writeSlice("regionContainment", regionContainment) - - regionInclusion := make([]uint8, len(b.region.s)) - bvs := make(map[uint64]index) - // Make the first bitvector positions correspond with the groups. - for r, i := range b.groups { - bv := uint64(1 << i) - for _, g := range mm[r] { - bv |= 1 << g - } - bvs[bv] = i - regionInclusion[r] = uint8(bvs[bv]) - } - for r := 1; r < len(b.region.s); r++ { - if _, ok := b.groups[r]; !ok { - bv := uint64(0) - for _, g := range mm[r] { - bv |= 1 << g - } - if bv == 0 { - // Pick the world for unspecified regions. - bv = 1 << b.groups[b.region.index("001")] - } - if _, ok := bvs[bv]; !ok { - bvs[bv] = index(len(bvs)) - } - regionInclusion[r] = uint8(bvs[bv]) - } - } - b.writeSlice("regionInclusion", regionInclusion) - regionInclusionBits := make([]uint64, len(bvs)) - for k, v := range bvs { - regionInclusionBits[v] = uint64(k) - } - // Add bit vectors for increasingly large distances until a fixed point is reached. 
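The fixed point that comment refers to can be computed by repeatedly OR-ing each group's bit vector with the vectors of every group it already reaches, until nothing changes. A toy, self-contained version of that computation (group layout and masks are made up for illustration; not part of the deleted file):

package main

import "fmt"

func main() {
	// Toy containment sets as bitmasks over four groups (bit i = group i).
	// Group 0 contains group 1, group 1 contains group 2; group 3 stands alone.
	masks := []uint64{
		1<<0 | 1<<1,
		1<<1 | 1<<2,
		1 << 2,
		1 << 3,
	}

	// Repeatedly OR in the masks of every set bit until a fixed point is reached,
	// i.e. compute the transitive closure of containment.
	for changed := true; changed; {
		changed = false
		for i, bits := range masks {
			next := bits
			for g := uint(0); g < uint(len(masks)); g++ {
				if bits&(1<<g) != 0 {
					next |= masks[g]
				}
			}
			if next != masks[i] {
				masks[i] = next
				changed = true
			}
		}
	}
	fmt.Printf("%b\n", masks[0]) // 111: group 0 now reaches groups 1 and 2
}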
- regionInclusionNext := []uint8{} - for i := 0; i < len(regionInclusionBits); i++ { - bits := regionInclusionBits[i] - next := bits - for i := uint(0); i < uint(len(b.groups)); i++ { - if bits&(1< 6 { - log.Fatalf("Too many groups: %d", i) - } - idToIndex[mv.Id] = uint8(i + 1) - // TODO: also handle '-' - for _, r := range strings.Split(mv.Value, "+") { - todo := []string{r} - for k := 0; k < len(todo); k++ { - r := todo[k] - regionToGroups[b.regionIndex(r)] |= 1 << uint8(i) - todo = append(todo, regionHierarchy[r]...) - } - } - } - b.w.WriteVar("regionToGroups", regionToGroups) - - // maps language id to in- and out-of-group region. - paradigmLocales := [][3]uint16{} - locales := strings.Split(lm[0].ParadigmLocales[0].Locales, " ") - for i := 0; i < len(locales); i += 2 { - x := [3]uint16{} - for j := 0; j < 2; j++ { - pc := strings.SplitN(locales[i+j], "-", 2) - x[0] = b.langIndex(pc[0]) - if len(pc) == 2 { - x[1+j] = uint16(b.regionIndex(pc[1])) - } - } - paradigmLocales = append(paradigmLocales, x) - } - b.w.WriteVar("paradigmLocales", paradigmLocales) - - b.w.WriteType(mutualIntelligibility{}) - b.w.WriteType(scriptIntelligibility{}) - b.w.WriteType(regionIntelligibility{}) - - matchLang := []mutualIntelligibility{} - matchScript := []scriptIntelligibility{} - matchRegion := []regionIntelligibility{} - // Convert the languageMatch entries in lists keyed by desired language. - for _, m := range lm[0].LanguageMatch { - // Different versions of CLDR use different separators. - desired := strings.Replace(m.Desired, "-", "_", -1) - supported := strings.Replace(m.Supported, "-", "_", -1) - d := strings.Split(desired, "_") - s := strings.Split(supported, "_") - if len(d) != len(s) { - log.Fatalf("not supported: desired=%q; supported=%q", desired, supported) - continue - } - distance, _ := strconv.ParseInt(m.Distance, 10, 8) - switch len(d) { - case 2: - if desired == supported && desired == "*_*" { - continue - } - // language-script pair. - matchScript = append(matchScript, scriptIntelligibility{ - wantLang: uint16(b.langIndex(d[0])), - haveLang: uint16(b.langIndex(s[0])), - wantScript: uint8(b.scriptIndex(d[1])), - haveScript: uint8(b.scriptIndex(s[1])), - distance: uint8(distance), - }) - if m.Oneway != "true" { - matchScript = append(matchScript, scriptIntelligibility{ - wantLang: uint16(b.langIndex(s[0])), - haveLang: uint16(b.langIndex(d[0])), - wantScript: uint8(b.scriptIndex(s[1])), - haveScript: uint8(b.scriptIndex(d[1])), - distance: uint8(distance), - }) - } - case 1: - if desired == supported && desired == "*" { - continue - } - if distance == 1 { - // nb == no is already handled by macro mapping. Check there - // really is only this case. - if d[0] != "no" || s[0] != "nb" { - log.Fatalf("unhandled equivalence %s == %s", s[0], d[0]) - } - continue - } - // TODO: consider dropping oneway field and just doubling the entry. - matchLang = append(matchLang, mutualIntelligibility{ - want: uint16(b.langIndex(d[0])), - have: uint16(b.langIndex(s[0])), - distance: uint8(distance), - oneway: m.Oneway == "true", - }) - case 3: - if desired == supported && desired == "*_*_*" { - continue - } - if desired != supported { - // This is now supported by CLDR, but only one case, which - // should already be covered by paradigm locales. For instance, - // test case "und, en, en-GU, en-IN, en-GB ; en-ZA ; en-GB" in - // testdata/CLDRLocaleMatcherTest.txt tests this. 
- if supported != "en_*_GB" { - log.Fatalf("not supported: desired=%q; supported=%q", desired, supported) - } - continue - } - ri := regionIntelligibility{ - lang: b.langIndex(d[0]), - distance: uint8(distance), - } - if d[1] != "*" { - ri.script = uint8(b.scriptIndex(d[1])) - } - switch { - case d[2] == "*": - ri.group = 0x80 // not contained in anything - case strings.HasPrefix(d[2], "$!"): - ri.group = 0x80 - d[2] = "$" + d[2][len("$!"):] - fallthrough - case strings.HasPrefix(d[2], "$"): - ri.group |= idToIndex[d[2]] - } - matchRegion = append(matchRegion, ri) - default: - log.Fatalf("not supported: desired=%q; supported=%q", desired, supported) - } - } - sort.SliceStable(matchLang, func(i, j int) bool { - return matchLang[i].distance < matchLang[j].distance - }) - b.w.WriteComment(` - matchLang holds pairs of langIDs of base languages that are typically - mutually intelligible. Each pair is associated with a confidence and - whether the intelligibility goes one or both ways.`) - b.w.WriteVar("matchLang", matchLang) - - b.w.WriteComment(` - matchScript holds pairs of scriptIDs where readers of one script - can typically also read the other. Each is associated with a confidence.`) - sort.SliceStable(matchScript, func(i, j int) bool { - return matchScript[i].distance < matchScript[j].distance - }) - b.w.WriteVar("matchScript", matchScript) - - sort.SliceStable(matchRegion, func(i, j int) bool { - return matchRegion[i].distance < matchRegion[j].distance - }) - b.w.WriteVar("matchRegion", matchRegion) -} diff --git a/vendor/golang.org/x/text/unicode/bidi/gen.go b/vendor/golang.org/x/text/unicode/bidi/gen.go deleted file mode 100644 index 987fc169c..000000000 --- a/vendor/golang.org/x/text/unicode/bidi/gen.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "flag" - "log" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/triegen" - "golang.org/x/text/internal/ucd" -) - -var outputFile = flag.String("out", "tables.go", "output file") - -func main() { - gen.Init() - gen.Repackage("gen_trieval.go", "trieval.go", "bidi") - gen.Repackage("gen_ranges.go", "ranges_test.go", "bidi") - - genTables() -} - -// bidiClass names and codes taken from class "bc" in -// https://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt -var bidiClass = map[string]Class{ - "AL": AL, // ArabicLetter - "AN": AN, // ArabicNumber - "B": B, // ParagraphSeparator - "BN": BN, // BoundaryNeutral - "CS": CS, // CommonSeparator - "EN": EN, // EuropeanNumber - "ES": ES, // EuropeanSeparator - "ET": ET, // EuropeanTerminator - "L": L, // LeftToRight - "NSM": NSM, // NonspacingMark - "ON": ON, // OtherNeutral - "R": R, // RightToLeft - "S": S, // SegmentSeparator - "WS": WS, // WhiteSpace - - "FSI": Control, - "PDF": Control, - "PDI": Control, - "LRE": Control, - "LRI": Control, - "LRO": Control, - "RLE": Control, - "RLI": Control, - "RLO": Control, -} - -func genTables() { - if numClass > 0x0F { - log.Fatalf("Too many Class constants (%#x > 0x0F).", numClass) - } - w := gen.NewCodeWriter() - defer w.WriteVersionedGoFile(*outputFile, "bidi") - - gen.WriteUnicodeVersion(w) - - t := triegen.NewTrie("bidi") - - // Build data about bracket mapping. These bits need to be or-ed with - // any other bits. - orMask := map[rune]uint64{} - - xorMap := map[rune]int{} - xorMasks := []rune{0} // First value is no-op. 
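Each trie entry will carry only an index into xorMasks; the mask itself is the XOR of the two code points of a bracket pair, so XOR-ing either bracket with its mask recovers the counterpart (r1 ^ (r1^r2) == r2). A tiny self-contained illustration of that property (not part of the deleted file):

package main

import "fmt"

func main() {
	open, close := '(', ')' // U+0028, U+0029
	mask := open ^ close    // stored once per bracket pair

	// XOR-ing either bracket with the mask yields the other bracket.
	fmt.Printf("%c -> %c\n", open, open^mask)   // ( -> )
	fmt.Printf("%c -> %c\n", close, close^mask) // ) -> (
}

The BidiBrackets.txt parse below builds orMask and xorMasks along these lines.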
- - ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) { - r1 := p.Rune(0) - r2 := p.Rune(1) - xor := r1 ^ r2 - if _, ok := xorMap[xor]; !ok { - xorMap[xor] = len(xorMasks) - xorMasks = append(xorMasks, xor) - } - entry := uint64(xorMap[xor]) << xorMaskShift - switch p.String(2) { - case "o": - entry |= openMask - case "c", "n": - default: - log.Fatalf("Unknown bracket class %q.", p.String(2)) - } - orMask[r1] = entry - }) - - w.WriteComment(` - xorMasks contains masks to be xor-ed with brackets to get the reverse - version.`) - w.WriteVar("xorMasks", xorMasks) - - done := map[rune]bool{} - - insert := func(r rune, c Class) { - if !done[r] { - t.Insert(r, orMask[r]|uint64(c)) - done[r] = true - } - } - - // Insert the derived BiDi properties. - ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) { - r := p.Rune(0) - class, ok := bidiClass[p.String(1)] - if !ok { - log.Fatalf("%U: Unknown BiDi class %q", r, p.String(1)) - } - insert(r, class) - }) - visitDefaults(insert) - - // TODO: use sparse blocks. This would reduce table size considerably - // from the looks of it. - - sz, err := t.Gen(w) - if err != nil { - log.Fatal(err) - } - w.Size += sz -} - -// dummy values to make methods in gen_common compile. The real versions -// will be generated by this file to tables.go. -var ( - xorMasks []rune -) diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go b/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go deleted file mode 100644 index 02c3b505d..000000000 --- a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "unicode" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/ucd" - "golang.org/x/text/unicode/rangetable" -) - -// These tables are hand-extracted from: -// https://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt -func visitDefaults(fn func(r rune, c Class)) { - // first write default values for ranges listed above. - visitRunes(fn, AL, []rune{ - 0x0600, 0x07BF, // Arabic - 0x08A0, 0x08FF, // Arabic Extended-A - 0xFB50, 0xFDCF, // Arabic Presentation Forms - 0xFDF0, 0xFDFF, - 0xFE70, 0xFEFF, - 0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols - }) - visitRunes(fn, R, []rune{ - 0x0590, 0x05FF, // Hebrew - 0x07C0, 0x089F, // Nko et al. - 0xFB1D, 0xFB4F, - 0x00010800, 0x00010FFF, // Cypriot Syllabary et. al. - 0x0001E800, 0x0001EDFF, - 0x0001EF00, 0x0001EFFF, - }) - visitRunes(fn, ET, []rune{ // European Terminator - 0x20A0, 0x20Cf, // Currency symbols - }) - rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) { - fn(r, BN) // Boundary Neutral - }) - ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) { - if p.String(1) == "Default_Ignorable_Code_Point" { - fn(p.Rune(0), BN) // Boundary Neutral - } - }) -} - -func visitRunes(fn func(r rune, c Class), c Class, runes []rune) { - for i := 0; i < len(runes); i += 2 { - lo, hi := runes[i], runes[i+1] - for j := lo; j <= hi; j++ { - fn(j, c) - } - } -} diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go b/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go deleted file mode 100644 index 9cb994289..000000000 --- a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// Class is the Unicode BiDi class. Each rune has a single class. -type Class uint - -const ( - L Class = iota // LeftToRight - R // RightToLeft - EN // EuropeanNumber - ES // EuropeanSeparator - ET // EuropeanTerminator - AN // ArabicNumber - CS // CommonSeparator - B // ParagraphSeparator - S // SegmentSeparator - WS // WhiteSpace - ON // OtherNeutral - BN // BoundaryNeutral - NSM // NonspacingMark - AL // ArabicLetter - Control // Control LRO - PDI - - numClass - - LRO // LeftToRightOverride - RLO // RightToLeftOverride - LRE // LeftToRightEmbedding - RLE // RightToLeftEmbedding - PDF // PopDirectionalFormat - LRI // LeftToRightIsolate - RLI // RightToLeftIsolate - FSI // FirstStrongIsolate - PDI // PopDirectionalIsolate - - unknownClass = ^Class(0) -) - -var controlToClass = map[rune]Class{ - 0x202D: LRO, // LeftToRightOverride, - 0x202E: RLO, // RightToLeftOverride, - 0x202A: LRE, // LeftToRightEmbedding, - 0x202B: RLE, // RightToLeftEmbedding, - 0x202C: PDF, // PopDirectionalFormat, - 0x2066: LRI, // LeftToRightIsolate, - 0x2067: RLI, // RightToLeftIsolate, - 0x2068: FSI, // FirstStrongIsolate, - 0x2069: PDI, // PopDirectionalIsolate, -} - -// A trie entry has the following bits: -// 7..5 XOR mask for brackets -// 4 1: Bracket open, 0: Bracket close -// 3..0 Class type - -const ( - openMask = 0x10 - xorMaskShift = 5 -) diff --git a/vendor/golang.org/x/text/unicode/norm/maketables.go b/vendor/golang.org/x/text/unicode/norm/maketables.go deleted file mode 100644 index 30a3aa933..000000000 --- a/vendor/golang.org/x/text/unicode/norm/maketables.go +++ /dev/null @@ -1,986 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Normalization table generator. -// Data read from the web. -// See forminfo.go for a description of the trie values associated with each rune. - -package main - -import ( - "bytes" - "encoding/binary" - "flag" - "fmt" - "io" - "log" - "sort" - "strconv" - "strings" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/triegen" - "golang.org/x/text/internal/ucd" -) - -func main() { - gen.Init() - loadUnicodeData() - compactCCC() - loadCompositionExclusions() - completeCharFields(FCanonical) - completeCharFields(FCompatibility) - computeNonStarterCounts() - verifyComputed() - printChars() - testDerived() - printTestdata() - makeTables() -} - -var ( - tablelist = flag.String("tables", - "all", - "comma-separated list of which tables to generate; "+ - "can be 'decomp', 'recomp', 'info' and 'all'") - test = flag.Bool("test", - false, - "test existing tables against DerivedNormalizationProps and generate test data for regression testing") - verbose = flag.Bool("verbose", - false, - "write data to stdout as it is parsed") -) - -const MaxChar = 0x10FFFF // anything above this shouldn't exist - -// Quick Check properties of runes allow us to quickly -// determine whether a rune may occur in a normal form. -// For a given normal form, a rune may be guaranteed to occur -// verbatim (QC=Yes), may or may not combine with another -// rune (QC=Maybe), or may not occur (QC=No). 
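In the published package these quick-check values back the fast paths that decide whether input is already normalized. A short usage sketch, assuming the standard golang.org/x/text/unicode/norm API that is generated from tables like these:

package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	composed := "\u00e9"    // é as a single precomposed rune (already NFC)
	decomposed := "e\u0301" // 'e' followed by a combining acute accent (NFD)

	fmt.Println(norm.NFC.IsNormalString(composed))   // true
	fmt.Println(norm.NFC.IsNormalString(decomposed)) // false
	fmt.Println(norm.NFD.IsNormalString(decomposed)) // true
}

The generator models the three possible answers with the QCResult type that follows.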
-type QCResult int - -const ( - QCUnknown QCResult = iota - QCYes - QCNo - QCMaybe -) - -func (r QCResult) String() string { - switch r { - case QCYes: - return "Yes" - case QCNo: - return "No" - case QCMaybe: - return "Maybe" - } - return "***UNKNOWN***" -} - -const ( - FCanonical = iota // NFC or NFD - FCompatibility // NFKC or NFKD - FNumberOfFormTypes -) - -const ( - MComposed = iota // NFC or NFKC - MDecomposed // NFD or NFKD - MNumberOfModes -) - -// This contains only the properties we're interested in. -type Char struct { - name string - codePoint rune // if zero, this index is not a valid code point. - ccc uint8 // canonical combining class - origCCC uint8 - excludeInComp bool // from CompositionExclusions.txt - compatDecomp bool // it has a compatibility expansion - - nTrailingNonStarters uint8 - nLeadingNonStarters uint8 // must be equal to trailing if non-zero - - forms [FNumberOfFormTypes]FormInfo // For FCanonical and FCompatibility - - state State -} - -var chars = make([]Char, MaxChar+1) -var cccMap = make(map[uint8]uint8) - -func (c Char) String() string { - buf := new(bytes.Buffer) - - fmt.Fprintf(buf, "%U [%s]:\n", c.codePoint, c.name) - fmt.Fprintf(buf, " ccc: %v\n", c.ccc) - fmt.Fprintf(buf, " excludeInComp: %v\n", c.excludeInComp) - fmt.Fprintf(buf, " compatDecomp: %v\n", c.compatDecomp) - fmt.Fprintf(buf, " state: %v\n", c.state) - fmt.Fprintf(buf, " NFC:\n") - fmt.Fprint(buf, c.forms[FCanonical]) - fmt.Fprintf(buf, " NFKC:\n") - fmt.Fprint(buf, c.forms[FCompatibility]) - - return buf.String() -} - -// In UnicodeData.txt, some ranges are marked like this: -// 3400;;Lo;0;L;;;;;N;;;;; -// 4DB5;;Lo;0;L;;;;;N;;;;; -// parseCharacter keeps a state variable indicating the weirdness. -type State int - -const ( - SNormal State = iota // known to be zero for the type - SFirst - SLast - SMissing -) - -var lastChar = rune('\u0000') - -func (c Char) isValid() bool { - return c.codePoint != 0 && c.state != SMissing -} - -type FormInfo struct { - quickCheck [MNumberOfModes]QCResult // index: MComposed or MDecomposed - verified [MNumberOfModes]bool // index: MComposed or MDecomposed - - combinesForward bool // May combine with rune on the right - combinesBackward bool // May combine with rune on the left - isOneWay bool // Never appears in result - inDecomp bool // Some decompositions result in this char. 
- decomp Decomposition - expandedDecomp Decomposition -} - -func (f FormInfo) String() string { - buf := bytes.NewBuffer(make([]byte, 0)) - - fmt.Fprintf(buf, " quickCheck[C]: %v\n", f.quickCheck[MComposed]) - fmt.Fprintf(buf, " quickCheck[D]: %v\n", f.quickCheck[MDecomposed]) - fmt.Fprintf(buf, " cmbForward: %v\n", f.combinesForward) - fmt.Fprintf(buf, " cmbBackward: %v\n", f.combinesBackward) - fmt.Fprintf(buf, " isOneWay: %v\n", f.isOneWay) - fmt.Fprintf(buf, " inDecomp: %v\n", f.inDecomp) - fmt.Fprintf(buf, " decomposition: %X\n", f.decomp) - fmt.Fprintf(buf, " expandedDecomp: %X\n", f.expandedDecomp) - - return buf.String() -} - -type Decomposition []rune - -func parseDecomposition(s string, skipfirst bool) (a []rune, err error) { - decomp := strings.Split(s, " ") - if len(decomp) > 0 && skipfirst { - decomp = decomp[1:] - } - for _, d := range decomp { - point, err := strconv.ParseUint(d, 16, 64) - if err != nil { - return a, err - } - a = append(a, rune(point)) - } - return a, nil -} - -func loadUnicodeData() { - f := gen.OpenUCDFile("UnicodeData.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - r := p.Rune(ucd.CodePoint) - char := &chars[r] - - char.ccc = uint8(p.Uint(ucd.CanonicalCombiningClass)) - decmap := p.String(ucd.DecompMapping) - - exp, err := parseDecomposition(decmap, false) - isCompat := false - if err != nil { - if len(decmap) > 0 { - exp, err = parseDecomposition(decmap, true) - if err != nil { - log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err) - } - isCompat = true - } - } - - char.name = p.String(ucd.Name) - char.codePoint = r - char.forms[FCompatibility].decomp = exp - if !isCompat { - char.forms[FCanonical].decomp = exp - } else { - char.compatDecomp = true - } - if len(decmap) > 0 { - char.forms[FCompatibility].decomp = exp - } - } - if err := p.Err(); err != nil { - log.Fatal(err) - } -} - -// compactCCC converts the sparse set of CCC values to a continguous one, -// reducing the number of bits needed from 8 to 6. -func compactCCC() { - m := make(map[uint8]uint8) - for i := range chars { - c := &chars[i] - m[c.ccc] = 0 - } - cccs := []int{} - for v, _ := range m { - cccs = append(cccs, int(v)) - } - sort.Ints(cccs) - for i, c := range cccs { - cccMap[uint8(i)] = uint8(c) - m[uint8(c)] = uint8(i) - } - for i := range chars { - c := &chars[i] - c.origCCC = c.ccc - c.ccc = m[c.ccc] - } - if len(m) >= 1<<6 { - log.Fatalf("too many difference CCC values: %d >= 64", len(m)) - } -} - -// CompositionExclusions.txt has form: -// 0958 # ... -// See https://unicode.org/reports/tr44/ for full explanation -func loadCompositionExclusions() { - f := gen.OpenUCDFile("CompositionExclusions.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - c := &chars[p.Rune(0)] - if c.excludeInComp { - log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint) - } - c.excludeInComp = true - } - if e := p.Err(); e != nil { - log.Fatal(e) - } -} - -// hasCompatDecomp returns true if any of the recursive -// decompositions contains a compatibility expansion. -// In this case, the character may not occur in NFK*. -func hasCompatDecomp(r rune) bool { - c := &chars[r] - if c.compatDecomp { - return true - } - for _, d := range c.forms[FCompatibility].decomp { - if hasCompatDecomp(d) { - return true - } - } - return false -} - -// Hangul related constants. 
-const ( - HangulBase = 0xAC00 - HangulEnd = 0xD7A4 // hangulBase + Jamo combinations (19 * 21 * 28) - - JamoLBase = 0x1100 - JamoLEnd = 0x1113 - JamoVBase = 0x1161 - JamoVEnd = 0x1176 - JamoTBase = 0x11A8 - JamoTEnd = 0x11C3 - - JamoLVTCount = 19 * 21 * 28 - JamoTCount = 28 -) - -func isHangul(r rune) bool { - return HangulBase <= r && r < HangulEnd -} - -func isHangulWithoutJamoT(r rune) bool { - if !isHangul(r) { - return false - } - r -= HangulBase - return r < JamoLVTCount && r%JamoTCount == 0 -} - -func ccc(r rune) uint8 { - return chars[r].ccc -} - -// Insert a rune in a buffer, ordered by Canonical Combining Class. -func insertOrdered(b Decomposition, r rune) Decomposition { - n := len(b) - b = append(b, 0) - cc := ccc(r) - if cc > 0 { - // Use bubble sort. - for ; n > 0; n-- { - if ccc(b[n-1]) <= cc { - break - } - b[n] = b[n-1] - } - } - b[n] = r - return b -} - -// Recursively decompose. -func decomposeRecursive(form int, r rune, d Decomposition) Decomposition { - dcomp := chars[r].forms[form].decomp - if len(dcomp) == 0 { - return insertOrdered(d, r) - } - for _, c := range dcomp { - d = decomposeRecursive(form, c, d) - } - return d -} - -func completeCharFields(form int) { - // Phase 0: pre-expand decomposition. - for i := range chars { - f := &chars[i].forms[form] - if len(f.decomp) == 0 { - continue - } - exp := make(Decomposition, 0) - for _, c := range f.decomp { - exp = decomposeRecursive(form, c, exp) - } - f.expandedDecomp = exp - } - - // Phase 1: composition exclusion, mark decomposition. - for i := range chars { - c := &chars[i] - f := &c.forms[form] - - // Marks script-specific exclusions and version restricted. - f.isOneWay = c.excludeInComp - - // Singletons - f.isOneWay = f.isOneWay || len(f.decomp) == 1 - - // Non-starter decompositions - if len(f.decomp) > 1 { - chk := c.ccc != 0 || chars[f.decomp[0]].ccc != 0 - f.isOneWay = f.isOneWay || chk - } - - // Runes that decompose into more than two runes. - f.isOneWay = f.isOneWay || len(f.decomp) > 2 - - if form == FCompatibility { - f.isOneWay = f.isOneWay || hasCompatDecomp(c.codePoint) - } - - for _, r := range f.decomp { - chars[r].forms[form].inDecomp = true - } - } - - // Phase 2: forward and backward combining. - for i := range chars { - c := &chars[i] - f := &c.forms[form] - - if !f.isOneWay && len(f.decomp) == 2 { - f0 := &chars[f.decomp[0]].forms[form] - f1 := &chars[f.decomp[1]].forms[form] - if !f0.isOneWay { - f0.combinesForward = true - } - if !f1.isOneWay { - f1.combinesBackward = true - } - } - if isHangulWithoutJamoT(rune(i)) { - f.combinesForward = true - } - } - - // Phase 3: quick check values. 
- for i := range chars { - c := &chars[i] - f := &c.forms[form] - - switch { - case len(f.decomp) > 0: - f.quickCheck[MDecomposed] = QCNo - case isHangul(rune(i)): - f.quickCheck[MDecomposed] = QCNo - default: - f.quickCheck[MDecomposed] = QCYes - } - switch { - case f.isOneWay: - f.quickCheck[MComposed] = QCNo - case (i & 0xffff00) == JamoLBase: - f.quickCheck[MComposed] = QCYes - if JamoLBase <= i && i < JamoLEnd { - f.combinesForward = true - } - if JamoVBase <= i && i < JamoVEnd { - f.quickCheck[MComposed] = QCMaybe - f.combinesBackward = true - f.combinesForward = true - } - if JamoTBase <= i && i < JamoTEnd { - f.quickCheck[MComposed] = QCMaybe - f.combinesBackward = true - } - case !f.combinesBackward: - f.quickCheck[MComposed] = QCYes - default: - f.quickCheck[MComposed] = QCMaybe - } - } -} - -func computeNonStarterCounts() { - // Phase 4: leading and trailing non-starter count - for i := range chars { - c := &chars[i] - - runes := []rune{rune(i)} - // We always use FCompatibility so that the CGJ insertion points do not - // change for repeated normalizations with different forms. - if exp := c.forms[FCompatibility].expandedDecomp; len(exp) > 0 { - runes = exp - } - // We consider runes that combine backwards to be non-starters for the - // purpose of Stream-Safe Text Processing. - for _, r := range runes { - if cr := &chars[r]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { - break - } - c.nLeadingNonStarters++ - } - for i := len(runes) - 1; i >= 0; i-- { - if cr := &chars[runes[i]]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { - break - } - c.nTrailingNonStarters++ - } - if c.nTrailingNonStarters > 3 { - log.Fatalf("%U: Decomposition with more than 3 (%d) trailing modifiers (%U)", i, c.nTrailingNonStarters, runes) - } - - if isHangul(rune(i)) { - c.nTrailingNonStarters = 2 - if isHangulWithoutJamoT(rune(i)) { - c.nTrailingNonStarters = 1 - } - } - - if l, t := c.nLeadingNonStarters, c.nTrailingNonStarters; l > 0 && l != t { - log.Fatalf("%U: number of leading and trailing non-starters should be equal (%d vs %d)", i, l, t) - } - if t := c.nTrailingNonStarters; t > 3 { - log.Fatalf("%U: number of trailing non-starters is %d > 3", t) - } - } -} - -func printBytes(w io.Writer, b []byte, name string) { - fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b)) - fmt.Fprintf(w, "var %s = [...]byte {", name) - for i, c := range b { - switch { - case i%64 == 0: - fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63) - case i%8 == 0: - fmt.Fprintf(w, "\n") - } - fmt.Fprintf(w, "0x%.2X, ", c) - } - fmt.Fprint(w, "\n}\n\n") -} - -// See forminfo.go for format. -func makeEntry(f *FormInfo, c *Char) uint16 { - e := uint16(0) - if r := c.codePoint; HangulBase <= r && r < HangulEnd { - e |= 0x40 - } - if f.combinesForward { - e |= 0x20 - } - if f.quickCheck[MDecomposed] == QCNo { - e |= 0x4 - } - switch f.quickCheck[MComposed] { - case QCYes: - case QCNo: - e |= 0x10 - case QCMaybe: - e |= 0x18 - default: - log.Fatalf("Illegal quickcheck value %v.", f.quickCheck[MComposed]) - } - e |= uint16(c.nTrailingNonStarters) - return e -} - -// decompSet keeps track of unique decompositions, grouped by whether -// the decomposition is followed by a trailing and/or leading CCC. 
-type decompSet [7]map[string]bool - -const ( - normalDecomp = iota - firstMulti - firstCCC - endMulti - firstLeadingCCC - firstCCCZeroExcept - firstStarterWithNLead - lastDecomp -) - -var cname = []string{"firstMulti", "firstCCC", "endMulti", "firstLeadingCCC", "firstCCCZeroExcept", "firstStarterWithNLead", "lastDecomp"} - -func makeDecompSet() decompSet { - m := decompSet{} - for i := range m { - m[i] = make(map[string]bool) - } - return m -} -func (m *decompSet) insert(key int, s string) { - m[key][s] = true -} - -func printCharInfoTables(w io.Writer) int { - mkstr := func(r rune, f *FormInfo) (int, string) { - d := f.expandedDecomp - s := string([]rune(d)) - if max := 1 << 6; len(s) >= max { - const msg = "%U: too many bytes in decomposition: %d >= %d" - log.Fatalf(msg, r, len(s), max) - } - head := uint8(len(s)) - if f.quickCheck[MComposed] != QCYes { - head |= 0x40 - } - if f.combinesForward { - head |= 0x80 - } - s = string([]byte{head}) + s - - lccc := ccc(d[0]) - tccc := ccc(d[len(d)-1]) - cc := ccc(r) - if cc != 0 && lccc == 0 && tccc == 0 { - log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc) - } - if tccc < lccc && lccc != 0 { - const msg = "%U: lccc (%d) must be <= tcc (%d)" - log.Fatalf(msg, r, lccc, tccc) - } - index := normalDecomp - nTrail := chars[r].nTrailingNonStarters - nLead := chars[r].nLeadingNonStarters - if tccc > 0 || lccc > 0 || nTrail > 0 { - tccc <<= 2 - tccc |= nTrail - s += string([]byte{tccc}) - index = endMulti - for _, r := range d[1:] { - if ccc(r) == 0 { - index = firstCCC - } - } - if lccc > 0 || nLead > 0 { - s += string([]byte{lccc}) - if index == firstCCC { - log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r) - } - index = firstLeadingCCC - } - if cc != lccc { - if cc != 0 { - log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc) - } - index = firstCCCZeroExcept - } - } else if len(d) > 1 { - index = firstMulti - } - return index, s - } - - decompSet := makeDecompSet() - const nLeadStr = "\x00\x01" // 0-byte length and tccc with nTrail. - decompSet.insert(firstStarterWithNLead, nLeadStr) - - // Store the uniqued decompositions in a byte buffer, - // preceded by their byte length. 
- for _, c := range chars { - for _, f := range c.forms { - if len(f.expandedDecomp) == 0 { - continue - } - if f.combinesBackward { - log.Fatalf("%U: combinesBackward and decompose", c.codePoint) - } - index, s := mkstr(c.codePoint, &f) - decompSet.insert(index, s) - } - } - - decompositions := bytes.NewBuffer(make([]byte, 0, 10000)) - size := 0 - positionMap := make(map[string]uint16) - decompositions.WriteString("\000") - fmt.Fprintln(w, "const (") - for i, m := range decompSet { - sa := []string{} - for s := range m { - sa = append(sa, s) - } - sort.Strings(sa) - for _, s := range sa { - p := decompositions.Len() - decompositions.WriteString(s) - positionMap[s] = uint16(p) - } - if cname[i] != "" { - fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len()) - } - } - fmt.Fprintln(w, "maxDecomp = 0x8000") - fmt.Fprintln(w, ")") - b := decompositions.Bytes() - printBytes(w, b, "decomps") - size += len(b) - - varnames := []string{"nfc", "nfkc"} - for i := 0; i < FNumberOfFormTypes; i++ { - trie := triegen.NewTrie(varnames[i]) - - for r, c := range chars { - f := c.forms[i] - d := f.expandedDecomp - if len(d) != 0 { - _, key := mkstr(c.codePoint, &f) - trie.Insert(rune(r), uint64(positionMap[key])) - if c.ccc != ccc(d[0]) { - // We assume the lead ccc of a decomposition !=0 in this case. - if ccc(d[0]) == 0 { - log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc) - } - } - } else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward { - // Handle cases where it can't be detected that the nLead should be equal - // to nTrail. - trie.Insert(c.codePoint, uint64(positionMap[nLeadStr])) - } else if v := makeEntry(&f, &c)<<8 | uint16(c.ccc); v != 0 { - trie.Insert(c.codePoint, uint64(0x8000|v)) - } - } - sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]})) - if err != nil { - log.Fatal(err) - } - size += sz - } - return size -} - -func contains(sa []string, s string) bool { - for _, a := range sa { - if a == s { - return true - } - } - return false -} - -func makeTables() { - w := &bytes.Buffer{} - - size := 0 - if *tablelist == "" { - return - } - list := strings.Split(*tablelist, ",") - if *tablelist == "all" { - list = []string{"recomp", "info"} - } - - // Compute maximum decomposition size. - max := 0 - for _, c := range chars { - if n := len(string(c.forms[FCompatibility].expandedDecomp)); n > max { - max = n - } - } - fmt.Fprintln(w, `import "sync"`) - fmt.Fprintln(w) - - fmt.Fprintln(w, "const (") - fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.") - fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion()) - fmt.Fprintln(w) - fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform") - fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at") - fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that") - fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.") - fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max) - fmt.Fprintln(w, ")\n") - - // Print the CCC remap table. 
- size += len(cccMap) - fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap)) - for i := 0; i < len(cccMap); i++ { - if i%8 == 0 { - fmt.Fprintln(w) - } - fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)]) - } - fmt.Fprintln(w, "\n}\n") - - if contains(list, "info") { - size += printCharInfoTables(w) - } - - if contains(list, "recomp") { - // Note that we use 32 bit keys, instead of 64 bit. - // This clips the bits of three entries, but we know - // this won't cause a collision. The compiler will catch - // any changes made to UnicodeData.txt that introduces - // a collision. - // Note that the recomposition map for NFC and NFKC - // are identical. - - // Recomposition map - nrentries := 0 - for _, c := range chars { - f := c.forms[FCanonical] - if !f.isOneWay && len(f.decomp) > 0 { - nrentries++ - } - } - sz := nrentries * 8 - size += sz - fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz) - fmt.Fprintln(w, "var recompMap map[uint32]rune") - fmt.Fprintln(w, "var recompMapOnce sync.Once\n") - fmt.Fprintln(w, `const recompMapPacked = "" +`) - var buf [8]byte - for i, c := range chars { - f := c.forms[FCanonical] - d := f.decomp - if !f.isOneWay && len(d) > 0 { - key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1])) - binary.BigEndian.PutUint32(buf[:4], key) - binary.BigEndian.PutUint32(buf[4:], uint32(i)) - fmt.Fprintf(w, "\t\t%q + // 0x%.8X: 0x%.8X\n", string(buf[:]), key, uint32(i)) - } - } - // hack so we don't have to special case the trailing plus sign - fmt.Fprintf(w, ` ""`) - fmt.Fprintln(w) - } - - fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size) - gen.WriteVersionedGoFile("tables.go", "norm", w.Bytes()) -} - -func printChars() { - if *verbose { - for _, c := range chars { - if !c.isValid() || c.state == SMissing { - continue - } - fmt.Println(c) - } - } -} - -// verifyComputed does various consistency tests. -func verifyComputed() { - for i, c := range chars { - for _, f := range c.forms { - isNo := (f.quickCheck[MDecomposed] == QCNo) - if (len(f.decomp) > 0) != isNo && !isHangul(rune(i)) { - log.Fatalf("%U: NF*D QC must be No if rune decomposes", i) - } - - isMaybe := f.quickCheck[MComposed] == QCMaybe - if f.combinesBackward != isMaybe { - log.Fatalf("%U: NF*C QC must be Maybe if combinesBackward", i) - } - if len(f.decomp) > 0 && f.combinesForward && isMaybe { - log.Fatalf("%U: NF*C QC must be Yes or No if combinesForward and decomposes", i) - } - - if len(f.expandedDecomp) != 0 { - continue - } - if a, b := c.nLeadingNonStarters > 0, (c.ccc > 0 || f.combinesBackward); a != b { - // We accept these runes to be treated differently (it only affects - // segment breaking in iteration, most likely on improper use), but - // reconsider if more characters are added. 
- // U+FF9E HALFWIDTH KATAKANA VOICED SOUND MARK;Lm;0;L; 3099;;;;N;;;;; - // U+FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK;Lm;0;L; 309A;;;;N;;;;; - // U+3133 HANGUL LETTER KIYEOK-SIOS;Lo;0;L; 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;; - // U+318E HANGUL LETTER ARAEAE;Lo;0;L; 11A1;;;;N;HANGUL LETTER ALAE AE;;;; - // U+FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS;Lo;0;L; 3133;;;;N;HALFWIDTH HANGUL LETTER GIYEOG SIOS;;;; - // U+FFDC HALFWIDTH HANGUL LETTER I;Lo;0;L; 3163;;;;N;;;;; - if i != 0xFF9E && i != 0xFF9F && !(0x3133 <= i && i <= 0x318E) && !(0xFFA3 <= i && i <= 0xFFDC) { - log.Fatalf("%U: nLead was %v; want %v", i, a, b) - } - } - } - nfc := c.forms[FCanonical] - nfkc := c.forms[FCompatibility] - if nfc.combinesBackward != nfkc.combinesBackward { - log.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint) - } - } -} - -// Use values in DerivedNormalizationProps.txt to compare against the -// values we computed. -// DerivedNormalizationProps.txt has form: -// 00C0..00C5 ; NFD_QC; N # ... -// 0374 ; NFD_QC; N # ... -// See https://unicode.org/reports/tr44/ for full explanation -func testDerived() { - f := gen.OpenUCDFile("DerivedNormalizationProps.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - r := p.Rune(0) - c := &chars[r] - - var ftype, mode int - qt := p.String(1) - switch qt { - case "NFC_QC": - ftype, mode = FCanonical, MComposed - case "NFD_QC": - ftype, mode = FCanonical, MDecomposed - case "NFKC_QC": - ftype, mode = FCompatibility, MComposed - case "NFKD_QC": - ftype, mode = FCompatibility, MDecomposed - default: - continue - } - var qr QCResult - switch p.String(2) { - case "Y": - qr = QCYes - case "N": - qr = QCNo - case "M": - qr = QCMaybe - default: - log.Fatalf(`Unexpected quick check value "%s"`, p.String(2)) - } - if got := c.forms[ftype].quickCheck[mode]; got != qr { - log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr) - } - c.forms[ftype].verified[mode] = true - } - if err := p.Err(); err != nil { - log.Fatal(err) - } - // Any unspecified value must be QCYes. Verify this. 
- for i, c := range chars { - for j, fd := range c.forms { - for k, qr := range fd.quickCheck { - if !fd.verified[k] && qr != QCYes { - m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n" - log.Printf(m, i, j, k, qr, c.name) - } - } - } - } -} - -var testHeader = `const ( - Yes = iota - No - Maybe -) - -type formData struct { - qc uint8 - combinesForward bool - decomposition string -} - -type runeData struct { - r rune - ccc uint8 - nLead uint8 - nTrail uint8 - f [2]formData // 0: canonical; 1: compatibility -} - -func f(qc uint8, cf bool, dec string) [2]formData { - return [2]formData{{qc, cf, dec}, {qc, cf, dec}} -} - -func g(qc, qck uint8, cf, cfk bool, d, dk string) [2]formData { - return [2]formData{{qc, cf, d}, {qck, cfk, dk}} -} - -var testData = []runeData{ -` - -func printTestdata() { - type lastInfo struct { - ccc uint8 - nLead uint8 - nTrail uint8 - f string - } - - last := lastInfo{} - w := &bytes.Buffer{} - fmt.Fprintf(w, testHeader) - for r, c := range chars { - f := c.forms[FCanonical] - qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) - f = c.forms[FCompatibility] - qck, cfk, dk := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) - s := "" - if d == dk && qc == qck && cf == cfk { - s = fmt.Sprintf("f(%s, %v, %q)", qc, cf, d) - } else { - s = fmt.Sprintf("g(%s, %s, %v, %v, %q, %q)", qc, qck, cf, cfk, d, dk) - } - current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s} - if last != current { - fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s) - last = current - } - } - fmt.Fprintln(w, "}") - gen.WriteVersionedGoFile("data_test.go", "norm", w.Bytes()) -} diff --git a/vendor/golang.org/x/text/unicode/norm/triegen.go b/vendor/golang.org/x/text/unicode/norm/triegen.go deleted file mode 100644 index 45d711900..000000000 --- a/vendor/golang.org/x/text/unicode/norm/triegen.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Trie table generator. -// Used by make*tables tools to generate a go file with trie data structures -// for mapping UTF-8 to a 16-bit value. All but the last byte in a UTF-8 byte -// sequence are used to lookup offsets in the index table to be used for the -// next byte. The last byte is used to index into a table with 16-bit values. 
- -package main - -import ( - "fmt" - "io" -) - -const maxSparseEntries = 16 - -type normCompacter struct { - sparseBlocks [][]uint64 - sparseOffset []uint16 - sparseCount int - name string -} - -func mostFrequentStride(a []uint64) int { - counts := make(map[int]int) - var v int - for _, x := range a { - if stride := int(x) - v; v != 0 && stride >= 0 { - counts[stride]++ - } - v = int(x) - } - var maxs, maxc int - for stride, cnt := range counts { - if cnt > maxc || (cnt == maxc && stride < maxs) { - maxs, maxc = stride, cnt - } - } - return maxs -} - -func countSparseEntries(a []uint64) int { - stride := mostFrequentStride(a) - var v, count int - for _, tv := range a { - if int(tv)-v != stride { - if tv != 0 { - count++ - } - } - v = int(tv) - } - return count -} - -func (c *normCompacter) Size(v []uint64) (sz int, ok bool) { - if n := countSparseEntries(v); n <= maxSparseEntries { - return (n+1)*4 + 2, true - } - return 0, false -} - -func (c *normCompacter) Store(v []uint64) uint32 { - h := uint32(len(c.sparseOffset)) - c.sparseBlocks = append(c.sparseBlocks, v) - c.sparseOffset = append(c.sparseOffset, uint16(c.sparseCount)) - c.sparseCount += countSparseEntries(v) + 1 - return h -} - -func (c *normCompacter) Handler() string { - return c.name + "Sparse.lookup" -} - -func (c *normCompacter) Print(w io.Writer) (retErr error) { - p := func(f string, x ...interface{}) { - if _, err := fmt.Fprintf(w, f, x...); retErr == nil && err != nil { - retErr = err - } - } - - ls := len(c.sparseBlocks) - p("// %sSparseOffset: %d entries, %d bytes\n", c.name, ls, ls*2) - p("var %sSparseOffset = %#v\n\n", c.name, c.sparseOffset) - - ns := c.sparseCount - p("// %sSparseValues: %d entries, %d bytes\n", c.name, ns, ns*4) - p("var %sSparseValues = [%d]valueRange {", c.name, ns) - for i, b := range c.sparseBlocks { - p("\n// Block %#x, offset %#x", i, c.sparseOffset[i]) - var v int - stride := mostFrequentStride(b) - n := countSparseEntries(b) - p("\n{value:%#04x,lo:%#02x},", stride, uint8(n)) - for i, nv := range b { - if int(nv)-v != stride { - if v != 0 { - p(",hi:%#02x},", 0x80+i-1) - } - if nv != 0 { - p("\n{value:%#04x,lo:%#02x", nv, 0x80+i) - } - } - v = int(nv) - } - if v != 0 { - p(",hi:%#02x},", 0x80+len(b)-1) - } - } - p("\n}\n\n") - return -} diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index ae93e2471..563f70429 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -223,7 +223,12 @@ func (lim *Limiter) Wait(ctx context.Context) (err error) { // canceled, or the expected wait time exceeds the Context's Deadline. // The burst limit is ignored if the rate limit is Inf. func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { - if n > lim.burst && lim.limit != Inf { + lim.mu.Lock() + burst := lim.burst + limit := lim.limit + lim.mu.Unlock() + + if n > burst && limit != Inf { return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst) } // Check if ctx is already cancelled @@ -281,6 +286,23 @@ func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) { lim.limit = newLimit } +// SetBurst is shorthand for SetBurstAt(time.Now(), newBurst). +func (lim *Limiter) SetBurst(newBurst int) { + lim.SetBurstAt(time.Now(), newBurst) +} + +// SetBurstAt sets a new burst size for the limiter. 
+func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { + lim.mu.Lock() + defer lim.mu.Unlock() + + now, _, tokens := lim.advance(now) + + lim.last = now + lim.tokens = tokens + lim.burst = newBurst +} + // reserveN is a helper method for AllowN, ReserveN, and WaitN. // maxFutureReserve specifies the maximum reservation wait duration allowed. // reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. @@ -370,5 +392,9 @@ func (limit Limit) durationFromTokens(tokens float64) time.Duration { // tokensFromDuration is a unit conversion function from a time duration to the number of tokens // which could be accumulated during that duration at a rate of limit tokens per second. func (limit Limit) tokensFromDuration(d time.Duration) float64 { - return d.Seconds() * float64(limit) + // Split the integer and fractional parts ourself to minimize rounding errors. + // See golang.org/issues/34861. + sec := float64(d/time.Second) * float64(limit) + nsec := float64(d%time.Second) * float64(limit) + return sec + nsec/1e9 } diff --git a/vendor/golang.org/x/tools/AUTHORS b/vendor/golang.org/x/tools/AUTHORS new file mode 100644 index 000000000..15167cd74 --- /dev/null +++ b/vendor/golang.org/x/tools/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/tools/CONTRIBUTORS b/vendor/golang.org/x/tools/CONTRIBUTORS new file mode 100644 index 000000000..1c4577e96 --- /dev/null +++ b/vendor/golang.org/x/tools/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/golang.org/x/tools/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
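The golang.org/x/time/rate changes above add SetBurst/SetBurstAt and make WaitN read the burst and limit under the limiter's mutex. A minimal usage sketch follows; it is not part of this change set, and the rate, burst, and counts are purely illustrative:

    package main

    import (
        "context"
        "fmt"
        "time"

        "golang.org/x/time/rate"
    )

    func main() {
        // 10 events per second with an initial burst of 1.
        lim := rate.NewLimiter(rate.Limit(10), 1)

        // SetBurst (added in this vendored version) adjusts the burst size at runtime.
        lim.SetBurst(5)

        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
        defer cancel()

        // WaitN now snapshots burst and limit under the mutex, so a concurrent
        // SetBurst call cannot race with its n > burst check.
        if err := lim.WaitN(ctx, 3); err != nil {
            fmt.Println("wait failed:", err)
        }
    }
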
diff --git a/vendor/golang.org/x/tools/PATENTS b/vendor/golang.org/x/tools/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/golang.org/x/tools/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/tools/go/analysis/analysis.go b/vendor/golang.org/x/tools/go/analysis/analysis.go new file mode 100644 index 000000000..ea605f4fd --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/analysis.go @@ -0,0 +1,221 @@ +package analysis + +import ( + "flag" + "fmt" + "go/ast" + "go/token" + "go/types" + "reflect" +) + +// An Analyzer describes an analysis function and its options. +type Analyzer struct { + // The Name of the analyzer must be a valid Go identifier + // as it may appear in command-line flags, URLs, and so on. + Name string + + // Doc is the documentation for the analyzer. + // The part before the first "\n\n" is the title + // (no capital or period, max ~60 letters). + Doc string + + // Flags defines any flags accepted by the analyzer. + // The manner in which these flags are exposed to the user + // depends on the driver which runs the analyzer. + Flags flag.FlagSet + + // Run applies the analyzer to a package. + // It returns an error if the analyzer failed. + // + // On success, the Run function may return a result + // computed by the Analyzer; its type must match ResultType. + // The driver makes this result available as an input to + // another Analyzer that depends directly on this one (see + // Requires) when it analyzes the same package. + // + // To pass analysis results between packages (and thus + // potentially between address spaces), use Facts, which are + // serializable. + Run func(*Pass) (interface{}, error) + + // RunDespiteErrors allows the driver to invoke + // the Run method of this analyzer even on a + // package that contains parse or type errors. + RunDespiteErrors bool + + // Requires is a set of analyzers that must run successfully + // before this one on a given package. This analyzer may inspect + // the outputs produced by each analyzer in Requires. + // The graph over analyzers implied by Requires edges must be acyclic. 
+ // + // Requires establishes a "horizontal" dependency between + // analysis passes (different analyzers, same package). + Requires []*Analyzer + + // ResultType is the type of the optional result of the Run function. + ResultType reflect.Type + + // FactTypes indicates that this analyzer imports and exports + // Facts of the specified concrete types. + // An analyzer that uses facts may assume that its import + // dependencies have been similarly analyzed before it runs. + // Facts must be pointers. + // + // FactTypes establishes a "vertical" dependency between + // analysis passes (same analyzer, different packages). + FactTypes []Fact +} + +func (a *Analyzer) String() string { return a.Name } + +// A Pass provides information to the Run function that +// applies a specific analyzer to a single Go package. +// +// It forms the interface between the analysis logic and the driver +// program, and has both input and an output components. +// +// As in a compiler, one pass may depend on the result computed by another. +// +// The Run function should not call any of the Pass functions concurrently. +type Pass struct { + Analyzer *Analyzer // the identity of the current analyzer + + // syntax and type information + Fset *token.FileSet // file position information + Files []*ast.File // the abstract syntax tree of each file + OtherFiles []string // names of non-Go files of this package + Pkg *types.Package // type information about the package + TypesInfo *types.Info // type information about the syntax trees + TypesSizes types.Sizes // function for computing sizes of types + + // Report reports a Diagnostic, a finding about a specific location + // in the analyzed source code such as a potential mistake. + // It may be called by the Run function. + Report func(Diagnostic) + + // ResultOf provides the inputs to this analysis pass, which are + // the corresponding results of its prerequisite analyzers. + // The map keys are the elements of Analysis.Required, + // and the type of each corresponding value is the required + // analysis's ResultType. + ResultOf map[*Analyzer]interface{} + + // -- facts -- + + // ImportObjectFact retrieves a fact associated with obj. + // Given a value ptr of type *T, where *T satisfies Fact, + // ImportObjectFact copies the value to *ptr. + // + // ImportObjectFact panics if called after the pass is complete. + // ImportObjectFact is not concurrency-safe. + ImportObjectFact func(obj types.Object, fact Fact) bool + + // ImportPackageFact retrieves a fact associated with package pkg, + // which must be this package or one of its dependencies. + // See comments for ImportObjectFact. + ImportPackageFact func(pkg *types.Package, fact Fact) bool + + // ExportObjectFact associates a fact of type *T with the obj, + // replacing any previous fact of that type. + // + // ExportObjectFact panics if it is called after the pass is + // complete, or if obj does not belong to the package being analyzed. + // ExportObjectFact is not concurrency-safe. + ExportObjectFact func(obj types.Object, fact Fact) + + // ExportPackageFact associates a fact with the current package. + // See comments for ExportObjectFact. + ExportPackageFact func(fact Fact) + + // AllPackageFacts returns a new slice containing all package facts of the analysis's FactTypes + // in unspecified order. + // WARNING: This is an experimental API and may change in the future. 
+ AllPackageFacts func() []PackageFact + + // AllObjectFacts returns a new slice containing all object facts of the analysis's FactTypes + // in unspecified order. + // WARNING: This is an experimental API and may change in the future. + AllObjectFacts func() []ObjectFact + + /* Further fields may be added in future. */ + // For example, suggested or applied refactorings. +} + +// PackageFact is a package together with an associated fact. +// WARNING: This is an experimental API and may change in the future. +type PackageFact struct { + Package *types.Package + Fact Fact +} + +// ObjectFact is an object together with an associated fact. +// WARNING: This is an experimental API and may change in the future. +type ObjectFact struct { + Object types.Object + Fact Fact +} + +// Reportf is a helper function that reports a Diagnostic using the +// specified position and formatted error message. +func (pass *Pass) Reportf(pos token.Pos, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + pass.Report(Diagnostic{Pos: pos, Message: msg}) +} + +// The Range interface provides a range. It's equivalent to and satisfied by +// ast.Node. +type Range interface { + Pos() token.Pos // position of first character belonging to the node + End() token.Pos // position of first character immediately after the node +} + +// ReportRangef is a helper function that reports a Diagnostic using the +// range provided. ast.Node values can be passed in as the range because +// they satisfy the Range interface. +func (pass *Pass) ReportRangef(rng Range, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + pass.Report(Diagnostic{Pos: rng.Pos(), End: rng.End(), Message: msg}) +} + +func (pass *Pass) String() string { + return fmt.Sprintf("%s@%s", pass.Analyzer.Name, pass.Pkg.Path()) +} + +// A Fact is an intermediate fact produced during analysis. +// +// Each fact is associated with a named declaration (a types.Object) or +// with a package as a whole. A single object or package may have +// multiple associated facts, but only one of any particular fact type. +// +// A Fact represents a predicate such as "never returns", but does not +// represent the subject of the predicate such as "function F" or "package P". +// +// Facts may be produced in one analysis pass and consumed by another +// analysis pass even if these are in different address spaces. +// If package P imports Q, all facts about Q produced during +// analysis of that package will be available during later analysis of P. +// Facts are analogous to type export data in a build system: +// just as export data enables separate compilation of several passes, +// facts enable "separate analysis". +// +// Each pass (a, p) starts with the set of facts produced by the +// same analyzer a applied to the packages directly imported by p. +// The analysis may add facts to the set, and they may be exported in turn. +// An analysis's Run function may retrieve facts by calling +// Pass.Import{Object,Package}Fact and update them using +// Pass.Export{Object,Package}Fact. +// +// A fact is logically private to its Analysis. To pass values +// between different analyzers, use the results mechanism; +// see Analyzer.Requires, Analyzer.ResultType, and Pass.ResultOf. +// +// A Fact type must be a pointer. +// Facts are encoded and decoded using encoding/gob. +// A Fact may implement the GobEncoder/GobDecoder interfaces +// to customize its encoding. Fact encoding should not fail. +// +// A Fact should not be modified once exported. 
+type Fact interface { + AFact() // dummy method to avoid type errors +} diff --git a/vendor/golang.org/x/tools/go/analysis/diagnostic.go b/vendor/golang.org/x/tools/go/analysis/diagnostic.go new file mode 100644 index 000000000..57eaf6faa --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/diagnostic.go @@ -0,0 +1,61 @@ +package analysis + +import "go/token" + +// A Diagnostic is a message associated with a source location or range. +// +// An Analyzer may return a variety of diagnostics; the optional Category, +// which should be a constant, may be used to classify them. +// It is primarily intended to make it easy to look up documentation. +// +// If End is provided, the diagnostic is specified to apply to the range between +// Pos and End. +type Diagnostic struct { + Pos token.Pos + End token.Pos // optional + Category string // optional + Message string + + // SuggestedFixes contains suggested fixes for a diagnostic which can be used to perform + // edits to a file that address the diagnostic. + // TODO(matloob): Should multiple SuggestedFixes be allowed for a diagnostic? + // Diagnostics should not contain SuggestedFixes that overlap. + // Experimental: This API is experimental and may change in the future. + SuggestedFixes []SuggestedFix // optional + + // Experimental: This API is experimental and may change in the future. + Related []RelatedInformation // optional +} + +// RelatedInformation contains information related to a diagnostic. +// For example, a diagnostic that flags duplicated declarations of a +// variable may include one RelatedInformation per existing +// declaration. +type RelatedInformation struct { + Pos token.Pos + End token.Pos + Message string +} + +// A SuggestedFix is a code change associated with a Diagnostic that a user can choose +// to apply to their code. Usually the SuggestedFix is meant to fix the issue flagged +// by the diagnostic. +// TextEdits for a SuggestedFix should not overlap. TextEdits for a SuggestedFix +// should not contain edits for other packages. +// Experimental: This API is experimental and may change in the future. +type SuggestedFix struct { + // A description for this suggested fix to be shown to a user deciding + // whether to accept it. + Message string + TextEdits []TextEdit +} + +// A TextEdit represents the replacement of the code between Pos and End with the new text. +// Each TextEdit should apply to a single file. End should not be earlier in the file than Pos. +// Experimental: This API is experimental and may change in the future. +type TextEdit struct { + // For a pure insertion, End can either be set to Pos or token.NoPos. + Pos token.Pos + End token.Pos + NewText []byte +} diff --git a/vendor/golang.org/x/tools/go/analysis/doc.go b/vendor/golang.org/x/tools/go/analysis/doc.go new file mode 100644 index 000000000..1b7b7ed5a --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/doc.go @@ -0,0 +1,334 @@ +/* + +The analysis package defines the interface between a modular static +analysis and an analysis driver program. + + +Background + +A static analysis is a function that inspects a package of Go code and +reports a set of diagnostics (typically mistakes in the code), and +perhaps produces other results as well, such as suggested refactorings +or other facts. An analysis that reports mistakes is informally called a +"checker". For example, the printf checker reports mistakes in +fmt.Printf format strings. 
+ +A "modular" analysis is one that inspects one package at a time but can +save information from a lower-level package and use it when inspecting a +higher-level package, analogous to separate compilation in a toolchain. +The printf checker is modular: when it discovers that a function such as +log.Fatalf delegates to fmt.Printf, it records this fact, and checks +calls to that function too, including calls made from another package. + +By implementing a common interface, checkers from a variety of sources +can be easily selected, incorporated, and reused in a wide range of +driver programs including command-line tools (such as vet), text editors and +IDEs, build and test systems (such as go build, Bazel, or Buck), test +frameworks, code review tools, code-base indexers (such as SourceGraph), +documentation viewers (such as godoc), batch pipelines for large code +bases, and so on. + + +Analyzer + +The primary type in the API is Analyzer. An Analyzer statically +describes an analysis function: its name, documentation, flags, +relationship to other analyzers, and of course, its logic. + +To define an analysis, a user declares a (logically constant) variable +of type Analyzer. Here is a typical example from one of the analyzers in +the go/analysis/passes/ subdirectory: + + package unusedresult + + var Analyzer = &analysis.Analyzer{ + Name: "unusedresult", + Doc: "check for unused results of calls to some functions", + Run: run, + ... + } + + func run(pass *analysis.Pass) (interface{}, error) { + ... + } + +An analysis driver is a program such as vet that runs a set of +analyses and prints the diagnostics that they report. +The driver program must import the list of Analyzers it needs. +Typically each Analyzer resides in a separate package. +To add a new Analyzer to an existing driver, add another item to the list: + + import ( "unusedresult"; "nilness"; "printf" ) + + var analyses = []*analysis.Analyzer{ + unusedresult.Analyzer, + nilness.Analyzer, + printf.Analyzer, + } + +A driver may use the name, flags, and documentation to provide on-line +help that describes the analyses it performs. +The doc comment contains a brief one-line summary, +optionally followed by paragraphs of explanation. +The vet command, shown below, is an example of a driver that runs +multiple analyzers. It is based on the multichecker package +(see the "Standalone commands" section for details). + + $ go build golang.org/x/tools/go/analysis/cmd/vet + $ ./vet help + vet is a tool for static analysis of Go programs. + + Usage: vet [-flag] [package] + + Registered analyzers: + + asmdecl report mismatches between assembly files and Go declarations + assign check for useless assignments + atomic check for common mistakes using the sync/atomic package + ... + unusedresult check for unused results of calls to some functions + + $ ./vet help unusedresult + unusedresult: check for unused results of calls to some functions + + Analyzer flags: + + -unusedresult.funcs value + comma-separated list of functions whose results must be used (default Error,String) + -unusedresult.stringmethods value + comma-separated list of names of methods of type func() string whose results must be used + + Some functions like fmt.Errorf return a result and have no side effects, + so it is always a mistake to discard the result. This analyzer reports + calls to certain functions in which the result of the call is ignored. + + The set of functions may be controlled using flags. 
+ +The Analyzer type has more fields besides those shown above: + + type Analyzer struct { + Name string + Doc string + Flags flag.FlagSet + Run func(*Pass) (interface{}, error) + RunDespiteErrors bool + ResultType reflect.Type + Requires []*Analyzer + FactTypes []Fact + } + +The Flags field declares a set of named (global) flag variables that +control analysis behavior. Unlike vet, analysis flags are not declared +directly in the command line FlagSet; it is up to the driver to set the +flag variables. A driver for a single analysis, a, might expose its flag +f directly on the command line as -f, whereas a driver for multiple +analyses might prefix the flag name by the analysis name (-a.f) to avoid +ambiguity. An IDE might expose the flags through a graphical interface, +and a batch pipeline might configure them from a config file. +See the "findcall" analyzer for an example of flags in action. + +The RunDespiteErrors flag indicates whether the analysis is equipped to +handle ill-typed code. If not, the driver will skip the analysis if +there were parse or type errors. +The optional ResultType field specifies the type of the result value +computed by this analysis and made available to other analyses. +The Requires field specifies a list of analyses upon which +this one depends and whose results it may access, and it constrains the +order in which a driver may run analyses. +The FactTypes field is discussed in the section on Modularity. +The analysis package provides a Validate function to perform basic +sanity checks on an Analyzer, such as that its Requires graph is +acyclic, its fact and result types are unique, and so on. + +Finally, the Run field contains a function to be called by the driver to +execute the analysis on a single package. The driver passes it an +instance of the Pass type. + + +Pass + +A Pass describes a single unit of work: the application of a particular +Analyzer to a particular package of Go code. +The Pass provides information to the Analyzer's Run function about the +package being analyzed, and provides operations to the Run function for +reporting diagnostics and other information back to the driver. + + type Pass struct { + Fset *token.FileSet + Files []*ast.File + OtherFiles []string + Pkg *types.Package + TypesInfo *types.Info + ResultOf map[*Analyzer]interface{} + Report func(Diagnostic) + ... + } + +The Fset, Files, Pkg, and TypesInfo fields provide the syntax trees, +type information, and source positions for a single package of Go code. + +The OtherFiles field provides the names, but not the contents, of non-Go +files such as assembly that are part of this package. See the "asmdecl" +or "buildtags" analyzers for examples of loading non-Go files and reporting +diagnostics against them. + +The ResultOf field provides the results computed by the analyzers +required by this one, as expressed in its Analyzer.Requires field. The +driver runs the required analyzers first and makes their results +available in this map. Each Analyzer must return a value of the type +described in its Analyzer.ResultType field. +For example, the "ctrlflow" analyzer returns a *ctrlflow.CFGs, which +provides a control-flow graph for each function in the package (see +golang.org/x/tools/go/cfg); the "inspect" analyzer returns a value that +enables other Analyzers to traverse the syntax trees of the package more +efficiently; and the "buildssa" analyzer constructs an SSA-form +intermediate representation. 
+Each of these Analyzers extends the capabilities of later Analyzers +without adding a dependency to the core API, so an analysis tool pays +only for the extensions it needs. + +The Report function emits a diagnostic, a message associated with a +source position. For most analyses, diagnostics are their primary +result. +For convenience, Pass provides a helper method, Reportf, to report a new +diagnostic by formatting a string. +Diagnostic is defined as: + + type Diagnostic struct { + Pos token.Pos + Category string // optional + Message string + } + +The optional Category field is a short identifier that classifies the +kind of message when an analysis produces several kinds of diagnostic. + +Most Analyzers inspect typed Go syntax trees, but a few, such as asmdecl +and buildtag, inspect the raw text of Go source files or even non-Go +files such as assembly. To report a diagnostic against a line of a +raw text file, use the following sequence: + + content, err := ioutil.ReadFile(filename) + if err != nil { ... } + tf := fset.AddFile(filename, -1, len(content)) + tf.SetLinesForContent(content) + ... + pass.Reportf(tf.LineStart(line), "oops") + + +Modular analysis with Facts + +To improve efficiency and scalability, large programs are routinely +built using separate compilation: units of the program are compiled +separately, and recompiled only when one of their dependencies changes; +independent modules may be compiled in parallel. The same technique may +be applied to static analyses, for the same benefits. Such analyses are +described as "modular". + +A compiler’s type checker is an example of a modular static analysis. +Many other checkers we would like to apply to Go programs can be +understood as alternative or non-standard type systems. For example, +vet's printf checker infers whether a function has the "printf wrapper" +type, and it applies stricter checks to calls of such functions. In +addition, it records which functions are printf wrappers for use by +later analysis passes to identify other printf wrappers by induction. +A result such as “f is a printf wrapper” that is not interesting by +itself but serves as a stepping stone to an interesting result (such as +a diagnostic) is called a "fact". + +The analysis API allows an analysis to define new types of facts, to +associate facts of these types with objects (named entities) declared +within the current package, or with the package as a whole, and to query +for an existing fact of a given type associated with an object or +package. + +An Analyzer that uses facts must declare their types: + + var Analyzer = &analysis.Analyzer{ + Name: "printf", + FactTypes: []analysis.Fact{new(isWrapper)}, + ... + } + + type isWrapper struct{} // => *types.Func f “is a printf wrapper” + +The driver program ensures that facts for a pass’s dependencies are +generated before analyzing the package and is responsible for propagating +facts from one package to another, possibly across address spaces. +Consequently, Facts must be serializable. The API requires that drivers +use the gob encoding, an efficient, robust, self-describing binary +protocol. A fact type may implement the GobEncoder/GobDecoder interfaces +if the default encoding is unsuitable. Facts should be stateless. + +The Pass type has functions to import and export facts, +associated either with an object or with a package: + + type Pass struct { + ... 
+ ExportObjectFact func(types.Object, Fact) + ImportObjectFact func(types.Object, Fact) bool + + ExportPackageFact func(fact Fact) + ImportPackageFact func(*types.Package, Fact) bool + } + +An Analyzer may only export facts associated with the current package or +its objects, though it may import facts from any package or object that +is an import dependency of the current package. + +Conceptually, ExportObjectFact(obj, fact) inserts fact into a hidden map keyed by +the pair (obj, TypeOf(fact)), and the ImportObjectFact function +retrieves the entry from this map and copies its value into the variable +pointed to by fact. This scheme assumes that the concrete type of fact +is a pointer; this assumption is checked by the Validate function. +See the "printf" analyzer for an example of object facts in action. + +Some driver implementations (such as those based on Bazel and Blaze) do +not currently apply analyzers to packages of the standard library. +Therefore, for best results, analyzer authors should not rely on +analysis facts being available for standard packages. +For example, although the printf checker is capable of deducing during +analysis of the log package that log.Printf is a printf wrapper, +this fact is built in to the analyzer so that it correctly checks +calls to log.Printf even when run in a driver that does not apply +it to standard packages. We would like to remove this limitation in future. + + +Testing an Analyzer + +The analysistest subpackage provides utilities for testing an Analyzer. +In a few lines of code, it is possible to run an analyzer on a package +of testdata files and check that it reported all the expected +diagnostics and facts (and no more). Expectations are expressed using +"// want ..." comments in the input code. + + +Standalone commands + +Analyzers are provided in the form of packages that a driver program is +expected to import. The vet command imports a set of several analyzers, +but users may wish to define their own analysis commands that perform +additional checks. To simplify the task of creating an analysis command, +either for a single analyzer or for a whole suite, we provide the +singlechecker and multichecker subpackages. + +The singlechecker package provides the main function for a command that +runs one analyzer. By convention, each analyzer such as +go/passes/findcall should be accompanied by a singlechecker-based +command such as go/analysis/passes/findcall/cmd/findcall, defined in its +entirety as: + + package main + + import ( + "golang.org/x/tools/go/analysis/passes/findcall" + "golang.org/x/tools/go/analysis/singlechecker" + ) + + func main() { singlechecker.Main(findcall.Analyzer) } + +A tool that provides multiple analyzers can use multichecker in a +similar way, giving it the list of Analyzers. + +*/ +package analysis diff --git a/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go b/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go new file mode 100644 index 000000000..2856df137 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go @@ -0,0 +1,49 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package inspect defines an Analyzer that provides an AST inspector +// (golang.org/x/tools/go/ast/inspect.Inspect) for the syntax trees of a +// package. It is only a building block for other analyzers. 
+// +// Example of use in another analysis: +// +// import ( +// "golang.org/x/tools/go/analysis" +// "golang.org/x/tools/go/analysis/passes/inspect" +// "golang.org/x/tools/go/ast/inspector" +// ) +// +// var Analyzer = &analysis.Analyzer{ +// ... +// Requires: []*analysis.Analyzer{inspect.Analyzer}, +// } +// +// func run(pass *analysis.Pass) (interface{}, error) { +// inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) +// inspect.Preorder(nil, func(n ast.Node) { +// ... +// }) +// return nil +// } +// +package inspect + +import ( + "reflect" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ast/inspector" +) + +var Analyzer = &analysis.Analyzer{ + Name: "inspect", + Doc: "optimize AST traversal for later passes", + Run: run, + RunDespiteErrors: true, + ResultType: reflect.TypeOf(new(inspector.Inspector)), +} + +func run(pass *analysis.Pass) (interface{}, error) { + return inspector.New(pass.Files), nil +} diff --git a/vendor/golang.org/x/tools/go/analysis/validate.go b/vendor/golang.org/x/tools/go/analysis/validate.go new file mode 100644 index 000000000..be9814346 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/validate.go @@ -0,0 +1,97 @@ +package analysis + +import ( + "fmt" + "reflect" + "unicode" +) + +// Validate reports an error if any of the analyzers are misconfigured. +// Checks include: +// that the name is a valid identifier; +// that the Requires graph is acyclic; +// that analyzer fact types are unique; +// that each fact type is a pointer. +func Validate(analyzers []*Analyzer) error { + // Map each fact type to its sole generating analyzer. + factTypes := make(map[reflect.Type]*Analyzer) + + // Traverse the Requires graph, depth first. + const ( + white = iota + grey + black + finished + ) + color := make(map[*Analyzer]uint8) + var visit func(a *Analyzer) error + visit = func(a *Analyzer) error { + if a == nil { + return fmt.Errorf("nil *Analyzer") + } + if color[a] == white { + color[a] = grey + + // names + if !validIdent(a.Name) { + return fmt.Errorf("invalid analyzer name %q", a) + } + + if a.Doc == "" { + return fmt.Errorf("analyzer %q is undocumented", a) + } + + // fact types + for _, f := range a.FactTypes { + if f == nil { + return fmt.Errorf("analyzer %s has nil FactType", a) + } + t := reflect.TypeOf(f) + if prev := factTypes[t]; prev != nil { + return fmt.Errorf("fact type %s registered by two analyzers: %v, %v", + t, a, prev) + } + if t.Kind() != reflect.Ptr { + return fmt.Errorf("%s: fact type %s is not a pointer", a, t) + } + factTypes[t] = a + } + + // recursion + for i, req := range a.Requires { + if err := visit(req); err != nil { + return fmt.Errorf("%s.Requires[%d]: %v", a.Name, i, err) + } + } + color[a] = black + } + + return nil + } + for _, a := range analyzers { + if err := visit(a); err != nil { + return err + } + } + + // Reject duplicates among analyzers. + // Precondition: color[a] == black. + // Postcondition: color[a] == finished. 
+ for _, a := range analyzers { + if color[a] == finished { + return fmt.Errorf("duplicate analyzer: %s", a.Name) + } + color[a] = finished + } + + return nil +} + +func validIdent(name string) bool { + for i, r := range name { + if !(r == '_' || unicode.IsLetter(r) || i > 0 && unicode.IsDigit(r)) { + return false + } + } + return name != "" +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go new file mode 100644 index 000000000..6b7052b89 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -0,0 +1,627 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +// This file defines utilities for working with source positions. + +import ( + "fmt" + "go/ast" + "go/token" + "sort" +) + +// PathEnclosingInterval returns the node that encloses the source +// interval [start, end), and all its ancestors up to the AST root. +// +// The definition of "enclosing" used by this function considers +// additional whitespace abutting a node to be enclosed by it. +// In this example: +// +// z := x + y // add them +// <-A-> +// <----B-----> +// +// the ast.BinaryExpr(+) node is considered to enclose interval B +// even though its [Pos()..End()) is actually only interval A. +// This behaviour makes user interfaces more tolerant of imperfect +// input. +// +// This function treats tokens as nodes, though they are not included +// in the result. e.g. PathEnclosingInterval("+") returns the +// enclosing ast.BinaryExpr("x + y"). +// +// If start==end, the 1-char interval following start is used instead. +// +// The 'exact' result is true if the interval contains only path[0] +// and perhaps some adjacent whitespace. It is false if the interval +// overlaps multiple children of path[0], or if it contains only +// interior whitespace of path[0]. +// In this example: +// +// z := x + y // add them +// <--C--> <---E--> +// ^ +// D +// +// intervals C, D and E are inexact. C is contained by the +// z-assignment statement, because it spans three of its children (:=, +// x, +). So too is the 1-char interval D, because it contains only +// interior whitespace of the assignment. E is considered interior +// whitespace of the BlockStmt containing the assignment. +// +// Precondition: [start, end) both lie within the same file as root. +// TODO(adonovan): return (nil, false) in this case and remove precond. +// Requires FileSet; see loader.tokenFileContainsPos. +// +// Postcondition: path is never nil; it always contains at least 'root'. +// +func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) { + // fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging + + // Precondition: node.[Pos..End) and adjoining whitespace contain [start, end). + var visit func(node ast.Node) bool + visit = func(node ast.Node) bool { + path = append(path, node) + + nodePos := node.Pos() + nodeEnd := node.End() + + // fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging + + // Intersect [start, end) with interval of node. + if start < nodePos { + start = nodePos + } + if end > nodeEnd { + end = nodeEnd + } + + // Find sole child that contains [start, end). + children := childrenOf(node) + l := len(children) + for i, child := range children { + // [childPos, childEnd) is unaugmented interval of child. 
+ childPos := child.Pos() + childEnd := child.End() + + // [augPos, augEnd) is whitespace-augmented interval of child. + augPos := childPos + augEnd := childEnd + if i > 0 { + augPos = children[i-1].End() // start of preceding whitespace + } + if i < l-1 { + nextChildPos := children[i+1].Pos() + // Does [start, end) lie between child and next child? + if start >= augEnd && end <= nextChildPos { + return false // inexact match + } + augEnd = nextChildPos // end of following whitespace + } + + // fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n", + // i, augPos, augEnd, start, end) // debugging + + // Does augmented child strictly contain [start, end)? + if augPos <= start && end <= augEnd { + _, isToken := child.(tokenNode) + return isToken || visit(child) + } + + // Does [start, end) overlap multiple children? + // i.e. left-augmented child contains start + // but LR-augmented child does not contain end. + if start < childEnd && end > augEnd { + break + } + } + + // No single child contained [start, end), + // so node is the result. Is it exact? + + // (It's tempting to put this condition before the + // child loop, but it gives the wrong result in the + // case where a node (e.g. ExprStmt) and its sole + // child have equal intervals.) + if start == nodePos && end == nodeEnd { + return true // exact match + } + + return false // inexact: overlaps multiple children + } + + if start > end { + start, end = end, start + } + + if start < root.End() && end > root.Pos() { + if start == end { + end = start + 1 // empty interval => interval of size 1 + } + exact = visit(root) + + // Reverse the path: + for i, l := 0, len(path); i < l/2; i++ { + path[i], path[l-1-i] = path[l-1-i], path[i] + } + } else { + // Selection lies within whitespace preceding the + // first (or following the last) declaration in the file. + // The result nonetheless always includes the ast.File. + path = append(path, root) + } + + return +} + +// tokenNode is a dummy implementation of ast.Node for a single token. +// They are used transiently by PathEnclosingInterval but never escape +// this package. +// +type tokenNode struct { + pos token.Pos + end token.Pos +} + +func (n tokenNode) Pos() token.Pos { + return n.pos +} + +func (n tokenNode) End() token.Pos { + return n.end +} + +func tok(pos token.Pos, len int) ast.Node { + return tokenNode{pos, pos + token.Pos(len)} +} + +// childrenOf returns the direct non-nil children of ast.Node n. +// It may include fake ast.Node implementations for bare tokens. +// it is not safe to call (e.g.) ast.Walk on such nodes. +// +func childrenOf(n ast.Node) []ast.Node { + var children []ast.Node + + // First add nodes for all true subtrees. + ast.Inspect(n, func(node ast.Node) bool { + if node == n { // push n + return true // recur + } + if node != nil { // push child + children = append(children, node) + } + return false // no recursion + }) + + // Then add fake Nodes for bare tokens. 
+ switch n := n.(type) { + case *ast.ArrayType: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Elt.End(), len("]"))) + + case *ast.AssignStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.BasicLit: + children = append(children, + tok(n.ValuePos, len(n.Value))) + + case *ast.BinaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.BlockStmt: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("}"))) + + case *ast.BranchStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.CallExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + if n.Ellipsis != 0 { + children = append(children, tok(n.Ellipsis, len("..."))) + } + + case *ast.CaseClause: + if n.List == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.ChanType: + switch n.Dir { + case ast.RECV: + children = append(children, tok(n.Begin, len("<-chan"))) + case ast.SEND: + children = append(children, tok(n.Begin, len("chan<-"))) + case ast.RECV | ast.SEND: + children = append(children, tok(n.Begin, len("chan"))) + } + + case *ast.CommClause: + if n.Comm == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.Comment: + // nop + + case *ast.CommentGroup: + // nop + + case *ast.CompositeLit: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("{"))) + + case *ast.DeclStmt: + // nop + + case *ast.DeferStmt: + children = append(children, + tok(n.Defer, len("defer"))) + + case *ast.Ellipsis: + children = append(children, + tok(n.Ellipsis, len("..."))) + + case *ast.EmptyStmt: + // nop + + case *ast.ExprStmt: + // nop + + case *ast.Field: + // TODO(adonovan): Field.{Doc,Comment,Tag}? + + case *ast.FieldList: + children = append(children, + tok(n.Opening, len("(")), + tok(n.Closing, len(")"))) + + case *ast.File: + // TODO test: Doc + children = append(children, + tok(n.Package, len("package"))) + + case *ast.ForStmt: + children = append(children, + tok(n.For, len("for"))) + + case *ast.FuncDecl: + // TODO(adonovan): FuncDecl.Comment? + + // Uniquely, FuncDecl breaks the invariant that + // preorder traversal yields tokens in lexical order: + // in fact, FuncDecl.Recv precedes FuncDecl.Type.Func. + // + // As a workaround, we inline the case for FuncType + // here and order things correctly. 
+ // + children = nil // discard ast.Walk(FuncDecl) info subtrees + children = append(children, tok(n.Type.Func, len("func"))) + if n.Recv != nil { + children = append(children, n.Recv) + } + children = append(children, n.Name) + if n.Type.Params != nil { + children = append(children, n.Type.Params) + } + if n.Type.Results != nil { + children = append(children, n.Type.Results) + } + if n.Body != nil { + children = append(children, n.Body) + } + + case *ast.FuncLit: + // nop + + case *ast.FuncType: + if n.Func != 0 { + children = append(children, + tok(n.Func, len("func"))) + } + + case *ast.GenDecl: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + if n.Lparen != 0 { + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + } + + case *ast.GoStmt: + children = append(children, + tok(n.Go, len("go"))) + + case *ast.Ident: + children = append(children, + tok(n.NamePos, len(n.Name))) + + case *ast.IfStmt: + children = append(children, + tok(n.If, len("if"))) + + case *ast.ImportSpec: + // TODO(adonovan): ImportSpec.{Doc,EndPos}? + + case *ast.IncDecStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.IndexExpr: + children = append(children, + tok(n.Lbrack, len("{")), + tok(n.Rbrack, len("}"))) + + case *ast.InterfaceType: + children = append(children, + tok(n.Interface, len("interface"))) + + case *ast.KeyValueExpr: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.LabeledStmt: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.MapType: + children = append(children, + tok(n.Map, len("map"))) + + case *ast.ParenExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.RangeStmt: + children = append(children, + tok(n.For, len("for")), + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.ReturnStmt: + children = append(children, + tok(n.Return, len("return"))) + + case *ast.SelectStmt: + children = append(children, + tok(n.Select, len("select"))) + + case *ast.SelectorExpr: + // nop + + case *ast.SendStmt: + children = append(children, + tok(n.Arrow, len("<-"))) + + case *ast.SliceExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.StarExpr: + children = append(children, tok(n.Star, len("*"))) + + case *ast.StructType: + children = append(children, tok(n.Struct, len("struct"))) + + case *ast.SwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.TypeAssertExpr: + children = append(children, + tok(n.Lparen-1, len(".")), + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.TypeSpec: + // TODO(adonovan): TypeSpec.{Doc,Comment}? + + case *ast.TypeSwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.UnaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.ValueSpec: + // TODO(adonovan): ValueSpec.{Doc,Comment}? + + case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt: + // nop + } + + // TODO(adonovan): opt: merge the logic of ast.Inspect() into + // the switch above so we can make interleaved callbacks for + // both Nodes and Tokens in the right order and avoid the need + // to sort. 
+ sort.Sort(byPos(children)) + + return children +} + +type byPos []ast.Node + +func (sl byPos) Len() int { + return len(sl) +} +func (sl byPos) Less(i, j int) bool { + return sl[i].Pos() < sl[j].Pos() +} +func (sl byPos) Swap(i, j int) { + sl[i], sl[j] = sl[j], sl[i] +} + +// NodeDescription returns a description of the concrete type of n suitable +// for a user interface. +// +// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident, +// StarExpr) we could be much more specific given the path to the AST +// root. Perhaps we should do that. +// +func NodeDescription(n ast.Node) string { + switch n := n.(type) { + case *ast.ArrayType: + return "array type" + case *ast.AssignStmt: + return "assignment" + case *ast.BadDecl: + return "bad declaration" + case *ast.BadExpr: + return "bad expression" + case *ast.BadStmt: + return "bad statement" + case *ast.BasicLit: + return "basic literal" + case *ast.BinaryExpr: + return fmt.Sprintf("binary %s operation", n.Op) + case *ast.BlockStmt: + return "block" + case *ast.BranchStmt: + switch n.Tok { + case token.BREAK: + return "break statement" + case token.CONTINUE: + return "continue statement" + case token.GOTO: + return "goto statement" + case token.FALLTHROUGH: + return "fall-through statement" + } + case *ast.CallExpr: + if len(n.Args) == 1 && !n.Ellipsis.IsValid() { + return "function call (or conversion)" + } + return "function call" + case *ast.CaseClause: + return "case clause" + case *ast.ChanType: + return "channel type" + case *ast.CommClause: + return "communication clause" + case *ast.Comment: + return "comment" + case *ast.CommentGroup: + return "comment group" + case *ast.CompositeLit: + return "composite literal" + case *ast.DeclStmt: + return NodeDescription(n.Decl) + " statement" + case *ast.DeferStmt: + return "defer statement" + case *ast.Ellipsis: + return "ellipsis" + case *ast.EmptyStmt: + return "empty statement" + case *ast.ExprStmt: + return "expression statement" + case *ast.Field: + // Can be any of these: + // struct {x, y int} -- struct field(s) + // struct {T} -- anon struct field + // interface {I} -- interface embedding + // interface {f()} -- interface method + // func (A) func(B) C -- receiver, param(s), result(s) + return "field/method/parameter" + case *ast.FieldList: + return "field/method/parameter list" + case *ast.File: + return "source file" + case *ast.ForStmt: + return "for loop" + case *ast.FuncDecl: + return "function declaration" + case *ast.FuncLit: + return "function literal" + case *ast.FuncType: + return "function type" + case *ast.GenDecl: + switch n.Tok { + case token.IMPORT: + return "import declaration" + case token.CONST: + return "constant declaration" + case token.TYPE: + return "type declaration" + case token.VAR: + return "variable declaration" + } + case *ast.GoStmt: + return "go statement" + case *ast.Ident: + return "identifier" + case *ast.IfStmt: + return "if statement" + case *ast.ImportSpec: + return "import specification" + case *ast.IncDecStmt: + if n.Tok == token.INC { + return "increment statement" + } + return "decrement statement" + case *ast.IndexExpr: + return "index expression" + case *ast.InterfaceType: + return "interface type" + case *ast.KeyValueExpr: + return "key/value association" + case *ast.LabeledStmt: + return "statement label" + case *ast.MapType: + return "map type" + case *ast.Package: + return "package" + case *ast.ParenExpr: + return "parenthesized " + NodeDescription(n.X) + case *ast.RangeStmt: + return "range loop" + case *ast.ReturnStmt: + return 
"return statement" + case *ast.SelectStmt: + return "select statement" + case *ast.SelectorExpr: + return "selector" + case *ast.SendStmt: + return "channel send" + case *ast.SliceExpr: + return "slice expression" + case *ast.StarExpr: + return "*-operation" // load/store expr or pointer type + case *ast.StructType: + return "struct type" + case *ast.SwitchStmt: + return "switch statement" + case *ast.TypeAssertExpr: + return "type assertion" + case *ast.TypeSpec: + return "type specification" + case *ast.TypeSwitchStmt: + return "type switch" + case *ast.UnaryExpr: + return fmt.Sprintf("unary %s operation", n.Op) + case *ast.ValueSpec: + return "value specification" + + } + panic(fmt.Sprintf("unexpected node type: %T", n)) +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go new file mode 100644 index 000000000..3e4b19536 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -0,0 +1,481 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package astutil contains common utilities for working with the Go AST. +package astutil // import "golang.org/x/tools/go/ast/astutil" + +import ( + "fmt" + "go/ast" + "go/token" + "strconv" + "strings" +) + +// AddImport adds the import path to the file f, if absent. +func AddImport(fset *token.FileSet, f *ast.File, path string) (added bool) { + return AddNamedImport(fset, f, "", path) +} + +// AddNamedImport adds the import with the given name and path to the file f, if absent. +// If name is not empty, it is used to rename the import. +// +// For example, calling +// AddNamedImport(fset, f, "pathpkg", "path") +// adds +// import pathpkg "path" +func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added bool) { + if imports(f, name, path) { + return false + } + + newImport := &ast.ImportSpec{ + Path: &ast.BasicLit{ + Kind: token.STRING, + Value: strconv.Quote(path), + }, + } + if name != "" { + newImport.Name = &ast.Ident{Name: name} + } + + // Find an import decl to add to. + // The goal is to find an existing import + // whose import path has the longest shared + // prefix with path. + var ( + bestMatch = -1 // length of longest shared prefix + lastImport = -1 // index in f.Decls of the file's final import decl + impDecl *ast.GenDecl // import decl containing the best match + impIndex = -1 // spec index in impDecl containing the best match + + isThirdPartyPath = isThirdParty(path) + ) + for i, decl := range f.Decls { + gen, ok := decl.(*ast.GenDecl) + if ok && gen.Tok == token.IMPORT { + lastImport = i + // Do not add to import "C", to avoid disrupting the + // association with its doc comment, breaking cgo. + if declImports(gen, "C") { + continue + } + + // Match an empty import decl if that's all that is available. + if len(gen.Specs) == 0 && bestMatch == -1 { + impDecl = gen + } + + // Compute longest shared prefix with imports in this group and find best + // matched import spec. + // 1. Always prefer import spec with longest shared prefix. + // 2. While match length is 0, + // - for stdlib package: prefer first import spec. + // - for third party package: prefer first third party import spec. + // We cannot use last import spec as best match for third party package + // because grouped imports are usually placed last by goimports -local + // flag. + // See issue #19190. 
+ seenAnyThirdParty := false + for j, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + p := importPath(impspec) + n := matchLen(p, path) + if n > bestMatch || (bestMatch == 0 && !seenAnyThirdParty && isThirdPartyPath) { + bestMatch = n + impDecl = gen + impIndex = j + } + seenAnyThirdParty = seenAnyThirdParty || isThirdParty(p) + } + } + } + + // If no import decl found, add one after the last import. + if impDecl == nil { + impDecl = &ast.GenDecl{ + Tok: token.IMPORT, + } + if lastImport >= 0 { + impDecl.TokPos = f.Decls[lastImport].End() + } else { + // There are no existing imports. + // Our new import, preceded by a blank line, goes after the package declaration + // and after the comment, if any, that starts on the same line as the + // package declaration. + impDecl.TokPos = f.Package + + file := fset.File(f.Package) + pkgLine := file.Line(f.Package) + for _, c := range f.Comments { + if file.Line(c.Pos()) > pkgLine { + break + } + // +2 for a blank line + impDecl.TokPos = c.End() + 2 + } + } + f.Decls = append(f.Decls, nil) + copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:]) + f.Decls[lastImport+1] = impDecl + } + + // Insert new import at insertAt. + insertAt := 0 + if impIndex >= 0 { + // insert after the found import + insertAt = impIndex + 1 + } + impDecl.Specs = append(impDecl.Specs, nil) + copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:]) + impDecl.Specs[insertAt] = newImport + pos := impDecl.Pos() + if insertAt > 0 { + // If there is a comment after an existing import, preserve the comment + // position by adding the new import after the comment. + if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil { + pos = spec.Comment.End() + } else { + // Assign same position as the previous import, + // so that the sorter sees it as being in the same block. + pos = impDecl.Specs[insertAt-1].Pos() + } + } + if newImport.Name != nil { + newImport.Name.NamePos = pos + } + newImport.Path.ValuePos = pos + newImport.EndPos = pos + + // Clean up parens. impDecl contains at least one spec. + if len(impDecl.Specs) == 1 { + // Remove unneeded parens. + impDecl.Lparen = token.NoPos + } else if !impDecl.Lparen.IsValid() { + // impDecl needs parens added. + impDecl.Lparen = impDecl.Specs[0].Pos() + } + + f.Imports = append(f.Imports, newImport) + + if len(f.Decls) <= 1 { + return true + } + + // Merge all the import declarations into the first one. + var first *ast.GenDecl + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") { + continue + } + if first == nil { + first = gen + continue // Don't touch the first one. + } + // We now know there is more than one package in this import + // declaration. Ensure that it ends up parenthesized. + first.Lparen = first.Pos() + // Move the imports of the other import declaration to the first one. + for _, spec := range gen.Specs { + spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() + first.Specs = append(first.Specs, spec) + } + f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + i-- + } + + return true +} + +func isThirdParty(importPath string) bool { + // Third party package import path usually contains "." (".com", ".org", ...) + // This logic is taken from golang.org/x/tools/imports package. + return strings.Contains(importPath, ".") +} + +// DeleteImport deletes the import path from the file f, if present. +// If there are duplicate import declarations, all matching ones are deleted. 
+func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) { + return DeleteNamedImport(fset, f, "", path) +} + +// DeleteNamedImport deletes the import with the given name and path from the file f, if present. +// If there are duplicate import declarations, all matching ones are deleted. +func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) { + var delspecs []*ast.ImportSpec + var delcomments []*ast.CommentGroup + + // Find the import nodes that import path, if any. + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT { + continue + } + for j := 0; j < len(gen.Specs); j++ { + spec := gen.Specs[j] + impspec := spec.(*ast.ImportSpec) + if importName(impspec) != name || importPath(impspec) != path { + continue + } + + // We found an import spec that imports path. + // Delete it. + delspecs = append(delspecs, impspec) + deleted = true + copy(gen.Specs[j:], gen.Specs[j+1:]) + gen.Specs = gen.Specs[:len(gen.Specs)-1] + + // If this was the last import spec in this decl, + // delete the decl, too. + if len(gen.Specs) == 0 { + copy(f.Decls[i:], f.Decls[i+1:]) + f.Decls = f.Decls[:len(f.Decls)-1] + i-- + break + } else if len(gen.Specs) == 1 { + if impspec.Doc != nil { + delcomments = append(delcomments, impspec.Doc) + } + if impspec.Comment != nil { + delcomments = append(delcomments, impspec.Comment) + } + for _, cg := range f.Comments { + // Found comment on the same line as the import spec. + if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line { + delcomments = append(delcomments, cg) + break + } + } + + spec := gen.Specs[0].(*ast.ImportSpec) + + // Move the documentation right after the import decl. + if spec.Doc != nil { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Doc.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + } + for _, cg := range f.Comments { + if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + break + } + } + } + if j > 0 { + lastImpspec := gen.Specs[j-1].(*ast.ImportSpec) + lastLine := fset.Position(lastImpspec.Path.ValuePos).Line + line := fset.Position(impspec.Path.ValuePos).Line + + // We deleted an entry but now there may be + // a blank line-sized hole where the import was. + if line-lastLine > 1 { + // There was a blank line immediately preceding the deleted import, + // so there's no need to close the hole. + // Do nothing. + } else if line != fset.File(gen.Rparen).LineCount() { + // There was no blank line. Close the hole. + fset.File(gen.Rparen).MergeLine(line) + } + } + j-- + } + } + + // Delete imports from f.Imports. + for i := 0; i < len(f.Imports); i++ { + imp := f.Imports[i] + for j, del := range delspecs { + if imp == del { + copy(f.Imports[i:], f.Imports[i+1:]) + f.Imports = f.Imports[:len(f.Imports)-1] + copy(delspecs[j:], delspecs[j+1:]) + delspecs = delspecs[:len(delspecs)-1] + i-- + break + } + } + } + + // Delete comments from f.Comments. 
+ for i := 0; i < len(f.Comments); i++ { + cg := f.Comments[i] + for j, del := range delcomments { + if cg == del { + copy(f.Comments[i:], f.Comments[i+1:]) + f.Comments = f.Comments[:len(f.Comments)-1] + copy(delcomments[j:], delcomments[j+1:]) + delcomments = delcomments[:len(delcomments)-1] + i-- + break + } + } + } + + if len(delspecs) > 0 { + panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs)) + } + + return +} + +// RewriteImport rewrites any import of path oldPath to path newPath. +func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) { + for _, imp := range f.Imports { + if importPath(imp) == oldPath { + rewrote = true + // record old End, because the default is to compute + // it using the length of imp.Path.Value. + imp.EndPos = imp.End() + imp.Path.Value = strconv.Quote(newPath) + } + } + return +} + +// UsesImport reports whether a given import is used. +func UsesImport(f *ast.File, path string) (used bool) { + spec := importSpec(f, path) + if spec == nil { + return + } + + name := spec.Name.String() + switch name { + case "": + // If the package name is not explicitly specified, + // make an educated guess. This is not guaranteed to be correct. + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 { + name = path + } else { + name = path[lastSlash+1:] + } + case "_", ".": + // Not sure if this import is used - err on the side of caution. + return true + } + + ast.Walk(visitFn(func(n ast.Node) { + sel, ok := n.(*ast.SelectorExpr) + if ok && isTopName(sel.X, name) { + used = true + } + }), f) + + return +} + +type visitFn func(node ast.Node) + +func (fn visitFn) Visit(node ast.Node) ast.Visitor { + fn(node) + return fn +} + +// imports reports whether f has an import with the specified name and path. +func imports(f *ast.File, name, path string) bool { + for _, s := range f.Imports { + if importName(s) == name && importPath(s) == path { + return true + } + } + return false +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. +func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if importPath(s) == path { + return s + } + } + return nil +} + +// importName returns the name of s, +// or "" if the import is not named. +func importName(s *ast.ImportSpec) string { + if s.Name == nil { + return "" + } + return s.Name.Name +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err != nil { + return "" + } + return t +} + +// declImports reports whether gen contains an import of path. +func declImports(gen *ast.GenDecl, path string) bool { + if gen.Tok != token.IMPORT { + return false + } + for _, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + if importPath(impspec) == path { + return true + } + } + return false +} + +// matchLen returns the length of the longest path segment prefix shared by x and y. +func matchLen(x, y string) int { + n := 0 + for i := 0; i < len(x) && i < len(y) && x[i] == y[i]; i++ { + if x[i] == '/' { + n++ + } + } + return n +} + +// isTopName returns true if n is a top-level unresolved identifier with the given name. +func isTopName(n ast.Expr, name string) bool { + id, ok := n.(*ast.Ident) + return ok && id.Name == name && id.Obj == nil +} + +// Imports returns the file imports grouped by paragraph. 
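For orientation, a minimal sketch (not part of the vendored file) of how a tool might drive the import-editing helpers above; the file name and source snippet are invented for illustration:

package main

import (
	"go/parser"
	"go/printer"
	"go/token"
	"os"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	src := "package demo\n\nimport \"strings\"\n"

	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}

	// Add a renamed import and drop an existing one; both helpers report
	// whether they changed the file.
	astutil.AddNamedImport(fset, f, "pathpkg", "path")
	astutil.DeleteImport(fset, f, "strings")

	// Print the rewritten file.
	printer.Fprint(os.Stdout, fset, f)
}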
+func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec { + var groups [][]*ast.ImportSpec + + for _, decl := range f.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok || genDecl.Tok != token.IMPORT { + break + } + + group := []*ast.ImportSpec{} + + var lastLine int + for _, spec := range genDecl.Specs { + importSpec := spec.(*ast.ImportSpec) + pos := importSpec.Path.ValuePos + line := fset.Position(pos).Line + if lastLine > 0 && pos > 0 && line-lastLine > 1 { + groups = append(groups, group) + group = []*ast.ImportSpec{} + } + group = append(group, importSpec) + lastLine = line + } + groups = append(groups, group) + } + + return groups +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go new file mode 100644 index 000000000..cf72ea990 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -0,0 +1,477 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +import ( + "fmt" + "go/ast" + "reflect" + "sort" +) + +// An ApplyFunc is invoked by Apply for each node n, even if n is nil, +// before and/or after the node's children, using a Cursor describing +// the current node and providing operations on it. +// +// The return value of ApplyFunc controls the syntax tree traversal. +// See Apply for details. +type ApplyFunc func(*Cursor) bool + +// Apply traverses a syntax tree recursively, starting with root, +// and calling pre and post for each node as described below. +// Apply returns the syntax tree, possibly modified. +// +// If pre is not nil, it is called for each node before the node's +// children are traversed (pre-order). If pre returns false, no +// children are traversed, and post is not called for that node. +// +// If post is not nil, and a prior call of pre didn't return false, +// post is called for each node after its children are traversed +// (post-order). If post returns false, traversal is terminated and +// Apply returns immediately. +// +// Only fields that refer to AST nodes are considered children; +// i.e., token.Pos, Scopes, Objects, and fields of basic types +// (strings, etc.) are ignored. +// +// Children are traversed in the order in which they appear in the +// respective node's struct definition. A package's files are +// traversed in the filenames' alphabetical order. +// +func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) { + parent := &struct{ ast.Node }{root} + defer func() { + if r := recover(); r != nil && r != abort { + panic(r) + } + result = parent.Node + }() + a := &application{pre: pre, post: post} + a.apply(parent, "Node", nil, root) + return +} + +var abort = new(int) // singleton, to signal termination of Apply + +// A Cursor describes a node encountered during Apply. +// Information about the node and its parent is available +// from the Node, Parent, Name, and Index methods. +// +// If p is a variable of type and value of the current parent node +// c.Parent(), and f is the field identifier with name c.Name(), +// the following invariants hold: +// +// p.f == c.Node() if c.Index() < 0 +// p.f[c.Index()] == c.Node() if c.Index() >= 0 +// +// The methods Replace, Delete, InsertBefore, and InsertAfter +// can be used to change the AST without disrupting Apply. +type Cursor struct { + parent ast.Node + name string + iter *iterator // valid if non-nil + node ast.Node +} + +// Node returns the current Node. 
+func (c *Cursor) Node() ast.Node { return c.node } + +// Parent returns the parent of the current Node. +func (c *Cursor) Parent() ast.Node { return c.parent } + +// Name returns the name of the parent Node field that contains the current Node. +// If the parent is a *ast.Package and the current Node is a *ast.File, Name returns +// the filename for the current Node. +func (c *Cursor) Name() string { return c.name } + +// Index reports the index >= 0 of the current Node in the slice of Nodes that +// contains it, or a value < 0 if the current Node is not part of a slice. +// The index of the current node changes if InsertBefore is called while +// processing the current node. +func (c *Cursor) Index() int { + if c.iter != nil { + return c.iter.index + } + return -1 +} + +// field returns the current node's parent field value. +func (c *Cursor) field() reflect.Value { + return reflect.Indirect(reflect.ValueOf(c.parent)).FieldByName(c.name) +} + +// Replace replaces the current Node with n. +// The replacement node is not walked by Apply. +func (c *Cursor) Replace(n ast.Node) { + if _, ok := c.node.(*ast.File); ok { + file, ok := n.(*ast.File) + if !ok { + panic("attempt to replace *ast.File with non-*ast.File") + } + c.parent.(*ast.Package).Files[c.name] = file + return + } + + v := c.field() + if i := c.Index(); i >= 0 { + v = v.Index(i) + } + v.Set(reflect.ValueOf(n)) +} + +// Delete deletes the current Node from its containing slice. +// If the current Node is not part of a slice, Delete panics. +// As a special case, if the current node is a package file, +// Delete removes it from the package's Files map. +func (c *Cursor) Delete() { + if _, ok := c.node.(*ast.File); ok { + delete(c.parent.(*ast.Package).Files, c.name) + return + } + + i := c.Index() + if i < 0 { + panic("Delete node not contained in slice") + } + v := c.field() + l := v.Len() + reflect.Copy(v.Slice(i, l), v.Slice(i+1, l)) + v.Index(l - 1).Set(reflect.Zero(v.Type().Elem())) + v.SetLen(l - 1) + c.iter.step-- +} + +// InsertAfter inserts n after the current Node in its containing slice. +// If the current Node is not part of a slice, InsertAfter panics. +// Apply does not walk n. +func (c *Cursor) InsertAfter(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertAfter node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+2, l), v.Slice(i+1, l)) + v.Index(i + 1).Set(reflect.ValueOf(n)) + c.iter.step++ +} + +// InsertBefore inserts n before the current Node in its containing slice. +// If the current Node is not part of a slice, InsertBefore panics. +// Apply will not walk n. +func (c *Cursor) InsertBefore(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertBefore node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+1, l), v.Slice(i, l)) + v.Index(i).Set(reflect.ValueOf(n)) + c.iter.index++ +} + +// application carries all the shared data so we can pass it around cheaply. 
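A hypothetical sketch of the Apply/Cursor API described above, renaming one identifier in an invented source snippet (illustration only, not part of the vendored file):

package main

import (
	"go/ast"
	"go/parser"
	"go/printer"
	"go/token"
	"os"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	src := "package demo\n\nvar oldName = 1\n\nfunc use() int { return oldName }\n"

	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		panic(err)
	}

	// Pre-order callback: replace every identifier named "oldName" in place.
	astutil.Apply(f, func(c *astutil.Cursor) bool {
		if id, ok := c.Node().(*ast.Ident); ok && id.Name == "oldName" {
			c.Replace(&ast.Ident{Name: "newName", NamePos: id.NamePos})
		}
		return true // keep walking into children
	}, nil)

	printer.Fprint(os.Stdout, fset, f)
}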
+type application struct { + pre, post ApplyFunc + cursor Cursor + iter iterator +} + +func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) { + // convert typed nil into untyped nil + if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() { + n = nil + } + + // avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead + saved := a.cursor + a.cursor.parent = parent + a.cursor.name = name + a.cursor.iter = iter + a.cursor.node = n + + if a.pre != nil && !a.pre(&a.cursor) { + a.cursor = saved + return + } + + // walk children + // (the order of the cases matches the order of the corresponding node types in go/ast) + switch n := n.(type) { + case nil: + // nothing to do + + // Comments and fields + case *ast.Comment: + // nothing to do + + case *ast.CommentGroup: + if n != nil { + a.applyList(n, "List") + } + + case *ast.Field: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.FieldList: + a.applyList(n, "List") + + // Expressions + case *ast.BadExpr, *ast.Ident, *ast.BasicLit: + // nothing to do + + case *ast.Ellipsis: + a.apply(n, "Elt", nil, n.Elt) + + case *ast.FuncLit: + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + case *ast.CompositeLit: + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Elts") + + case *ast.ParenExpr: + a.apply(n, "X", nil, n.X) + + case *ast.SelectorExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Sel", nil, n.Sel) + + case *ast.IndexExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Index", nil, n.Index) + + case *ast.SliceExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Low", nil, n.Low) + a.apply(n, "High", nil, n.High) + a.apply(n, "Max", nil, n.Max) + + case *ast.TypeAssertExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Type", nil, n.Type) + + case *ast.CallExpr: + a.apply(n, "Fun", nil, n.Fun) + a.applyList(n, "Args") + + case *ast.StarExpr: + a.apply(n, "X", nil, n.X) + + case *ast.UnaryExpr: + a.apply(n, "X", nil, n.X) + + case *ast.BinaryExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Y", nil, n.Y) + + case *ast.KeyValueExpr: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + // Types + case *ast.ArrayType: + a.apply(n, "Len", nil, n.Len) + a.apply(n, "Elt", nil, n.Elt) + + case *ast.StructType: + a.apply(n, "Fields", nil, n.Fields) + + case *ast.FuncType: + a.apply(n, "Params", nil, n.Params) + a.apply(n, "Results", nil, n.Results) + + case *ast.InterfaceType: + a.apply(n, "Methods", nil, n.Methods) + + case *ast.MapType: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + case *ast.ChanType: + a.apply(n, "Value", nil, n.Value) + + // Statements + case *ast.BadStmt: + // nothing to do + + case *ast.DeclStmt: + a.apply(n, "Decl", nil, n.Decl) + + case *ast.EmptyStmt: + // nothing to do + + case *ast.LabeledStmt: + a.apply(n, "Label", nil, n.Label) + a.apply(n, "Stmt", nil, n.Stmt) + + case *ast.ExprStmt: + a.apply(n, "X", nil, n.X) + + case *ast.SendStmt: + a.apply(n, "Chan", nil, n.Chan) + a.apply(n, "Value", nil, n.Value) + + case *ast.IncDecStmt: + a.apply(n, "X", nil, n.X) + + case *ast.AssignStmt: + a.applyList(n, "Lhs") + a.applyList(n, "Rhs") + + case *ast.GoStmt: + a.apply(n, "Call", nil, n.Call) + + case *ast.DeferStmt: + a.apply(n, "Call", nil, n.Call) + + case *ast.ReturnStmt: + a.applyList(n, "Results") + + case *ast.BranchStmt: + a.apply(n, "Label", nil, n.Label) + + case *ast.BlockStmt: + 
a.applyList(n, "List") + + case *ast.IfStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Body", nil, n.Body) + a.apply(n, "Else", nil, n.Else) + + case *ast.CaseClause: + a.applyList(n, "List") + a.applyList(n, "Body") + + case *ast.SwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Body", nil, n.Body) + + case *ast.TypeSwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Assign", nil, n.Assign) + a.apply(n, "Body", nil, n.Body) + + case *ast.CommClause: + a.apply(n, "Comm", nil, n.Comm) + a.applyList(n, "Body") + + case *ast.SelectStmt: + a.apply(n, "Body", nil, n.Body) + + case *ast.ForStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Post", nil, n.Post) + a.apply(n, "Body", nil, n.Body) + + case *ast.RangeStmt: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + a.apply(n, "X", nil, n.X) + a.apply(n, "Body", nil, n.Body) + + // Declarations + case *ast.ImportSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Path", nil, n.Path) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.ValueSpec: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Values") + a.apply(n, "Comment", nil, n.Comment) + + case *ast.TypeSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.BadDecl: + // nothing to do + + case *ast.GenDecl: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Specs") + + case *ast.FuncDecl: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Recv", nil, n.Recv) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + // Files and packages + case *ast.File: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.applyList(n, "Decls") + // Don't walk n.Comments; they have either been walked already if + // they are Doc comments, or they can be easily walked explicitly. + + case *ast.Package: + // collect and sort names for reproducible behavior + var names []string + for name := range n.Files { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + a.apply(n, name, nil, n.Files[name]) + } + + default: + panic(fmt.Sprintf("Apply: unexpected node type %T", n)) + } + + if a.post != nil && !a.post(&a.cursor) { + panic(abort) + } + + a.cursor = saved +} + +// An iterator controls iteration over a slice of nodes. 
+type iterator struct { + index, step int +} + +func (a *application) applyList(parent ast.Node, name string) { + // avoid heap-allocating a new iterator for each applyList call; reuse a.iter instead + saved := a.iter + a.iter.index = 0 + for { + // must reload parent.name each time, since cursor modifications might change it + v := reflect.Indirect(reflect.ValueOf(parent)).FieldByName(name) + if a.iter.index >= v.Len() { + break + } + + // element x may be nil in a bad AST - be cautious + var x ast.Node + if e := v.Index(a.iter.index); e.IsValid() { + x = e.Interface().(ast.Node) + } + + a.iter.step = 1 + a.apply(parent, name, &a.iter, x) + a.iter.index += a.iter.step + } + a.iter = saved +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go new file mode 100644 index 000000000..763062982 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/util.go @@ -0,0 +1,14 @@ +package astutil + +import "go/ast" + +// Unparen returns e with any enclosing parentheses stripped. +func Unparen(e ast.Expr) ast.Expr { + for { + p, ok := e.(*ast.ParenExpr) + if !ok { + return e + } + e = p.X + } +} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go new file mode 100644 index 000000000..ddbdd3f08 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -0,0 +1,182 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package inspector provides helper functions for traversal over the +// syntax trees of a package, including node filtering by type, and +// materialization of the traversal stack. +// +// During construction, the inspector does a complete traversal and +// builds a list of push/pop events and their node type. Subsequent +// method calls that request a traversal scan this list, rather than walk +// the AST, and perform type filtering using efficient bit sets. +// +// Experiments suggest the inspector's traversals are about 2.5x faster +// than ast.Inspect, but it may take around 5 traversals for this +// benefit to amortize the inspector's construction cost. +// If efficiency is the primary concern, do not use Inspector for +// one-off traversals. +package inspector + +// There are four orthogonal features in a traversal: +// 1 type filtering +// 2 pruning +// 3 postorder calls to f +// 4 stack +// Rather than offer all of them in the API, +// only a few combinations are exposed: +// - Preorder is the fastest and has fewest features, +// but is the most commonly needed traversal. +// - Nodes and WithStack both provide pruning and postorder calls, +// even though few clients need it, because supporting two versions +// is not justified. +// More combinations could be supported by expressing them as +// wrappers around a more generic traversal, but this was measured +// and found to degrade performance significantly (30%). + +import ( + "go/ast" +) + +// An Inspector provides methods for inspecting +// (traversing) the syntax trees of a package. +type Inspector struct { + events []event +} + +// New returns an Inspector for the specified syntax trees. +func New(files []*ast.File) *Inspector { + return &Inspector{traverse(files)} +} + +// An event represents a push or a pop +// of an ast.Node during a traversal. 
+type event struct { + node ast.Node + typ uint64 // typeOf(node) + index int // 1 + index of corresponding pop event, or 0 if this is a pop +} + +// Preorder visits all the nodes of the files supplied to New in +// depth-first order. It calls f(n) for each node n before it visits +// n's children. +// +// The types argument, if non-empty, enables type-based filtering of +// events. The function f if is called only for nodes whose type +// matches an element of the types slice. +func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { + // Because it avoids postorder calls to f, and the pruning + // check, Preorder is almost twice as fast as Nodes. The two + // features seem to contribute similar slowdowns (~1.4x each). + + mask := maskOf(types) + for i := 0; i < len(in.events); { + ev := in.events[i] + if ev.typ&mask != 0 { + if ev.index > 0 { + f(ev.node) + } + } + i++ + } +} + +// Nodes visits the nodes of the files supplied to New in depth-first +// order. It calls f(n, true) for each node n before it visits n's +// children. If f returns true, Nodes invokes f recursively for each +// of the non-nil children of the node, followed by a call of +// f(n, false). +// +// The types argument, if non-empty, enables type-based filtering of +// events. The function f if is called only for nodes whose type +// matches an element of the types slice. +func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (prune bool)) { + mask := maskOf(types) + for i := 0; i < len(in.events); { + ev := in.events[i] + if ev.typ&mask != 0 { + if ev.index > 0 { + // push + if !f(ev.node, true) { + i = ev.index // jump to corresponding pop + 1 + continue + } + } else { + // pop + f(ev.node, false) + } + } + i++ + } +} + +// WithStack visits nodes in a similar manner to Nodes, but it +// supplies each call to f an additional argument, the current +// traversal stack. The stack's first element is the outermost node, +// an *ast.File; its last is the innermost, n. +func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (prune bool)) { + mask := maskOf(types) + var stack []ast.Node + for i := 0; i < len(in.events); { + ev := in.events[i] + if ev.index > 0 { + // push + stack = append(stack, ev.node) + if ev.typ&mask != 0 { + if !f(ev.node, true, stack) { + i = ev.index + stack = stack[:len(stack)-1] + continue + } + } + } else { + // pop + if ev.typ&mask != 0 { + f(ev.node, false, stack) + } + stack = stack[:len(stack)-1] + } + i++ + } +} + +// traverse builds the table of events representing a traversal. +func traverse(files []*ast.File) []event { + // Preallocate approximate number of events + // based on source file extent. + // This makes traverse faster by 4x (!). + var extent int + for _, f := range files { + extent += int(f.End() - f.Pos()) + } + // This estimate is based on the net/http package. 
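An illustration-only sketch of the filtered traversal described above (the source text is invented); the event table is built once and can then serve many cheap traversals:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/inspector"
)

func main() {
	src := "package demo\n\nfunc A() {}\n\nfunc B() { _ = func() {} }\n"

	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		panic(err)
	}

	// Build the push/pop event table once, then run a type-filtered
	// preorder traversal that only sees *ast.FuncDecl nodes.
	insp := inspector.New([]*ast.File{f})
	insp.Preorder([]ast.Node{(*ast.FuncDecl)(nil)}, func(n ast.Node) {
		fmt.Println("declared function:", n.(*ast.FuncDecl).Name.Name)
	})
}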
+ events := make([]event, 0, extent*33/100) + + var stack []event + for _, f := range files { + ast.Inspect(f, func(n ast.Node) bool { + if n != nil { + // push + ev := event{ + node: n, + typ: typeOf(n), + index: len(events), // push event temporarily holds own index + } + stack = append(stack, ev) + events = append(events, ev) + } else { + // pop + ev := stack[len(stack)-1] + stack = stack[:len(stack)-1] + + events[ev.index].index = len(events) + 1 // make push refer to pop + + ev.index = 0 // turn ev into a pop event + events = append(events, ev) + } + return true + }) + } + + return events +} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go new file mode 100644 index 000000000..d61301b13 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go @@ -0,0 +1,216 @@ +package inspector + +// This file defines func typeOf(ast.Node) uint64. +// +// The initial map-based implementation was too slow; +// see https://go-review.googlesource.com/c/tools/+/135655/1/go/ast/inspector/inspector.go#196 + +import "go/ast" + +const ( + nArrayType = iota + nAssignStmt + nBadDecl + nBadExpr + nBadStmt + nBasicLit + nBinaryExpr + nBlockStmt + nBranchStmt + nCallExpr + nCaseClause + nChanType + nCommClause + nComment + nCommentGroup + nCompositeLit + nDeclStmt + nDeferStmt + nEllipsis + nEmptyStmt + nExprStmt + nField + nFieldList + nFile + nForStmt + nFuncDecl + nFuncLit + nFuncType + nGenDecl + nGoStmt + nIdent + nIfStmt + nImportSpec + nIncDecStmt + nIndexExpr + nInterfaceType + nKeyValueExpr + nLabeledStmt + nMapType + nPackage + nParenExpr + nRangeStmt + nReturnStmt + nSelectStmt + nSelectorExpr + nSendStmt + nSliceExpr + nStarExpr + nStructType + nSwitchStmt + nTypeAssertExpr + nTypeSpec + nTypeSwitchStmt + nUnaryExpr + nValueSpec +) + +// typeOf returns a distinct single-bit value that represents the type of n. +// +// Various implementations were benchmarked with BenchmarkNewInspector: +// GOGC=off +// - type switch 4.9-5.5ms 2.1ms +// - binary search over a sorted list of types 5.5-5.9ms 2.5ms +// - linear scan, frequency-ordered list 5.9-6.1ms 2.7ms +// - linear scan, unordered list 6.4ms 2.7ms +// - hash table 6.5ms 3.1ms +// A perfect hash seemed like overkill. +// +// The compiler's switch statement is the clear winner +// as it produces a binary tree in code, +// with constant conditions and good branch prediction. +// (Sadly it is the most verbose in source code.) +// Binary search suffered from poor branch prediction. +// +func typeOf(n ast.Node) uint64 { + // Fast path: nearly half of all nodes are identifiers. + if _, ok := n.(*ast.Ident); ok { + return 1 << nIdent + } + + // These cases include all nodes encountered by ast.Inspect. 
+ switch n.(type) { + case *ast.ArrayType: + return 1 << nArrayType + case *ast.AssignStmt: + return 1 << nAssignStmt + case *ast.BadDecl: + return 1 << nBadDecl + case *ast.BadExpr: + return 1 << nBadExpr + case *ast.BadStmt: + return 1 << nBadStmt + case *ast.BasicLit: + return 1 << nBasicLit + case *ast.BinaryExpr: + return 1 << nBinaryExpr + case *ast.BlockStmt: + return 1 << nBlockStmt + case *ast.BranchStmt: + return 1 << nBranchStmt + case *ast.CallExpr: + return 1 << nCallExpr + case *ast.CaseClause: + return 1 << nCaseClause + case *ast.ChanType: + return 1 << nChanType + case *ast.CommClause: + return 1 << nCommClause + case *ast.Comment: + return 1 << nComment + case *ast.CommentGroup: + return 1 << nCommentGroup + case *ast.CompositeLit: + return 1 << nCompositeLit + case *ast.DeclStmt: + return 1 << nDeclStmt + case *ast.DeferStmt: + return 1 << nDeferStmt + case *ast.Ellipsis: + return 1 << nEllipsis + case *ast.EmptyStmt: + return 1 << nEmptyStmt + case *ast.ExprStmt: + return 1 << nExprStmt + case *ast.Field: + return 1 << nField + case *ast.FieldList: + return 1 << nFieldList + case *ast.File: + return 1 << nFile + case *ast.ForStmt: + return 1 << nForStmt + case *ast.FuncDecl: + return 1 << nFuncDecl + case *ast.FuncLit: + return 1 << nFuncLit + case *ast.FuncType: + return 1 << nFuncType + case *ast.GenDecl: + return 1 << nGenDecl + case *ast.GoStmt: + return 1 << nGoStmt + case *ast.Ident: + return 1 << nIdent + case *ast.IfStmt: + return 1 << nIfStmt + case *ast.ImportSpec: + return 1 << nImportSpec + case *ast.IncDecStmt: + return 1 << nIncDecStmt + case *ast.IndexExpr: + return 1 << nIndexExpr + case *ast.InterfaceType: + return 1 << nInterfaceType + case *ast.KeyValueExpr: + return 1 << nKeyValueExpr + case *ast.LabeledStmt: + return 1 << nLabeledStmt + case *ast.MapType: + return 1 << nMapType + case *ast.Package: + return 1 << nPackage + case *ast.ParenExpr: + return 1 << nParenExpr + case *ast.RangeStmt: + return 1 << nRangeStmt + case *ast.ReturnStmt: + return 1 << nReturnStmt + case *ast.SelectStmt: + return 1 << nSelectStmt + case *ast.SelectorExpr: + return 1 << nSelectorExpr + case *ast.SendStmt: + return 1 << nSendStmt + case *ast.SliceExpr: + return 1 << nSliceExpr + case *ast.StarExpr: + return 1 << nStarExpr + case *ast.StructType: + return 1 << nStructType + case *ast.SwitchStmt: + return 1 << nSwitchStmt + case *ast.TypeAssertExpr: + return 1 << nTypeAssertExpr + case *ast.TypeSpec: + return 1 << nTypeSpec + case *ast.TypeSwitchStmt: + return 1 << nTypeSwitchStmt + case *ast.UnaryExpr: + return 1 << nUnaryExpr + case *ast.ValueSpec: + return 1 << nValueSpec + } + return 0 +} + +func maskOf(nodes []ast.Node) uint64 { + if nodes == nil { + return 1<<64 - 1 // match all node types + } + var mask uint64 + for _, n := range nodes { + mask |= typeOf(n) + } + return mask +} diff --git a/vendor/golang.org/x/tools/go/buildutil/allpackages.go b/vendor/golang.org/x/tools/go/buildutil/allpackages.go new file mode 100644 index 000000000..c0cb03e7b --- /dev/null +++ b/vendor/golang.org/x/tools/go/buildutil/allpackages.go @@ -0,0 +1,198 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package buildutil provides utilities related to the go/build +// package in the standard library. +// +// All I/O is done via the build.Context file system interface, which must +// be concurrency-safe. 
+package buildutil // import "golang.org/x/tools/go/buildutil" + +import ( + "go/build" + "os" + "path/filepath" + "sort" + "strings" + "sync" +) + +// AllPackages returns the package path of each Go package in any source +// directory of the specified build context (e.g. $GOROOT or an element +// of $GOPATH). Errors are ignored. The results are sorted. +// All package paths are canonical, and thus may contain "/vendor/". +// +// The result may include import paths for directories that contain no +// *.go files, such as "archive" (in $GOROOT/src). +// +// All I/O is done via the build.Context file system interface, +// which must be concurrency-safe. +// +func AllPackages(ctxt *build.Context) []string { + var list []string + ForEachPackage(ctxt, func(pkg string, _ error) { + list = append(list, pkg) + }) + sort.Strings(list) + return list +} + +// ForEachPackage calls the found function with the package path of +// each Go package it finds in any source directory of the specified +// build context (e.g. $GOROOT or an element of $GOPATH). +// All package paths are canonical, and thus may contain "/vendor/". +// +// If the package directory exists but could not be read, the second +// argument to the found function provides the error. +// +// All I/O is done via the build.Context file system interface, +// which must be concurrency-safe. +// +func ForEachPackage(ctxt *build.Context, found func(importPath string, err error)) { + ch := make(chan item) + + var wg sync.WaitGroup + for _, root := range ctxt.SrcDirs() { + root := root + wg.Add(1) + go func() { + allPackages(ctxt, root, ch) + wg.Done() + }() + } + go func() { + wg.Wait() + close(ch) + }() + + // All calls to found occur in the caller's goroutine. + for i := range ch { + found(i.importPath, i.err) + } +} + +type item struct { + importPath string + err error // (optional) +} + +// We use a process-wide counting semaphore to limit +// the number of parallel calls to ReadDir. +var ioLimit = make(chan bool, 20) + +func allPackages(ctxt *build.Context, root string, ch chan<- item) { + root = filepath.Clean(root) + string(os.PathSeparator) + + var wg sync.WaitGroup + + var walkDir func(dir string) + walkDir = func(dir string) { + // Avoid .foo, _foo, and testdata directory trees. + base := filepath.Base(dir) + if base == "" || base[0] == '.' || base[0] == '_' || base == "testdata" { + return + } + + pkg := filepath.ToSlash(strings.TrimPrefix(dir, root)) + + // Prune search if we encounter any of these import paths. + switch pkg { + case "builtin": + return + } + + ioLimit <- true + files, err := ReadDir(ctxt, dir) + <-ioLimit + if pkg != "" || err != nil { + ch <- item{pkg, err} + } + for _, fi := range files { + fi := fi + if fi.IsDir() { + wg.Add(1) + go func() { + walkDir(filepath.Join(dir, fi.Name())) + wg.Done() + }() + } + } + } + + walkDir(root) + wg.Wait() +} + +// ExpandPatterns returns the set of packages matched by patterns, +// which may have the following forms: +// +// golang.org/x/tools/cmd/guru # a single package +// golang.org/x/tools/... # all packages beneath dir +// ... # the entire workspace. +// +// Order is significant: a pattern preceded by '-' removes matching +// packages from the set. For example, these patterns match all encoding +// packages except encoding/xml: +// +// encoding/... -encoding/xml +// +// A trailing slash in a pattern is ignored. (Path components of Go +// package names are separated by slash, not the platform's path separator.) 
+// +func ExpandPatterns(ctxt *build.Context, patterns []string) map[string]bool { + // TODO(adonovan): support other features of 'go list': + // - "std"/"cmd"/"all" meta-packages + // - "..." not at the end of a pattern + // - relative patterns using "./" or "../" prefix + + pkgs := make(map[string]bool) + doPkg := func(pkg string, neg bool) { + if neg { + delete(pkgs, pkg) + } else { + pkgs[pkg] = true + } + } + + // Scan entire workspace if wildcards are present. + // TODO(adonovan): opt: scan only the necessary subtrees of the workspace. + var all []string + for _, arg := range patterns { + if strings.HasSuffix(arg, "...") { + all = AllPackages(ctxt) + break + } + } + + for _, arg := range patterns { + if arg == "" { + continue + } + + neg := arg[0] == '-' + if neg { + arg = arg[1:] + } + + if arg == "..." { + // ... matches all packages + for _, pkg := range all { + doPkg(pkg, neg) + } + } else if dir := strings.TrimSuffix(arg, "/..."); dir != arg { + // dir/... matches all packages beneath dir + for _, pkg := range all { + if strings.HasPrefix(pkg, dir) && + (len(pkg) == len(dir) || pkg[len(dir)] == '/') { + doPkg(pkg, neg) + } + } + } else { + // single package + doPkg(strings.TrimSuffix(arg, "/"), neg) + } + } + + return pkgs +} diff --git a/vendor/golang.org/x/tools/go/buildutil/fakecontext.go b/vendor/golang.org/x/tools/go/buildutil/fakecontext.go new file mode 100644 index 000000000..8b7f06673 --- /dev/null +++ b/vendor/golang.org/x/tools/go/buildutil/fakecontext.go @@ -0,0 +1,109 @@ +package buildutil + +import ( + "fmt" + "go/build" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "sort" + "strings" + "time" +) + +// FakeContext returns a build.Context for the fake file tree specified +// by pkgs, which maps package import paths to a mapping from file base +// names to contents. +// +// The fake Context has a GOROOT of "/go" and no GOPATH, and overrides +// the necessary file access methods to read from memory instead of the +// real file system. +// +// Unlike a real file tree, the fake one has only two levels---packages +// and files---so ReadDir("/go/src/") returns all packages under +// /go/src/ including, for instance, "math" and "math/big". +// ReadDir("/go/src/math/big") would return all the files in the +// "math/big" package. 
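A hypothetical sketch combining FakeContext with AllPackages; the package paths and file contents are invented, and the snippet is illustrative rather than part of the vendored file:

package main

import (
	"fmt"

	"golang.org/x/tools/go/buildutil"
)

func main() {
	// Two-level fake tree: import path -> file base name -> contents.
	ctxt := buildutil.FakeContext(map[string]map[string]string{
		"example.com/hello": {"hello.go": "package hello\n"},
		"math/big":          {"big.go": "package big\n"},
	})

	// Enumerate every package visible through the fake file system.
	for _, pkg := range buildutil.AllPackages(ctxt) {
		fmt.Println(pkg)
	}
}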
+// +func FakeContext(pkgs map[string]map[string]string) *build.Context { + clean := func(filename string) string { + f := path.Clean(filepath.ToSlash(filename)) + // Removing "/go/src" while respecting segment + // boundaries has this unfortunate corner case: + if f == "/go/src" { + return "" + } + return strings.TrimPrefix(f, "/go/src/") + } + + ctxt := build.Default // copy + ctxt.GOROOT = "/go" + ctxt.GOPATH = "" + ctxt.Compiler = "gc" + ctxt.IsDir = func(dir string) bool { + dir = clean(dir) + if dir == "" { + return true // needed by (*build.Context).SrcDirs + } + return pkgs[dir] != nil + } + ctxt.ReadDir = func(dir string) ([]os.FileInfo, error) { + dir = clean(dir) + var fis []os.FileInfo + if dir == "" { + // enumerate packages + for importPath := range pkgs { + fis = append(fis, fakeDirInfo(importPath)) + } + } else { + // enumerate files of package + for basename := range pkgs[dir] { + fis = append(fis, fakeFileInfo(basename)) + } + } + sort.Sort(byName(fis)) + return fis, nil + } + ctxt.OpenFile = func(filename string) (io.ReadCloser, error) { + filename = clean(filename) + dir, base := path.Split(filename) + content, ok := pkgs[path.Clean(dir)][base] + if !ok { + return nil, fmt.Errorf("file not found: %s", filename) + } + return ioutil.NopCloser(strings.NewReader(content)), nil + } + ctxt.IsAbsPath = func(path string) bool { + path = filepath.ToSlash(path) + // Don't rely on the default (filepath.Path) since on + // Windows, it reports virtual paths as non-absolute. + return strings.HasPrefix(path, "/") + } + return &ctxt +} + +type byName []os.FileInfo + +func (s byName) Len() int { return len(s) } +func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() } + +type fakeFileInfo string + +func (fi fakeFileInfo) Name() string { return string(fi) } +func (fakeFileInfo) Sys() interface{} { return nil } +func (fakeFileInfo) ModTime() time.Time { return time.Time{} } +func (fakeFileInfo) IsDir() bool { return false } +func (fakeFileInfo) Size() int64 { return 0 } +func (fakeFileInfo) Mode() os.FileMode { return 0644 } + +type fakeDirInfo string + +func (fd fakeDirInfo) Name() string { return string(fd) } +func (fakeDirInfo) Sys() interface{} { return nil } +func (fakeDirInfo) ModTime() time.Time { return time.Time{} } +func (fakeDirInfo) IsDir() bool { return true } +func (fakeDirInfo) Size() int64 { return 0 } +func (fakeDirInfo) Mode() os.FileMode { return 0755 } diff --git a/vendor/golang.org/x/tools/go/buildutil/overlay.go b/vendor/golang.org/x/tools/go/buildutil/overlay.go new file mode 100644 index 000000000..8e239086b --- /dev/null +++ b/vendor/golang.org/x/tools/go/buildutil/overlay.go @@ -0,0 +1,103 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package buildutil + +import ( + "bufio" + "bytes" + "fmt" + "go/build" + "io" + "io/ioutil" + "path/filepath" + "strconv" + "strings" +) + +// OverlayContext overlays a build.Context with additional files from +// a map. Files in the map take precedence over other files. +// +// In addition to plain string comparison, two file names are +// considered equal if their base names match and their directory +// components point at the same directory on the file system. That is, +// symbolic links are followed for directories, but not files. 
+// +// A common use case for OverlayContext is to allow editors to pass in +// a set of unsaved, modified files. +// +// Currently, only the Context.OpenFile function will respect the +// overlay. This may change in the future. +func OverlayContext(orig *build.Context, overlay map[string][]byte) *build.Context { + // TODO(dominikh): Implement IsDir, HasSubdir and ReadDir + + rc := func(data []byte) (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewBuffer(data)), nil + } + + copy := *orig // make a copy + ctxt := &copy + ctxt.OpenFile = func(path string) (io.ReadCloser, error) { + // Fast path: names match exactly. + if content, ok := overlay[path]; ok { + return rc(content) + } + + // Slow path: check for same file under a different + // alias, perhaps due to a symbolic link. + for filename, content := range overlay { + if sameFile(path, filename) { + return rc(content) + } + } + + return OpenFile(orig, path) + } + return ctxt +} + +// ParseOverlayArchive parses an archive containing Go files and their +// contents. The result is intended to be used with OverlayContext. +// +// +// Archive format +// +// The archive consists of a series of files. Each file consists of a +// name, a decimal file size and the file contents, separated by +// newlines. No newline follows after the file contents. +func ParseOverlayArchive(archive io.Reader) (map[string][]byte, error) { + overlay := make(map[string][]byte) + r := bufio.NewReader(archive) + for { + // Read file name. + filename, err := r.ReadString('\n') + if err != nil { + if err == io.EOF { + break // OK + } + return nil, fmt.Errorf("reading archive file name: %v", err) + } + filename = filepath.Clean(strings.TrimSpace(filename)) + + // Read file size. + sz, err := r.ReadString('\n') + if err != nil { + return nil, fmt.Errorf("reading size of archive file %s: %v", filename, err) + } + sz = strings.TrimSpace(sz) + size, err := strconv.ParseUint(sz, 10, 32) + if err != nil { + return nil, fmt.Errorf("parsing size of archive file %s: %v", filename, err) + } + + // Read file content. + content := make([]byte, size) + if _, err := io.ReadFull(r, content); err != nil { + return nil, fmt.Errorf("reading archive file %s: %v", filename, err) + } + overlay[filename] = content + } + + return overlay, nil +} diff --git a/vendor/golang.org/x/tools/go/buildutil/tags.go b/vendor/golang.org/x/tools/go/buildutil/tags.go new file mode 100644 index 000000000..486606f37 --- /dev/null +++ b/vendor/golang.org/x/tools/go/buildutil/tags.go @@ -0,0 +1,75 @@ +package buildutil + +// This logic was copied from stringsFlag from $GOROOT/src/cmd/go/build.go. + +import "fmt" + +const TagsFlagDoc = "a list of `build tags` to consider satisfied during the build. " + + "For more information about build tags, see the description of " + + "build constraints in the documentation for the go/build package" + +// TagsFlag is an implementation of the flag.Value and flag.Getter interfaces that parses +// a flag value in the same manner as go build's -tags flag and +// populates a []string slice. +// +// See $GOROOT/src/go/build/doc.go for description of build tags. +// See $GOROOT/src/cmd/go/doc.go for description of 'go build -tags' flag.
+// +// Example: +// flag.Var((*buildutil.TagsFlag)(&build.Default.BuildTags), "tags", buildutil.TagsFlagDoc) +type TagsFlag []string + +func (v *TagsFlag) Set(s string) error { + var err error + *v, err = splitQuotedFields(s) + if *v == nil { + *v = []string{} + } + return err +} + +func (v *TagsFlag) Get() interface{} { return *v } + +func splitQuotedFields(s string) ([]string, error) { + // Split fields allowing '' or "" around elements. + // Quotes further inside the string do not count. + var f []string + for len(s) > 0 { + for len(s) > 0 && isSpaceByte(s[0]) { + s = s[1:] + } + if len(s) == 0 { + break + } + // Accepted quoted string. No unescaping inside. + if s[0] == '"' || s[0] == '\'' { + quote := s[0] + s = s[1:] + i := 0 + for i < len(s) && s[i] != quote { + i++ + } + if i >= len(s) { + return nil, fmt.Errorf("unterminated %c string", quote) + } + f = append(f, s[:i]) + s = s[i+1:] + continue + } + i := 0 + for i < len(s) && !isSpaceByte(s[i]) { + i++ + } + f = append(f, s[:i]) + s = s[i:] + } + return f, nil +} + +func (v *TagsFlag) String() string { + return "" +} + +func isSpaceByte(c byte) bool { + return c == ' ' || c == '\t' || c == '\n' || c == '\r' +} diff --git a/vendor/golang.org/x/tools/go/buildutil/util.go b/vendor/golang.org/x/tools/go/buildutil/util.go new file mode 100644 index 000000000..fc923d7a7 --- /dev/null +++ b/vendor/golang.org/x/tools/go/buildutil/util.go @@ -0,0 +1,212 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package buildutil + +import ( + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "strings" +) + +// ParseFile behaves like parser.ParseFile, +// but uses the build context's file system interface, if any. +// +// If file is not absolute (as defined by IsAbsPath), the (dir, file) +// components are joined using JoinPath; dir must be absolute. +// +// The displayPath function, if provided, is used to transform the +// filename that will be attached to the ASTs. +// +// TODO(adonovan): call this from go/loader.parseFiles when the tree thaws. +// +func ParseFile(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, file string, mode parser.Mode) (*ast.File, error) { + if !IsAbsPath(ctxt, file) { + file = JoinPath(ctxt, dir, file) + } + rd, err := OpenFile(ctxt, file) + if err != nil { + return nil, err + } + defer rd.Close() // ignore error + if displayPath != nil { + file = displayPath(file) + } + return parser.ParseFile(fset, file, rd, mode) +} + +// ContainingPackage returns the package containing filename. +// +// If filename is not absolute, it is interpreted relative to working directory dir. +// All I/O is via the build context's file system interface, if any. +// +// The '...Files []string' fields of the resulting build.Package are not +// populated (build.FindOnly mode). +// +func ContainingPackage(ctxt *build.Context, dir, filename string) (*build.Package, error) { + if !IsAbsPath(ctxt, filename) { + filename = JoinPath(ctxt, dir, filename) + } + + // We must not assume the file tree uses + // "/" always, + // `\` always, + // or os.PathSeparator (which varies by platform), + // but to make any progress, we are forced to assume that + // paths will not use `\` unless the PathSeparator + // is also `\`, thus we can rely on filepath.ToSlash for some sanity. 
+ + dirSlash := path.Dir(filepath.ToSlash(filename)) + "/" + + // We assume that no source root (GOPATH[i] or GOROOT) contains any other. + for _, srcdir := range ctxt.SrcDirs() { + srcdirSlash := filepath.ToSlash(srcdir) + "/" + if importPath, ok := HasSubdir(ctxt, srcdirSlash, dirSlash); ok { + return ctxt.Import(importPath, dir, build.FindOnly) + } + } + + return nil, fmt.Errorf("can't find package containing %s", filename) +} + +// -- Effective methods of file system interface ------------------------- + +// (go/build.Context defines these as methods, but does not export them.) + +// hasSubdir calls ctxt.HasSubdir (if not nil) or else uses +// the local file system to answer the question. +func HasSubdir(ctxt *build.Context, root, dir string) (rel string, ok bool) { + if f := ctxt.HasSubdir; f != nil { + return f(root, dir) + } + + // Try using paths we received. + if rel, ok = hasSubdir(root, dir); ok { + return + } + + // Try expanding symlinks and comparing + // expanded against unexpanded and + // expanded against expanded. + rootSym, _ := filepath.EvalSymlinks(root) + dirSym, _ := filepath.EvalSymlinks(dir) + + if rel, ok = hasSubdir(rootSym, dir); ok { + return + } + if rel, ok = hasSubdir(root, dirSym); ok { + return + } + return hasSubdir(rootSym, dirSym) +} + +func hasSubdir(root, dir string) (rel string, ok bool) { + const sep = string(filepath.Separator) + root = filepath.Clean(root) + if !strings.HasSuffix(root, sep) { + root += sep + } + + dir = filepath.Clean(dir) + if !strings.HasPrefix(dir, root) { + return "", false + } + + return filepath.ToSlash(dir[len(root):]), true +} + +// FileExists returns true if the specified file exists, +// using the build context's file system interface. +func FileExists(ctxt *build.Context, path string) bool { + if ctxt.OpenFile != nil { + r, err := ctxt.OpenFile(path) + if err != nil { + return false + } + r.Close() // ignore error + return true + } + _, err := os.Stat(path) + return err == nil +} + +// OpenFile behaves like os.Open, +// but uses the build context's file system interface, if any. +func OpenFile(ctxt *build.Context, path string) (io.ReadCloser, error) { + if ctxt.OpenFile != nil { + return ctxt.OpenFile(path) + } + return os.Open(path) +} + +// IsAbsPath behaves like filepath.IsAbs, +// but uses the build context's file system interface, if any. +func IsAbsPath(ctxt *build.Context, path string) bool { + if ctxt.IsAbsPath != nil { + return ctxt.IsAbsPath(path) + } + return filepath.IsAbs(path) +} + +// JoinPath behaves like filepath.Join, +// but uses the build context's file system interface, if any. +func JoinPath(ctxt *build.Context, path ...string) string { + if ctxt.JoinPath != nil { + return ctxt.JoinPath(path...) + } + return filepath.Join(path...) +} + +// IsDir behaves like os.Stat plus IsDir, +// but uses the build context's file system interface, if any. +func IsDir(ctxt *build.Context, path string) bool { + if ctxt.IsDir != nil { + return ctxt.IsDir(path) + } + fi, err := os.Stat(path) + return err == nil && fi.IsDir() +} + +// ReadDir behaves like ioutil.ReadDir, +// but uses the build context's file system interface, if any. +func ReadDir(ctxt *build.Context, path string) ([]os.FileInfo, error) { + if ctxt.ReadDir != nil { + return ctxt.ReadDir(path) + } + return ioutil.ReadDir(path) +} + +// SplitPathList behaves like filepath.SplitList, +// but uses the build context's file system interface, if any. 
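Another illustration-only sketch, assuming a file named main.go in the working directory and a GOPATH-style workspace (the lookup simply fails otherwise): parsing through the context's file system interface and resolving the containing package.

package main

import (
	"fmt"
	"go/build"
	"go/parser"
	"go/token"
	"log"
	"os"

	"golang.org/x/tools/go/buildutil"
)

func main() {
	ctxt := &build.Default
	dir, err := os.Getwd()
	if err != nil {
		log.Fatal(err)
	}

	// Parse via the context's (possibly virtual) file system.
	fset := token.NewFileSet()
	f, err := buildutil.ParseFile(fset, ctxt, nil, dir, "main.go", parser.PackageClauseOnly)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("package clause:", f.Name.Name)

	// Resolve the import path of the package containing the file.
	pkg, err := buildutil.ContainingPackage(ctxt, dir, "main.go")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("import path:", pkg.ImportPath)
}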
+func SplitPathList(ctxt *build.Context, s string) []string { + if ctxt.SplitPathList != nil { + return ctxt.SplitPathList(s) + } + return filepath.SplitList(s) +} + +// sameFile returns true if x and y have the same basename and denote +// the same file. +// +func sameFile(x, y string) bool { + if path.Clean(x) == path.Clean(y) { + return true + } + if filepath.Base(x) == filepath.Base(y) { // (optimisation) + if xi, err := os.Stat(x); err == nil { + if yi, err := os.Stat(y); err == nil { + return os.SameFile(xi, yi) + } + } + } + return false +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go new file mode 100644 index 000000000..f8363d8fa --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -0,0 +1,109 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gcexportdata provides functions for locating, reading, and +// writing export data files containing type information produced by the +// gc compiler. This package supports go1.7 export data format and all +// later versions. +// +// Although it might seem convenient for this package to live alongside +// go/types in the standard library, this would cause version skew +// problems for developer tools that use it, since they must be able to +// consume the outputs of the gc compiler both before and after a Go +// update such as from Go 1.7 to Go 1.8. Because this package lives in +// golang.org/x/tools, sites can update their version of this repo some +// time before the Go 1.8 release and rebuild and redeploy their +// developer tools, which will then be able to consume both Go 1.7 and +// Go 1.8 export data files, so they will work before and after the +// Go update. (See discussion at https://golang.org/issue/15651.) +// +package gcexportdata // import "golang.org/x/tools/go/gcexportdata" + +import ( + "bufio" + "bytes" + "fmt" + "go/token" + "go/types" + "io" + "io/ioutil" + + "golang.org/x/tools/go/internal/gcimporter" +) + +// Find returns the name of an object (.o) or archive (.a) file +// containing type information for the specified import path, +// using the workspace layout conventions of go/build. +// If no file was found, an empty filename is returned. +// +// A relative srcDir is interpreted relative to the current working directory. +// +// Find also returns the package's resolved (canonical) import path, +// reflecting the effects of srcDir and vendoring on importPath. +func Find(importPath, srcDir string) (filename, path string) { + return gcimporter.FindPkg(importPath, srcDir) +} + +// NewReader returns a reader for the export data section of an object +// (.o) or archive (.a) file read from r. The new reader may provide +// additional trailing data beyond the end of the export data. +func NewReader(r io.Reader) (io.Reader, error) { + buf := bufio.NewReader(r) + _, err := gcimporter.FindExportData(buf) + // If we ever switch to a zip-like archive format with the ToC + // at the end, we can return the correct portion of export data, + // but for now we must return the entire rest of the file. + return buf, err +} + +// Read reads export data from in, decodes it, and returns type +// information for the package. +// The package name is specified by path. +// File position information is added to fset. 
+// +// Read may inspect and add to the imports map to ensure that references +// within the export data to other packages are consistent. The caller +// must ensure that imports[path] does not exist, or exists but is +// incomplete (see types.Package.Complete), and Read inserts the +// resulting package into this map entry. +// +// On return, the state of the reader is undefined. +func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) { + data, err := ioutil.ReadAll(in) + if err != nil { + return nil, fmt.Errorf("reading export data for %q: %v", path, err) + } + + if bytes.HasPrefix(data, []byte("!")) { + return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path) + } + + // The App Engine Go runtime v1.6 uses the old export data format. + // TODO(adonovan): delete once v1.7 has been around for a while. + if bytes.HasPrefix(data, []byte("package ")) { + return gcimporter.ImportData(imports, path, path, bytes.NewReader(data)) + } + + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. + if len(data) > 0 && data[0] == 'i' { + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) + return pkg, err + } + + _, pkg, err := gcimporter.BImportData(fset, imports, data, path) + return pkg, err +} + +// Write writes encoded type information for the specified package to out. +// The FileSet provides file position information for named objects. +func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error { + b, err := gcimporter.IExportData(fset, pkg) + if err != nil { + return err + } + _, err = out.Write(b) + return err +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/importer.go b/vendor/golang.org/x/tools/go/gcexportdata/importer.go new file mode 100644 index 000000000..efe221e7e --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/importer.go @@ -0,0 +1,73 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcexportdata + +import ( + "fmt" + "go/token" + "go/types" + "os" +) + +// NewImporter returns a new instance of the types.Importer interface +// that reads type information from export data files written by gc. +// The Importer also satisfies types.ImporterFrom. +// +// Export data files are located using "go build" workspace conventions +// and the build.Default context. +// +// Use this importer instead of go/importer.For("gc", ...) to avoid the +// version-skew problems described in the documentation of this package, +// or to control the FileSet or access the imports map populated during +// package loading. +// +func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom { + return importer{fset, imports} +} + +type importer struct { + fset *token.FileSet + imports map[string]*types.Package +} + +func (imp importer) Import(importPath string) (*types.Package, error) { + return imp.ImportFrom(importPath, "", 0) +} + +func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) { + filename, path := Find(importPath, srcDir) + if filename == "" { + if importPath == "unsafe" { + // Even for unsafe, call Find first in case + // the package was vendored. 
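+			// "unsafe" has no export data file on disk; its package object
+			// is the predeclared types.Unsafe, so return it directly.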
+ return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %s", importPath) + } + + if pkg, ok := imp.imports[path]; ok && pkg.Complete() { + return pkg, nil // cache hit + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + f.Close() + if err != nil { + // add file name to error + err = fmt.Errorf("reading export data: %s: %v", filename, err) + } + }() + + r, err := NewReader(f) + if err != nil { + return nil, err + } + + return Read(r, imp.fset, imp.imports, path) +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go new file mode 100644 index 000000000..a807d0aaa --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go @@ -0,0 +1,852 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Binary package export. +// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go; +// see that file for specification of the format. + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + "math" + "math/big" + "sort" + "strings" +) + +// If debugFormat is set, each integer and string value is preceded by a marker +// and position information in the encoding. This mechanism permits an importer +// to recognize immediately when it is out of sync. The importer recognizes this +// mode automatically (i.e., it can import export data produced with debugging +// support even if debugFormat is not set at the time of import). This mode will +// lead to massively larger export data (by a factor of 2 to 3) and should only +// be enabled during development and debugging. +// +// NOTE: This flag is the first flag to enable if importing dies because of +// (suspected) format errors, and whenever a change is made to the format. +const debugFormat = false // default: false + +// If trace is set, debugging output is printed to std out. +const trace = false // default: false + +// Current export format version. Increase with each format change. +// Note: The latest binary (non-indexed) export format is at version 6. +// This exporter is still at level 4, but it doesn't matter since +// the binary importer can handle older versions just fine. +// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE +// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMEMTED HERE +// 4: type name objects support type aliases, uses aliasTag +// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used) +// 2: removed unused bool in ODCL export (compiler only) +// 1: header format change (more regular), export package for _ struct fields +// 0: Go1.7 encoding +const exportVersion = 4 + +// trackAllTypes enables cycle tracking for all types, not just named +// types. The existing compiler invariants assume that unnamed types +// that are not completely set up are not used, or else there are spurious +// errors. +// If disabled, only named types are tracked, possibly leading to slightly +// less efficient encoding in rare cases. It also prevents the export of +// some corner-case type declarations (but those are not handled correctly +// with with the textual export format either). 
+// TODO(gri) enable and remove once issues caused by it are fixed +const trackAllTypes = false + +type exporter struct { + fset *token.FileSet + out bytes.Buffer + + // object -> index maps, indexed in order of serialization + strIndex map[string]int + pkgIndex map[*types.Package]int + typIndex map[types.Type]int + + // position encoding + posInfoFormat bool + prevFile string + prevLine int + + // debugging support + written int // bytes written + indent int // for trace +} + +// internalError represents an error generated inside this package. +type internalError string + +func (e internalError) Error() string { return "gcimporter: " + string(e) } + +func internalErrorf(format string, args ...interface{}) error { + return internalError(fmt.Sprintf(format, args...)) +} + +// BExportData returns binary export data for pkg. +// If no file set is provided, position info will be missing. +func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { + defer func() { + if e := recover(); e != nil { + if ierr, ok := e.(internalError); ok { + err = ierr + return + } + // Not an internal error; panic again. + panic(e) + } + }() + + p := exporter{ + fset: fset, + strIndex: map[string]int{"": 0}, // empty string is mapped to 0 + pkgIndex: make(map[*types.Package]int), + typIndex: make(map[types.Type]int), + posInfoFormat: true, // TODO(gri) might become a flag, eventually + } + + // write version info + // The version string must start with "version %d" where %d is the version + // number. Additional debugging information may follow after a blank; that + // text is ignored by the importer. + p.rawStringln(fmt.Sprintf("version %d", exportVersion)) + var debug string + if debugFormat { + debug = "debug" + } + p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly + p.bool(trackAllTypes) + p.bool(p.posInfoFormat) + + // --- generic export data --- + + // populate type map with predeclared "known" types + for index, typ := range predeclared() { + p.typIndex[typ] = index + } + if len(p.typIndex) != len(predeclared()) { + return nil, internalError("duplicate entries in type map?") + } + + // write package data + p.pkg(pkg, true) + if trace { + p.tracef("\n") + } + + // write objects + objcount := 0 + scope := pkg.Scope() + for _, name := range scope.Names() { + if !ast.IsExported(name) { + continue + } + if trace { + p.tracef("\n") + } + p.obj(scope.Lookup(name)) + objcount++ + } + + // indicate end of list + if trace { + p.tracef("\n") + } + p.tag(endTag) + + // for self-verification only (redundant) + p.int(objcount) + + if trace { + p.tracef("\n") + } + + // --- end of export data --- + + return p.out.Bytes(), nil +} + +func (p *exporter) pkg(pkg *types.Package, emptypath bool) { + if pkg == nil { + panic(internalError("unexpected nil pkg")) + } + + // if we saw the package before, write its index (>= 0) + if i, ok := p.pkgIndex[pkg]; ok { + p.index('P', i) + return + } + + // otherwise, remember the package, write the package tag (< 0) and package data + if trace { + p.tracef("P%d = { ", len(p.pkgIndex)) + defer p.tracef("} ") + } + p.pkgIndex[pkg] = len(p.pkgIndex) + + p.tag(packageTag) + p.string(pkg.Name()) + if emptypath { + p.string("") + } else { + p.string(pkg.Path()) + } +} + +func (p *exporter) obj(obj types.Object) { + switch obj := obj.(type) { + case *types.Const: + p.tag(constTag) + p.pos(obj) + p.qualifiedName(obj) + p.typ(obj.Type()) + p.value(obj.Val()) + + case *types.TypeName: + if obj.IsAlias() { + 
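+			// An alias needs its position and qualified name written here;
+			// for a defined type, the namedTag case of p.typ emits them.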
p.tag(aliasTag) + p.pos(obj) + p.qualifiedName(obj) + } else { + p.tag(typeTag) + } + p.typ(obj.Type()) + + case *types.Var: + p.tag(varTag) + p.pos(obj) + p.qualifiedName(obj) + p.typ(obj.Type()) + + case *types.Func: + p.tag(funcTag) + p.pos(obj) + p.qualifiedName(obj) + sig := obj.Type().(*types.Signature) + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) + + default: + panic(internalErrorf("unexpected object %v (%T)", obj, obj)) + } +} + +func (p *exporter) pos(obj types.Object) { + if !p.posInfoFormat { + return + } + + file, line := p.fileLine(obj) + if file == p.prevFile { + // common case: write line delta + // delta == 0 means different file or no line change + delta := line - p.prevLine + p.int(delta) + if delta == 0 { + p.int(-1) // -1 means no file change + } + } else { + // different file + p.int(0) + // Encode filename as length of common prefix with previous + // filename, followed by (possibly empty) suffix. Filenames + // frequently share path prefixes, so this can save a lot + // of space and make export data size less dependent on file + // path length. The suffix is unlikely to be empty because + // file names tend to end in ".go". + n := commonPrefixLen(p.prevFile, file) + p.int(n) // n >= 0 + p.string(file[n:]) // write suffix only + p.prevFile = file + p.int(line) + } + p.prevLine = line +} + +func (p *exporter) fileLine(obj types.Object) (file string, line int) { + if p.fset != nil { + pos := p.fset.Position(obj.Pos()) + file = pos.Filename + line = pos.Line + } + return +} + +func commonPrefixLen(a, b string) int { + if len(a) > len(b) { + a, b = b, a + } + // len(a) <= len(b) + i := 0 + for i < len(a) && a[i] == b[i] { + i++ + } + return i +} + +func (p *exporter) qualifiedName(obj types.Object) { + p.string(obj.Name()) + p.pkg(obj.Pkg(), false) +} + +func (p *exporter) typ(t types.Type) { + if t == nil { + panic(internalError("nil type")) + } + + // Possible optimization: Anonymous pointer types *T where + // T is a named type are common. We could canonicalize all + // such types *T to a single type PT = *T. This would lead + // to at most one *T entry in typIndex, and all future *T's + // would be encoded as the respective index directly. Would + // save 1 byte (pointerTag) per *T and reduce the typIndex + // size (at the cost of a canonicalization map). We can do + // this later, without encoding format change. 
+ + // if we saw the type before, write its index (>= 0) + if i, ok := p.typIndex[t]; ok { + p.index('T', i) + return + } + + // otherwise, remember the type, write the type tag (< 0) and type data + if trackAllTypes { + if trace { + p.tracef("T%d = {>\n", len(p.typIndex)) + defer p.tracef("<\n} ") + } + p.typIndex[t] = len(p.typIndex) + } + + switch t := t.(type) { + case *types.Named: + if !trackAllTypes { + // if we don't track all types, track named types now + p.typIndex[t] = len(p.typIndex) + } + + p.tag(namedTag) + p.pos(t.Obj()) + p.qualifiedName(t.Obj()) + p.typ(t.Underlying()) + if !types.IsInterface(t) { + p.assocMethods(t) + } + + case *types.Array: + p.tag(arrayTag) + p.int64(t.Len()) + p.typ(t.Elem()) + + case *types.Slice: + p.tag(sliceTag) + p.typ(t.Elem()) + + case *dddSlice: + p.tag(dddTag) + p.typ(t.elem) + + case *types.Struct: + p.tag(structTag) + p.fieldList(t) + + case *types.Pointer: + p.tag(pointerTag) + p.typ(t.Elem()) + + case *types.Signature: + p.tag(signatureTag) + p.paramList(t.Params(), t.Variadic()) + p.paramList(t.Results(), false) + + case *types.Interface: + p.tag(interfaceTag) + p.iface(t) + + case *types.Map: + p.tag(mapTag) + p.typ(t.Key()) + p.typ(t.Elem()) + + case *types.Chan: + p.tag(chanTag) + p.int(int(3 - t.Dir())) // hack + p.typ(t.Elem()) + + default: + panic(internalErrorf("unexpected type %T: %s", t, t)) + } +} + +func (p *exporter) assocMethods(named *types.Named) { + // Sort methods (for determinism). + var methods []*types.Func + for i := 0; i < named.NumMethods(); i++ { + methods = append(methods, named.Method(i)) + } + sort.Sort(methodsByName(methods)) + + p.int(len(methods)) + + if trace && methods != nil { + p.tracef("associated methods {>\n") + } + + for i, m := range methods { + if trace && i > 0 { + p.tracef("\n") + } + + p.pos(m) + name := m.Name() + p.string(name) + if !exported(name) { + p.pkg(m.Pkg(), false) + } + + sig := m.Type().(*types.Signature) + p.paramList(types.NewTuple(sig.Recv()), false) + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) + p.int(0) // dummy value for go:nointerface pragma - ignored by importer + } + + if trace && methods != nil { + p.tracef("<\n} ") + } +} + +type methodsByName []*types.Func + +func (x methodsByName) Len() int { return len(x) } +func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() } + +func (p *exporter) fieldList(t *types.Struct) { + if trace && t.NumFields() > 0 { + p.tracef("fields {>\n") + defer p.tracef("<\n} ") + } + + p.int(t.NumFields()) + for i := 0; i < t.NumFields(); i++ { + if trace && i > 0 { + p.tracef("\n") + } + p.field(t.Field(i)) + p.string(t.Tag(i)) + } +} + +func (p *exporter) field(f *types.Var) { + if !f.IsField() { + panic(internalError("field expected")) + } + + p.pos(f) + p.fieldName(f) + p.typ(f.Type()) +} + +func (p *exporter) iface(t *types.Interface) { + // TODO(gri): enable importer to load embedded interfaces, + // then emit Embeddeds and ExplicitMethods separately here. 
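+	// Until then, write zero embeddeds; the method list below is the
+	// interface's full method set, so methods promoted from embedded
+	// interfaces are still exported.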
+ p.int(0) + + n := t.NumMethods() + if trace && n > 0 { + p.tracef("methods {>\n") + defer p.tracef("<\n} ") + } + p.int(n) + for i := 0; i < n; i++ { + if trace && i > 0 { + p.tracef("\n") + } + p.method(t.Method(i)) + } +} + +func (p *exporter) method(m *types.Func) { + sig := m.Type().(*types.Signature) + if sig.Recv() == nil { + panic(internalError("method expected")) + } + + p.pos(m) + p.string(m.Name()) + if m.Name() != "_" && !ast.IsExported(m.Name()) { + p.pkg(m.Pkg(), false) + } + + // interface method; no need to encode receiver. + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) +} + +func (p *exporter) fieldName(f *types.Var) { + name := f.Name() + + if f.Anonymous() { + // anonymous field - we distinguish between 3 cases: + // 1) field name matches base type name and is exported + // 2) field name matches base type name and is not exported + // 3) field name doesn't match base type name (alias name) + bname := basetypeName(f.Type()) + if name == bname { + if ast.IsExported(name) { + name = "" // 1) we don't need to know the field name or package + } else { + name = "?" // 2) use unexported name "?" to force package export + } + } else { + // 3) indicate alias and export name as is + // (this requires an extra "@" but this is a rare case) + p.string("@") + } + } + + p.string(name) + if name != "" && !ast.IsExported(name) { + p.pkg(f.Pkg(), false) + } +} + +func basetypeName(typ types.Type) string { + switch typ := deref(typ).(type) { + case *types.Basic: + return typ.Name() + case *types.Named: + return typ.Obj().Name() + default: + return "" // unnamed type + } +} + +func (p *exporter) paramList(params *types.Tuple, variadic bool) { + // use negative length to indicate unnamed parameters + // (look at the first parameter only since either all + // names are present or all are absent) + n := params.Len() + if n > 0 && params.At(0).Name() == "" { + n = -n + } + p.int(n) + for i := 0; i < params.Len(); i++ { + q := params.At(i) + t := q.Type() + if variadic && i == params.Len()-1 { + t = &dddSlice{t.(*types.Slice).Elem()} + } + p.typ(t) + if n > 0 { + name := q.Name() + p.string(name) + if name != "_" { + p.pkg(q.Pkg(), false) + } + } + p.string("") // no compiler-specific info + } +} + +func (p *exporter) value(x constant.Value) { + if trace { + p.tracef("= ") + } + + switch x.Kind() { + case constant.Bool: + tag := falseTag + if constant.BoolVal(x) { + tag = trueTag + } + p.tag(tag) + + case constant.Int: + if v, exact := constant.Int64Val(x); exact { + // common case: x fits into an int64 - use compact encoding + p.tag(int64Tag) + p.int64(v) + return + } + // uncommon case: large x - use float encoding + // (powers of 2 will be encoded efficiently with exponent) + p.tag(floatTag) + p.float(constant.ToFloat(x)) + + case constant.Float: + p.tag(floatTag) + p.float(x) + + case constant.Complex: + p.tag(complexTag) + p.float(constant.Real(x)) + p.float(constant.Imag(x)) + + case constant.String: + p.tag(stringTag) + p.string(constant.StringVal(x)) + + case constant.Unknown: + // package contains type errors + p.tag(unknownTag) + + default: + panic(internalErrorf("unexpected value %v (%T)", x, x)) + } +} + +func (p *exporter) float(x constant.Value) { + if x.Kind() != constant.Float { + panic(internalErrorf("unexpected constant %v, want float", x)) + } + // extract sign (there is no -0) + sign := constant.Sign(x) + if sign == 0 { + // x == 0 + p.int(0) + return + } + // x != 0 + + var f big.Float + if v, exact := constant.Float64Val(x); exact { + // 
float64 + f.SetFloat64(v) + } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { + // TODO(gri): add big.Rat accessor to constant.Value. + r := valueToRat(num) + f.SetRat(r.Quo(r, valueToRat(denom))) + } else { + // Value too large to represent as a fraction => inaccessible. + // TODO(gri): add big.Float accessor to constant.Value. + f.SetFloat64(math.MaxFloat64) // FIXME + } + + // extract exponent such that 0.5 <= m < 1.0 + var m big.Float + exp := f.MantExp(&m) + + // extract mantissa as *big.Int + // - set exponent large enough so mant satisfies mant.IsInt() + // - get *big.Int from mant + m.SetMantExp(&m, int(m.MinPrec())) + mant, acc := m.Int(nil) + if acc != big.Exact { + panic(internalError("internal error")) + } + + p.int(sign) + p.int(exp) + p.string(string(mant.Bytes())) +} + +func valueToRat(x constant.Value) *big.Rat { + // Convert little-endian to big-endian. + // I can't believe this is necessary. + bytes := constant.Bytes(x) + for i := 0; i < len(bytes)/2; i++ { + bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] + } + return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) +} + +func (p *exporter) bool(b bool) bool { + if trace { + p.tracef("[") + defer p.tracef("= %v] ", b) + } + + x := 0 + if b { + x = 1 + } + p.int(x) + return b +} + +// ---------------------------------------------------------------------------- +// Low-level encoders + +func (p *exporter) index(marker byte, index int) { + if index < 0 { + panic(internalError("invalid index < 0")) + } + if debugFormat { + p.marker('t') + } + if trace { + p.tracef("%c%d ", marker, index) + } + p.rawInt64(int64(index)) +} + +func (p *exporter) tag(tag int) { + if tag >= 0 { + panic(internalError("invalid tag >= 0")) + } + if debugFormat { + p.marker('t') + } + if trace { + p.tracef("%s ", tagString[-tag]) + } + p.rawInt64(int64(tag)) +} + +func (p *exporter) int(x int) { + p.int64(int64(x)) +} + +func (p *exporter) int64(x int64) { + if debugFormat { + p.marker('i') + } + if trace { + p.tracef("%d ", x) + } + p.rawInt64(x) +} + +func (p *exporter) string(s string) { + if debugFormat { + p.marker('s') + } + if trace { + p.tracef("%q ", s) + } + // if we saw the string before, write its index (>= 0) + // (the empty string is mapped to 0) + if i, ok := p.strIndex[s]; ok { + p.rawInt64(int64(i)) + return + } + // otherwise, remember string and write its negative length and bytes + p.strIndex[s] = len(p.strIndex) + p.rawInt64(-int64(len(s))) + for i := 0; i < len(s); i++ { + p.rawByte(s[i]) + } +} + +// marker emits a marker byte and position information which makes +// it easy for a reader to detect if it is "out of sync". Used for +// debugFormat format only. +func (p *exporter) marker(m byte) { + p.rawByte(m) + // Enable this for help tracking down the location + // of an incorrect marker when running in debugFormat. + if false && trace { + p.tracef("#%d ", p.written) + } + p.rawInt64(int64(p.written)) +} + +// rawInt64 should only be used by low-level encoders. +func (p *exporter) rawInt64(x int64) { + var tmp [binary.MaxVarintLen64]byte + n := binary.PutVarint(tmp[:], x) + for i := 0; i < n; i++ { + p.rawByte(tmp[i]) + } +} + +// rawStringln should only be used to emit the initial version string. +func (p *exporter) rawStringln(s string) { + for i := 0; i < len(s); i++ { + p.rawByte(s[i]) + } + p.rawByte('\n') +} + +// rawByte is the bottleneck interface to write to p.out. 
+// rawByte escapes b as follows (any encoding does that +// hides '$'): +// +// '$' => '|' 'S' +// '|' => '|' '|' +// +// Necessary so other tools can find the end of the +// export data by searching for "$$". +// rawByte should only be used by low-level encoders. +func (p *exporter) rawByte(b byte) { + switch b { + case '$': + // write '$' as '|' 'S' + b = 'S' + fallthrough + case '|': + // write '|' as '|' '|' + p.out.WriteByte('|') + p.written++ + } + p.out.WriteByte(b) + p.written++ +} + +// tracef is like fmt.Printf but it rewrites the format string +// to take care of indentation. +func (p *exporter) tracef(format string, args ...interface{}) { + if strings.ContainsAny(format, "<>\n") { + var buf bytes.Buffer + for i := 0; i < len(format); i++ { + // no need to deal with runes + ch := format[i] + switch ch { + case '>': + p.indent++ + continue + case '<': + p.indent-- + continue + } + buf.WriteByte(ch) + if ch == '\n' { + for j := p.indent; j > 0; j-- { + buf.WriteString(". ") + } + } + } + format = buf.String() + } + fmt.Printf(format, args...) +} + +// Debugging support. +// (tagString is only used when tracing is enabled) +var tagString = [...]string{ + // Packages + -packageTag: "package", + + // Types + -namedTag: "named type", + -arrayTag: "array", + -sliceTag: "slice", + -dddTag: "ddd", + -structTag: "struct", + -pointerTag: "pointer", + -signatureTag: "signature", + -interfaceTag: "interface", + -mapTag: "map", + -chanTag: "chan", + + // Values + -falseTag: "false", + -trueTag: "true", + -int64Tag: "int64", + -floatTag: "float", + -fractionTag: "fraction", + -complexTag: "complex", + -stringTag: "string", + -unknownTag: "unknown", + + // Type aliases + -aliasTag: "alias", +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go new file mode 100644 index 000000000..e9f73d14a --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go @@ -0,0 +1,1039 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go. + +package gcimporter + +import ( + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +type importer struct { + imports map[string]*types.Package + data []byte + importpath string + buf []byte // for reading strings + version int // export format version + + // object lists + strList []string // in order of appearance + pathList []string // in order of appearance + pkgList []*types.Package // in order of appearance + typList []types.Type // in order of appearance + interfaceList []*types.Interface // for delayed completion only + trackAllTypes bool + + // position encoding + posInfoFormat bool + prevFile string + prevLine int + fake fakeFileSet + + // debugging support + debugFormat bool + read int // bytes read +} + +// BImportData imports a package from the serialized package data +// and returns the number of bytes consumed and a reference to the package. +// If the export data version is not recognized or the format is otherwise +// compromised, an error is returned. 
+func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + // catch panics and return them as errors + const currentVersion = 6 + version := -1 // unknown version + defer func() { + if e := recover(); e != nil { + // Return a (possibly nil or incomplete) package unchanged (see #16088). + if version > currentVersion { + err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) + } else { + err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + } + } + }() + + p := importer{ + imports: imports, + data: data, + importpath: path, + version: version, + strList: []string{""}, // empty string is mapped to 0 + pathList: []string{""}, // empty string is mapped to 0 + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*token.File), + }, + } + + // read version info + var versionstr string + if b := p.rawByte(); b == 'c' || b == 'd' { + // Go1.7 encoding; first byte encodes low-level + // encoding format (compact vs debug). + // For backward-compatibility only (avoid problems with + // old installed packages). Newly compiled packages use + // the extensible format string. + // TODO(gri) Remove this support eventually; after Go1.8. + if b == 'd' { + p.debugFormat = true + } + p.trackAllTypes = p.rawByte() == 'a' + p.posInfoFormat = p.int() != 0 + versionstr = p.string() + if versionstr == "v1" { + version = 0 + } + } else { + // Go1.8 extensible encoding + // read version string and extract version number (ignore anything after the version number) + versionstr = p.rawStringln(b) + if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" { + if v, err := strconv.Atoi(s[1]); err == nil && v > 0 { + version = v + } + } + } + p.version = version + + // read version specific flags - extend as necessary + switch p.version { + // case currentVersion: + // ... + // fallthrough + case currentVersion, 5, 4, 3, 2, 1: + p.debugFormat = p.rawStringln(p.rawByte()) == "debug" + p.trackAllTypes = p.int() != 0 + p.posInfoFormat = p.int() != 0 + case 0: + // Go1.7 encoding format - nothing to do here + default: + errorf("unknown bexport format version %d (%q)", p.version, versionstr) + } + + // --- generic export data --- + + // populate typList with predeclared "known" types + p.typList = append(p.typList, predeclared()...) + + // read package data + pkg = p.pkg() + + // read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go) + objcount := 0 + for { + tag := p.tagOrIndex() + if tag == endTag { + break + } + p.obj(tag) + objcount++ + } + + // self-verification + if count := p.int(); count != objcount { + errorf("got %d objects; want %d", objcount, count) + } + + // ignore compiler-specific import data + + // complete interfaces + // TODO(gri) re-investigate if we still need to do this in a delayed fashion + for _, typ := range p.interfaceList { + typ.Complete() + } + + // record all referenced packages as imports + list := append(([]*types.Package)(nil), p.pkgList[1:]...) 
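+	// (p.pkgList[0] is the package being imported itself, so it is skipped.)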
+ sort.Sort(byPath(list)) + pkg.SetImports(list) + + // package was imported completely and without errors + pkg.MarkComplete() + + return p.read, pkg, nil +} + +func errorf(format string, args ...interface{}) { + panic(fmt.Sprintf(format, args...)) +} + +func (p *importer) pkg() *types.Package { + // if the package was seen before, i is its index (>= 0) + i := p.tagOrIndex() + if i >= 0 { + return p.pkgList[i] + } + + // otherwise, i is the package tag (< 0) + if i != packageTag { + errorf("unexpected package tag %d version %d", i, p.version) + } + + // read package data + name := p.string() + var path string + if p.version >= 5 { + path = p.path() + } else { + path = p.string() + } + if p.version >= 6 { + p.int() // package height; unused by go/types + } + + // we should never see an empty package name + if name == "" { + errorf("empty package name in import") + } + + // an empty path denotes the package we are currently importing; + // it must be the first package we see + if (path == "") != (len(p.pkgList) == 0) { + errorf("package path %q for pkg index %d", path, len(p.pkgList)) + } + + // if the package was imported before, use that one; otherwise create a new one + if path == "" { + path = p.importpath + } + pkg := p.imports[path] + if pkg == nil { + pkg = types.NewPackage(path, name) + p.imports[path] = pkg + } else if pkg.Name() != name { + errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path) + } + p.pkgList = append(p.pkgList, pkg) + + return pkg +} + +// objTag returns the tag value for each object kind. +func objTag(obj types.Object) int { + switch obj.(type) { + case *types.Const: + return constTag + case *types.TypeName: + return typeTag + case *types.Var: + return varTag + case *types.Func: + return funcTag + default: + errorf("unexpected object: %v (%T)", obj, obj) // panics + panic("unreachable") + } +} + +func sameObj(a, b types.Object) bool { + // Because unnamed types are not canonicalized, we cannot simply compare types for + // (pointer) identity. + // Ideally we'd check equality of constant values as well, but this is good enough. + return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type()) +} + +func (p *importer) declare(obj types.Object) { + pkg := obj.Pkg() + if alt := pkg.Scope().Insert(obj); alt != nil { + // This can only trigger if we import a (non-type) object a second time. + // Excluding type aliases, this cannot happen because 1) we only import a package + // once; and b) we ignore compiler-specific export data which may contain + // functions whose inlined function bodies refer to other functions that + // were already imported. + // However, type aliases require reexporting the original type, so we need + // to allow it (see also the comment in cmd/compile/internal/gc/bimport.go, + // method importer.obj, switch case importing functions). + // TODO(gri) review/update this comment once the gc compiler handles type aliases. 
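+		// A second declaration is tolerated only if both objects agree in
+		// kind and type; anything else is reported as an inconsistent import.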
+ if !sameObj(obj, alt) { + errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt) + } + } +} + +func (p *importer) obj(tag int) { + switch tag { + case constTag: + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + val := p.value() + p.declare(types.NewConst(pos, pkg, name, typ, val)) + + case aliasTag: + // TODO(gri) verify type alias hookup is correct + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + p.declare(types.NewTypeName(pos, pkg, name, typ)) + + case typeTag: + p.typ(nil, nil) + + case varTag: + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + p.declare(types.NewVar(pos, pkg, name, typ)) + + case funcTag: + pos := p.pos() + pkg, name := p.qualifiedName() + params, isddd := p.paramList() + result, _ := p.paramList() + sig := types.NewSignature(nil, params, result, isddd) + p.declare(types.NewFunc(pos, pkg, name, sig)) + + default: + errorf("unexpected object tag %d", tag) + } +} + +const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go + +func (p *importer) pos() token.Pos { + if !p.posInfoFormat { + return token.NoPos + } + + file := p.prevFile + line := p.prevLine + delta := p.int() + line += delta + if p.version >= 5 { + if delta == deltaNewFile { + if n := p.int(); n >= 0 { + // file changed + file = p.path() + line = n + } + } + } else { + if delta == 0 { + if n := p.int(); n >= 0 { + // file changed + file = p.prevFile[:n] + p.string() + line = p.int() + } + } + } + p.prevFile = file + p.prevLine = line + + return p.fake.pos(file, line, 0) +} + +// Synthesize a token.Pos +type fakeFileSet struct { + fset *token.FileSet + files map[string]*token.File +} + +func (s *fakeFileSet) pos(file string, line, column int) token.Pos { + // TODO(mdempsky): Make use of column. + + // Since we don't know the set of needed file positions, we + // reserve maxlines positions per file. + const maxlines = 64 * 1024 + f := s.files[file] + if f == nil { + f = s.fset.AddFile(file, -1, maxlines) + s.files[file] = f + // Allocate the fake linebreak indices on first use. + // TODO(adonovan): opt: save ~512KB using a more complex scheme? + fakeLinesOnce.Do(func() { + fakeLines = make([]int, maxlines) + for i := range fakeLines { + fakeLines[i] = i + } + }) + f.SetLines(fakeLines) + } + + if line > maxlines { + line = 1 + } + + // Treat the file as if it contained only newlines + // and column=1: use the line number as the offset. + return f.Pos(line - 1) +} + +var ( + fakeLines []int + fakeLinesOnce sync.Once +) + +func (p *importer) qualifiedName() (pkg *types.Package, name string) { + name = p.string() + pkg = p.pkg() + return +} + +func (p *importer) record(t types.Type) { + p.typList = append(p.typList, t) +} + +// A dddSlice is a types.Type representing ...T parameters. +// It only appears for parameter types and does not escape +// the importer. +type dddSlice struct { + elem types.Type +} + +func (t *dddSlice) Underlying() types.Type { return t } +func (t *dddSlice) String() string { return "..." + t.elem.String() } + +// parent is the package which declared the type; parent == nil means +// the package currently imported. The parent package is needed for +// exported struct fields and interface methods which don't contain +// explicit package information in the export data. +// +// A non-nil tname is used as the "owner" of the result type; i.e., +// the result type is the underlying type of tname. tname is used +// to give interface methods a named receiver type where possible. 
+func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type { + // if the type was seen before, i is its index (>= 0) + i := p.tagOrIndex() + if i >= 0 { + return p.typList[i] + } + + // otherwise, i is the type tag (< 0) + switch i { + case namedTag: + // read type object + pos := p.pos() + parent, name := p.qualifiedName() + scope := parent.Scope() + obj := scope.Lookup(name) + + // if the object doesn't exist yet, create and insert it + if obj == nil { + obj = types.NewTypeName(pos, parent, name, nil) + scope.Insert(obj) + } + + if _, ok := obj.(*types.TypeName); !ok { + errorf("pkg = %s, name = %s => %s", parent, name, obj) + } + + // associate new named type with obj if it doesn't exist yet + t0 := types.NewNamed(obj.(*types.TypeName), nil, nil) + + // but record the existing type, if any + tname := obj.Type().(*types.Named) // tname is either t0 or the existing type + p.record(tname) + + // read underlying type + t0.SetUnderlying(p.typ(parent, t0)) + + // interfaces don't have associated methods + if types.IsInterface(t0) { + return tname + } + + // read associated methods + for i := p.int(); i > 0; i-- { + // TODO(gri) replace this with something closer to fieldName + pos := p.pos() + name := p.string() + if !exported(name) { + p.pkg() + } + + recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver? + params, isddd := p.paramList() + result, _ := p.paramList() + p.int() // go:nointerface pragma - discarded + + sig := types.NewSignature(recv.At(0), params, result, isddd) + t0.AddMethod(types.NewFunc(pos, parent, name, sig)) + } + + return tname + + case arrayTag: + t := new(types.Array) + if p.trackAllTypes { + p.record(t) + } + + n := p.int64() + *t = *types.NewArray(p.typ(parent, nil), n) + return t + + case sliceTag: + t := new(types.Slice) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewSlice(p.typ(parent, nil)) + return t + + case dddTag: + t := new(dddSlice) + if p.trackAllTypes { + p.record(t) + } + + t.elem = p.typ(parent, nil) + return t + + case structTag: + t := new(types.Struct) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewStruct(p.fieldList(parent)) + return t + + case pointerTag: + t := new(types.Pointer) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewPointer(p.typ(parent, nil)) + return t + + case signatureTag: + t := new(types.Signature) + if p.trackAllTypes { + p.record(t) + } + + params, isddd := p.paramList() + result, _ := p.paramList() + *t = *types.NewSignature(nil, params, result, isddd) + return t + + case interfaceTag: + // Create a dummy entry in the type list. This is safe because we + // cannot expect the interface type to appear in a cycle, as any + // such cycle must contain a named type which would have been + // first defined earlier. + // TODO(gri) Is this still true now that we have type aliases? + // See issue #23225. 
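+		// Remember the current slot and, when tracking all types, reserve
+		// it with a nil placeholder; the finished interface is patched
+		// back into p.typList[n] below.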
+ n := len(p.typList) + if p.trackAllTypes { + p.record(nil) + } + + var embeddeds []types.Type + for n := p.int(); n > 0; n-- { + p.pos() + embeddeds = append(embeddeds, p.typ(parent, nil)) + } + + t := newInterface(p.methodList(parent, tname), embeddeds) + p.interfaceList = append(p.interfaceList, t) + if p.trackAllTypes { + p.typList[n] = t + } + return t + + case mapTag: + t := new(types.Map) + if p.trackAllTypes { + p.record(t) + } + + key := p.typ(parent, nil) + val := p.typ(parent, nil) + *t = *types.NewMap(key, val) + return t + + case chanTag: + t := new(types.Chan) + if p.trackAllTypes { + p.record(t) + } + + dir := chanDir(p.int()) + val := p.typ(parent, nil) + *t = *types.NewChan(dir, val) + return t + + default: + errorf("unexpected type tag %d", i) // panics + panic("unreachable") + } +} + +func chanDir(d int) types.ChanDir { + // tag values must match the constants in cmd/compile/internal/gc/go.go + switch d { + case 1 /* Crecv */ : + return types.RecvOnly + case 2 /* Csend */ : + return types.SendOnly + case 3 /* Cboth */ : + return types.SendRecv + default: + errorf("unexpected channel dir %d", d) + return 0 + } +} + +func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) { + if n := p.int(); n > 0 { + fields = make([]*types.Var, n) + tags = make([]string, n) + for i := range fields { + fields[i], tags[i] = p.field(parent) + } + } + return +} + +func (p *importer) field(parent *types.Package) (*types.Var, string) { + pos := p.pos() + pkg, name, alias := p.fieldName(parent) + typ := p.typ(parent, nil) + tag := p.string() + + anonymous := false + if name == "" { + // anonymous field - typ must be T or *T and T must be a type name + switch typ := deref(typ).(type) { + case *types.Basic: // basic types are named types + pkg = nil // // objects defined in Universe scope have no package + name = typ.Name() + case *types.Named: + name = typ.Obj().Name() + default: + errorf("named base type expected") + } + anonymous = true + } else if alias { + // anonymous field: we have an explicit name because it's an alias + anonymous = true + } + + return types.NewField(pos, pkg, name, typ, anonymous), tag +} + +func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) { + if n := p.int(); n > 0 { + methods = make([]*types.Func, n) + for i := range methods { + methods[i] = p.method(parent, baseType) + } + } + return +} + +func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func { + pos := p.pos() + pkg, name, _ := p.fieldName(parent) + // If we don't have a baseType, use a nil receiver. + // A receiver using the actual interface type (which + // we don't know yet) will be filled in when we call + // types.Interface.Complete. 
+ var recv *types.Var + if baseType != nil { + recv = types.NewVar(token.NoPos, parent, "", baseType) + } + params, isddd := p.paramList() + result, _ := p.paramList() + sig := types.NewSignature(recv, params, result, isddd) + return types.NewFunc(pos, pkg, name, sig) +} + +func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) { + name = p.string() + pkg = parent + if pkg == nil { + // use the imported package instead + pkg = p.pkgList[0] + } + if p.version == 0 && name == "_" { + // version 0 didn't export a package for _ fields + return + } + switch name { + case "": + // 1) field name matches base type name and is exported: nothing to do + case "?": + // 2) field name matches base type name and is not exported: need package + name = "" + pkg = p.pkg() + case "@": + // 3) field name doesn't match type name (alias) + name = p.string() + alias = true + fallthrough + default: + if !exported(name) { + pkg = p.pkg() + } + } + return +} + +func (p *importer) paramList() (*types.Tuple, bool) { + n := p.int() + if n == 0 { + return nil, false + } + // negative length indicates unnamed parameters + named := true + if n < 0 { + n = -n + named = false + } + // n > 0 + params := make([]*types.Var, n) + isddd := false + for i := range params { + params[i], isddd = p.param(named) + } + return types.NewTuple(params...), isddd +} + +func (p *importer) param(named bool) (*types.Var, bool) { + t := p.typ(nil, nil) + td, isddd := t.(*dddSlice) + if isddd { + t = types.NewSlice(td.elem) + } + + var pkg *types.Package + var name string + if named { + name = p.string() + if name == "" { + errorf("expected named parameter") + } + if name != "_" { + pkg = p.pkg() + } + if i := strings.Index(name, "·"); i > 0 { + name = name[:i] // cut off gc-specific parameter numbering + } + } + + // read and discard compiler-specific info + p.string() + + return types.NewVar(token.NoPos, pkg, name, t), isddd +} + +func exported(name string) bool { + ch, _ := utf8.DecodeRuneInString(name) + return unicode.IsUpper(ch) +} + +func (p *importer) value() constant.Value { + switch tag := p.tagOrIndex(); tag { + case falseTag: + return constant.MakeBool(false) + case trueTag: + return constant.MakeBool(true) + case int64Tag: + return constant.MakeInt64(p.int64()) + case floatTag: + return p.float() + case complexTag: + re := p.float() + im := p.float() + return constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + case stringTag: + return constant.MakeString(p.string()) + case unknownTag: + return constant.MakeUnknown() + default: + errorf("unexpected value tag %d", tag) // panics + panic("unreachable") + } +} + +func (p *importer) float() constant.Value { + sign := p.int() + if sign == 0 { + return constant.MakeInt64(0) + } + + exp := p.int() + mant := []byte(p.string()) // big endian + + // remove leading 0's if any + for len(mant) > 0 && mant[0] == 0 { + mant = mant[1:] + } + + // convert to little endian + // TODO(gri) go/constant should have a more direct conversion function + // (e.g., once it supports a big.Float based implementation) + for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 { + mant[i], mant[j] = mant[j], mant[i] + } + + // adjust exponent (constant.MakeFromBytes creates an integer value, + // but mant represents the mantissa bits such that 0.5 <= mant < 1.0) + exp -= len(mant) << 3 + if len(mant) > 0 { + for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 { + exp++ + } + } + + x := constant.MakeFromBytes(mant) + switch { + case exp < 0: + d := 
constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) + x = constant.BinaryOp(x, token.QUO, d) + case exp > 0: + x = constant.Shift(x, token.SHL, uint(exp)) + } + + if sign < 0 { + x = constant.UnaryOp(token.SUB, x, 0) + } + return x +} + +// ---------------------------------------------------------------------------- +// Low-level decoders + +func (p *importer) tagOrIndex() int { + if p.debugFormat { + p.marker('t') + } + + return int(p.rawInt64()) +} + +func (p *importer) int() int { + x := p.int64() + if int64(int(x)) != x { + errorf("exported integer too large") + } + return int(x) +} + +func (p *importer) int64() int64 { + if p.debugFormat { + p.marker('i') + } + + return p.rawInt64() +} + +func (p *importer) path() string { + if p.debugFormat { + p.marker('p') + } + // if the path was seen before, i is its index (>= 0) + // (the empty string is at index 0) + i := p.rawInt64() + if i >= 0 { + return p.pathList[i] + } + // otherwise, i is the negative path length (< 0) + a := make([]string, -i) + for n := range a { + a[n] = p.string() + } + s := strings.Join(a, "/") + p.pathList = append(p.pathList, s) + return s +} + +func (p *importer) string() string { + if p.debugFormat { + p.marker('s') + } + // if the string was seen before, i is its index (>= 0) + // (the empty string is at index 0) + i := p.rawInt64() + if i >= 0 { + return p.strList[i] + } + // otherwise, i is the negative string length (< 0) + if n := int(-i); n <= cap(p.buf) { + p.buf = p.buf[:n] + } else { + p.buf = make([]byte, n) + } + for i := range p.buf { + p.buf[i] = p.rawByte() + } + s := string(p.buf) + p.strList = append(p.strList, s) + return s +} + +func (p *importer) marker(want byte) { + if got := p.rawByte(); got != want { + errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read) + } + + pos := p.read + if n := int(p.rawInt64()); n != pos { + errorf("incorrect position: got %d; want %d", n, pos) + } +} + +// rawInt64 should only be used by low-level decoders. +func (p *importer) rawInt64() int64 { + i, err := binary.ReadVarint(p) + if err != nil { + errorf("read error: %v", err) + } + return i +} + +// rawStringln should only be used to read the initial version string. +func (p *importer) rawStringln(b byte) string { + p.buf = p.buf[:0] + for b != '\n' { + p.buf = append(p.buf, b) + b = p.rawByte() + } + return string(p.buf) +} + +// needed for binary.ReadVarint in rawInt64 +func (p *importer) ReadByte() (byte, error) { + return p.rawByte(), nil +} + +// byte is the bottleneck interface for reading p.data. +// It unescapes '|' 'S' to '$' and '|' '|' to '|'. +// rawByte should only be used by low-level decoders. +func (p *importer) rawByte() byte { + b := p.data[0] + r := 1 + if b == '|' { + b = p.data[1] + r = 2 + switch b { + case 'S': + b = '$' + case '|': + // nothing to do + default: + errorf("unexpected escape sequence in export data") + } + } + p.data = p.data[r:] + p.read += r + return b + +} + +// ---------------------------------------------------------------------------- +// Export format + +// Tags. Must be < 0. 
+const ( + // Objects + packageTag = -(iota + 1) + constTag + typeTag + varTag + funcTag + endTag + + // Types + namedTag + arrayTag + sliceTag + dddTag + structTag + pointerTag + signatureTag + interfaceTag + mapTag + chanTag + + // Values + falseTag + trueTag + int64Tag + floatTag + fractionTag // not used by gc + complexTag + stringTag + nilTag // only used by gc (appears in exported inlined function bodies) + unknownTag // not used by gc (only appears in packages with errors) + + // Type aliases + aliasTag +) + +var predeclOnce sync.Once +var predecl []types.Type // initialized lazily + +func predeclared() []types.Type { + predeclOnce.Do(func() { + // initialize lazily to be sure that all + // elements have been initialized before + predecl = []types.Type{ // basic types + types.Typ[types.Bool], + types.Typ[types.Int], + types.Typ[types.Int8], + types.Typ[types.Int16], + types.Typ[types.Int32], + types.Typ[types.Int64], + types.Typ[types.Uint], + types.Typ[types.Uint8], + types.Typ[types.Uint16], + types.Typ[types.Uint32], + types.Typ[types.Uint64], + types.Typ[types.Uintptr], + types.Typ[types.Float32], + types.Typ[types.Float64], + types.Typ[types.Complex64], + types.Typ[types.Complex128], + types.Typ[types.String], + + // basic type aliases + types.Universe.Lookup("byte").Type(), + types.Universe.Lookup("rune").Type(), + + // error + types.Universe.Lookup("error").Type(), + + // untyped types + types.Typ[types.UntypedBool], + types.Typ[types.UntypedInt], + types.Typ[types.UntypedRune], + types.Typ[types.UntypedFloat], + types.Typ[types.UntypedComplex], + types.Typ[types.UntypedString], + types.Typ[types.UntypedNil], + + // package unsafe + types.Typ[types.UnsafePointer], + + // invalid type + types.Typ[types.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + anyType{}, + } + }) + return predecl +} + +type anyType struct{} + +func (t anyType) Underlying() types.Type { return t } +func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go new file mode 100644 index 000000000..f33dc5613 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go @@ -0,0 +1,93 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go. + +// This file implements FindExportData. + +package gcimporter + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" +) + +func readGopackHeader(r *bufio.Reader) (name string, size int, err error) { + // See $GOROOT/include/ar.h. + hdr := make([]byte, 16+12+6+6+8+10+2) + _, err = io.ReadFull(r, hdr) + if err != nil { + return + } + // leave for debugging + if false { + fmt.Printf("header: %s", hdr) + } + s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10])) + size, err = strconv.Atoi(s) + if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' { + err = fmt.Errorf("invalid archive header") + return + } + name = strings.TrimSpace(string(hdr[:16])) + return +} + +// FindExportData positions the reader r at the beginning of the +// export data section of an underlying GC-created object/archive +// file by reading from it. The reader must be positioned at the +// start of the file before calling this function. 
The hdr result +// is the string before the export data, either "$$" or "$$B". +// +func FindExportData(r *bufio.Reader) (hdr string, err error) { + // Read first line to make sure this is an object file. + line, err := r.ReadSlice('\n') + if err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + + if string(line) == "!\n" { + // Archive file. Scan to __.PKGDEF. + var name string + if name, _, err = readGopackHeader(r); err != nil { + return + } + + // First entry should be __.PKGDEF. + if name != "__.PKGDEF" { + err = fmt.Errorf("go archive is missing __.PKGDEF") + return + } + + // Read first line of __.PKGDEF data, so that line + // is once again the first line of the input. + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + } + + // Now at __.PKGDEF in archive or still at beginning of file. + // Either way, line should begin with "go object ". + if !strings.HasPrefix(string(line), "go object ") { + err = fmt.Errorf("not a Go object file") + return + } + + // Skip over object header to export data. + // Begins after first line starting with $$. + for line[0] != '$' { + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + } + hdr = string(line) + + return +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go new file mode 100644 index 000000000..9cf186605 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go @@ -0,0 +1,1078 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a modified copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go, +// but it also contains the original source-based importer code for Go1.6. +// Once we stop supporting 1.6, we can remove that code. + +// Package gcimporter provides various functions for reading +// gc-generated object files that can be used to implement the +// Importer interface defined by the Go 1.5 standard library package. +package gcimporter // import "golang.org/x/tools/go/internal/gcimporter" + +import ( + "bufio" + "errors" + "fmt" + "go/build" + "go/constant" + "go/token" + "go/types" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "text/scanner" +) + +// debugging/development support +const debug = false + +var pkgExts = [...]string{".a", ".o"} + +// FindPkg returns the filename and unique package id for an import +// path based on package information provided by build.Import (using +// the build.Default build.Context). A relative srcDir is interpreted +// relative to the current working directory. +// If no file was found, an empty filename is returned. +// +func FindPkg(path, srcDir string) (filename, id string) { + if path == "" { + return + } + + var noext string + switch { + default: + // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" + // Don't require the source files to be present. 
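+		// (build.AllowBinary below allows the import to be satisfied by a
+		// compiled package object alone, without the corresponding sources.)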
+ if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 + srcDir = abs + } + bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) + if bp.PkgObj == "" { + id = path // make sure we have an id to print in error message + return + } + noext = strings.TrimSuffix(bp.PkgObj, ".a") + id = bp.ImportPath + + case build.IsLocalImport(path): + // "./x" -> "/this/directory/x.ext", "/this/directory/x" + noext = filepath.Join(srcDir, path) + id = noext + + case filepath.IsAbs(path): + // for completeness only - go/build.Import + // does not support absolute imports + // "/x" -> "/x.ext", "/x" + noext = path + id = path + } + + if false { // for debugging + if path != id { + fmt.Printf("%s -> %s\n", path, id) + } + } + + // try extensions + for _, ext := range pkgExts { + filename = noext + ext + if f, err := os.Stat(filename); err == nil && !f.IsDir() { + return + } + } + + filename = "" // not found + return +} + +// ImportData imports a package by reading the gc-generated export data, +// adds the corresponding package object to the packages map indexed by id, +// and returns the object. +// +// The packages map must contains all packages already imported. The data +// reader position must be the beginning of the export data section. The +// filename is only used in error messages. +// +// If packages[id] contains the completely imported package, that package +// can be used directly, and there is no need to call this function (but +// there is also no harm but for extra time used). +// +func ImportData(packages map[string]*types.Package, filename, id string, data io.Reader) (pkg *types.Package, err error) { + // support for parser error handling + defer func() { + switch r := recover().(type) { + case nil: + // nothing to do + case importError: + err = r + default: + panic(r) // internal error + } + }() + + var p parser + p.init(filename, id, data, packages) + pkg = p.parseExport() + + return +} + +// Import imports a gc-generated package given its import path and srcDir, adds +// the corresponding package object to the packages map, and returns the object. +// The packages map must contain all packages already imported. +// +func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { + var rc io.ReadCloser + var filename, id string + if lookup != nil { + // With custom lookup specified, assume that caller has + // converted path to a canonical import path for use in the map. + if path == "unsafe" { + return types.Unsafe, nil + } + id = path + + // No need to re-import if the package was imported completely before. 
+ if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + f, err := lookup(path) + if err != nil { + return nil, err + } + rc = f + } else { + filename, id = FindPkg(path, srcDir) + if filename == "" { + if path == "unsafe" { + return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %q", id) + } + + // no need to re-import if the package was imported completely before + if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + // add file name to error + err = fmt.Errorf("%s: %v", filename, err) + } + }() + rc = f + } + defer rc.Close() + + var hdr string + buf := bufio.NewReader(rc) + if hdr, err = FindExportData(buf); err != nil { + return + } + + switch hdr { + case "$$\n": + // Work-around if we don't have a filename; happens only if lookup != nil. + // Either way, the filename is only needed for importer error messages, so + // this is fine. + if filename == "" { + filename = path + } + return ImportData(packages, filename, id, buf) + + case "$$B\n": + var data []byte + data, err = ioutil.ReadAll(buf) + if err != nil { + break + } + + // TODO(gri): allow clients of go/importer to provide a FileSet. + // Or, define a new standard go/types/gcexportdata package. + fset := token.NewFileSet() + + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. + if len(data) > 0 && data[0] == 'i' { + _, pkg, err = IImportData(fset, packages, data[1:], id) + } else { + _, pkg, err = BImportData(fset, packages, data, id) + } + + default: + err = fmt.Errorf("unknown export data header: %q", hdr) + } + + return +} + +// ---------------------------------------------------------------------------- +// Parser + +// TODO(gri) Imported objects don't have position information. +// Ideally use the debug table line info; alternatively +// create some fake position (or the position of the +// import). That way error messages referring to imported +// objects can print meaningful information. + +// parser parses the exports inside a gc compiler-produced +// object/archive file and populates its scope with the results. 
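As a rough usage sketch of the Import entry point above (illustrative only: importCompiled is a hypothetical helper, and because gcimporter is an internal package, external callers normally go through golang.org/x/tools/go/gcexportdata instead):

	// importCompiled loads the export data of an already-compiled package.
	func importCompiled(path, srcDir string) (*types.Package, error) {
		packages := make(map[string]*types.Package)
		// lookup == nil, so FindPkg locates the compiled .a/.o file via go/build.
		pkg, err := Import(packages, path, srcDir, nil)
		if err != nil {
			return nil, err
		}
		// Import marks a fully read package as complete.
		if !pkg.Complete() {
			return nil, fmt.Errorf("package %q was not imported completely", path)
		}
		return pkg, nil
	}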
+type parser struct { + scanner scanner.Scanner + tok rune // current token + lit string // literal string; only valid for Ident, Int, String tokens + id string // package id of imported package + sharedPkgs map[string]*types.Package // package id -> package object (across importer) + localPkgs map[string]*types.Package // package id -> package object (just this package) +} + +func (p *parser) init(filename, id string, src io.Reader, packages map[string]*types.Package) { + p.scanner.Init(src) + p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) } + p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanChars | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments + p.scanner.Whitespace = 1<<'\t' | 1<<' ' + p.scanner.Filename = filename // for good error messages + p.next() + p.id = id + p.sharedPkgs = packages + if debug { + // check consistency of packages map + for _, pkg := range packages { + if pkg.Name() == "" { + fmt.Printf("no package name for %s\n", pkg.Path()) + } + } + } +} + +func (p *parser) next() { + p.tok = p.scanner.Scan() + switch p.tok { + case scanner.Ident, scanner.Int, scanner.Char, scanner.String, '·': + p.lit = p.scanner.TokenText() + default: + p.lit = "" + } + if debug { + fmt.Printf("%s: %q -> %q\n", scanner.TokenString(p.tok), p.scanner.TokenText(), p.lit) + } +} + +func declTypeName(pkg *types.Package, name string) *types.TypeName { + scope := pkg.Scope() + if obj := scope.Lookup(name); obj != nil { + return obj.(*types.TypeName) + } + obj := types.NewTypeName(token.NoPos, pkg, name, nil) + // a named type may be referred to before the underlying type + // is known - set it up + types.NewNamed(obj, nil, nil) + scope.Insert(obj) + return obj +} + +// ---------------------------------------------------------------------------- +// Error handling + +// Internal errors are boxed as importErrors. +type importError struct { + pos scanner.Position + err error +} + +func (e importError) Error() string { + return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err) +} + +func (p *parser) error(err interface{}) { + if s, ok := err.(string); ok { + err = errors.New(s) + } + // panic with a runtime.Error if err is not an error + panic(importError{p.scanner.Pos(), err.(error)}) +} + +func (p *parser) errorf(format string, args ...interface{}) { + p.error(fmt.Sprintf(format, args...)) +} + +func (p *parser) expect(tok rune) string { + lit := p.lit + if p.tok != tok { + p.errorf("expected %s, got %s (%s)", scanner.TokenString(tok), scanner.TokenString(p.tok), lit) + } + p.next() + return lit +} + +func (p *parser) expectSpecial(tok string) { + sep := 'x' // not white space + i := 0 + for i < len(tok) && p.tok == rune(tok[i]) && sep > ' ' { + sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token + p.next() + i++ + } + if i < len(tok) { + p.errorf("expected %q, got %q", tok, tok[0:i]) + } +} + +func (p *parser) expectKeyword(keyword string) { + lit := p.expect(scanner.Ident) + if lit != keyword { + p.errorf("expected keyword %s, got %q", keyword, lit) + } +} + +// ---------------------------------------------------------------------------- +// Qualified and unqualified names + +// PackageId = string_lit . 
+// +func (p *parser) parsePackageId() string { + id, err := strconv.Unquote(p.expect(scanner.String)) + if err != nil { + p.error(err) + } + // id == "" stands for the imported package id + // (only known at time of package installation) + if id == "" { + id = p.id + } + return id +} + +// PackageName = ident . +// +func (p *parser) parsePackageName() string { + return p.expect(scanner.Ident) +} + +// dotIdentifier = ( ident | '·' ) { ident | int | '·' } . +func (p *parser) parseDotIdent() string { + ident := "" + if p.tok != scanner.Int { + sep := 'x' // not white space + for (p.tok == scanner.Ident || p.tok == scanner.Int || p.tok == '·') && sep > ' ' { + ident += p.lit + sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token + p.next() + } + } + if ident == "" { + p.expect(scanner.Ident) // use expect() for error handling + } + return ident +} + +// QualifiedName = "@" PackageId "." ( "?" | dotIdentifier ) . +// +func (p *parser) parseQualifiedName() (id, name string) { + p.expect('@') + id = p.parsePackageId() + p.expect('.') + // Per rev f280b8a485fd (10/2/2013), qualified names may be used for anonymous fields. + if p.tok == '?' { + p.next() + } else { + name = p.parseDotIdent() + } + return +} + +// getPkg returns the package for a given id. If the package is +// not found, create the package and add it to the p.localPkgs +// and p.sharedPkgs maps. name is the (expected) name of the +// package. If name == "", the package name is expected to be +// set later via an import clause in the export data. +// +// id identifies a package, usually by a canonical package path like +// "encoding/json" but possibly by a non-canonical import path like +// "./json". +// +func (p *parser) getPkg(id, name string) *types.Package { + // package unsafe is not in the packages maps - handle explicitly + if id == "unsafe" { + return types.Unsafe + } + + pkg := p.localPkgs[id] + if pkg == nil { + // first import of id from this package + pkg = p.sharedPkgs[id] + if pkg == nil { + // first import of id by this importer; + // add (possibly unnamed) pkg to shared packages + pkg = types.NewPackage(id, name) + p.sharedPkgs[id] = pkg + } + // add (possibly unnamed) pkg to local packages + if p.localPkgs == nil { + p.localPkgs = make(map[string]*types.Package) + } + p.localPkgs[id] = pkg + } else if name != "" { + // package exists already and we have an expected package name; + // make sure names match or set package name if necessary + if pname := pkg.Name(); pname == "" { + pkg.SetName(name) + } else if pname != name { + p.errorf("%s package name mismatch: %s (given) vs %s (expected)", id, pname, name) + } + } + return pkg +} + +// parseExportedName is like parseQualifiedName, but +// the package id is resolved to an imported *types.Package. +// +func (p *parser) parseExportedName() (pkg *types.Package, name string) { + id, name := p.parseQualifiedName() + pkg = p.getPkg(id, "") + return +} + +// ---------------------------------------------------------------------------- +// Types + +// BasicType = identifier . +// +func (p *parser) parseBasicType() types.Type { + id := p.expect(scanner.Ident) + obj := types.Universe.Lookup(id) + if obj, ok := obj.(*types.TypeName); ok { + return obj.Type() + } + p.errorf("not a basic type: %s", id) + return nil +} + +// ArrayType = "[" int_lit "]" Type . 
+// +func (p *parser) parseArrayType(parent *types.Package) types.Type { + // "[" already consumed and lookahead known not to be "]" + lit := p.expect(scanner.Int) + p.expect(']') + elem := p.parseType(parent) + n, err := strconv.ParseInt(lit, 10, 64) + if err != nil { + p.error(err) + } + return types.NewArray(elem, n) +} + +// MapType = "map" "[" Type "]" Type . +// +func (p *parser) parseMapType(parent *types.Package) types.Type { + p.expectKeyword("map") + p.expect('[') + key := p.parseType(parent) + p.expect(']') + elem := p.parseType(parent) + return types.NewMap(key, elem) +} + +// Name = identifier | "?" | QualifiedName . +// +// For unqualified and anonymous names, the returned package is the parent +// package unless parent == nil, in which case the returned package is the +// package being imported. (The parent package is not nil if the the name +// is an unqualified struct field or interface method name belonging to a +// type declared in another package.) +// +// For qualified names, the returned package is nil (and not created if +// it doesn't exist yet) unless materializePkg is set (which creates an +// unnamed package with valid package path). In the latter case, a +// subsequent import clause is expected to provide a name for the package. +// +func (p *parser) parseName(parent *types.Package, materializePkg bool) (pkg *types.Package, name string) { + pkg = parent + if pkg == nil { + pkg = p.sharedPkgs[p.id] + } + switch p.tok { + case scanner.Ident: + name = p.lit + p.next() + case '?': + // anonymous + p.next() + case '@': + // exported name prefixed with package path + pkg = nil + var id string + id, name = p.parseQualifiedName() + if materializePkg { + pkg = p.getPkg(id, "") + } + default: + p.error("name expected") + } + return +} + +func deref(typ types.Type) types.Type { + if p, _ := typ.(*types.Pointer); p != nil { + return p.Elem() + } + return typ +} + +// Field = Name Type [ string_lit ] . +// +func (p *parser) parseField(parent *types.Package) (*types.Var, string) { + pkg, name := p.parseName(parent, true) + + if name == "_" { + // Blank fields should be package-qualified because they + // are unexported identifiers, but gc does not qualify them. + // Assuming that the ident belongs to the current package + // causes types to change during re-exporting, leading + // to spurious "can't assign A to B" errors from go/types. + // As a workaround, pretend all blank fields belong + // to the same unique dummy package. + const blankpkg = "<_>" + pkg = p.getPkg(blankpkg, blankpkg) + } + + typ := p.parseType(parent) + anonymous := false + if name == "" { + // anonymous field - typ must be T or *T and T must be a type name + switch typ := deref(typ).(type) { + case *types.Basic: // basic types are named types + pkg = nil // objects defined in Universe scope have no package + name = typ.Name() + case *types.Named: + name = typ.Obj().Name() + default: + p.errorf("anonymous field expected") + } + anonymous = true + } + tag := "" + if p.tok == scanner.String { + s := p.expect(scanner.String) + var err error + tag, err = strconv.Unquote(s) + if err != nil { + p.errorf("invalid struct tag %s: %s", s, err) + } + } + return types.NewField(token.NoPos, pkg, name, typ, anonymous), tag +} + +// StructType = "struct" "{" [ FieldList ] "}" . +// FieldList = Field { ";" Field } . 
+// +func (p *parser) parseStructType(parent *types.Package) types.Type { + var fields []*types.Var + var tags []string + + p.expectKeyword("struct") + p.expect('{') + for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ { + if i > 0 { + p.expect(';') + } + fld, tag := p.parseField(parent) + if tag != "" && tags == nil { + tags = make([]string, i) + } + if tags != nil { + tags = append(tags, tag) + } + fields = append(fields, fld) + } + p.expect('}') + + return types.NewStruct(fields, tags) +} + +// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] . +// +func (p *parser) parseParameter() (par *types.Var, isVariadic bool) { + _, name := p.parseName(nil, false) + // remove gc-specific parameter numbering + if i := strings.Index(name, "·"); i >= 0 { + name = name[:i] + } + if p.tok == '.' { + p.expectSpecial("...") + isVariadic = true + } + typ := p.parseType(nil) + if isVariadic { + typ = types.NewSlice(typ) + } + // ignore argument tag (e.g. "noescape") + if p.tok == scanner.String { + p.next() + } + // TODO(gri) should we provide a package? + par = types.NewVar(token.NoPos, nil, name, typ) + return +} + +// Parameters = "(" [ ParameterList ] ")" . +// ParameterList = { Parameter "," } Parameter . +// +func (p *parser) parseParameters() (list []*types.Var, isVariadic bool) { + p.expect('(') + for p.tok != ')' && p.tok != scanner.EOF { + if len(list) > 0 { + p.expect(',') + } + par, variadic := p.parseParameter() + list = append(list, par) + if variadic { + if isVariadic { + p.error("... not on final argument") + } + isVariadic = true + } + } + p.expect(')') + + return +} + +// Signature = Parameters [ Result ] . +// Result = Type | Parameters . +// +func (p *parser) parseSignature(recv *types.Var) *types.Signature { + params, isVariadic := p.parseParameters() + + // optional result type + var results []*types.Var + if p.tok == '(' { + var variadic bool + results, variadic = p.parseParameters() + if variadic { + p.error("... not permitted on result type") + } + } + + return types.NewSignature(recv, types.NewTuple(params...), types.NewTuple(results...), isVariadic) +} + +// InterfaceType = "interface" "{" [ MethodList ] "}" . +// MethodList = Method { ";" Method } . +// Method = Name Signature . +// +// The methods of embedded interfaces are always "inlined" +// by the compiler and thus embedded interfaces are never +// visible in the export data. +// +func (p *parser) parseInterfaceType(parent *types.Package) types.Type { + var methods []*types.Func + + p.expectKeyword("interface") + p.expect('{') + for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ { + if i > 0 { + p.expect(';') + } + pkg, name := p.parseName(parent, true) + sig := p.parseSignature(nil) + methods = append(methods, types.NewFunc(token.NoPos, pkg, name, sig)) + } + p.expect('}') + + // Complete requires the type's embedded interfaces to be fully defined, + // but we do not define any + return types.NewInterface(methods, nil).Complete() +} + +// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type . 
+// +func (p *parser) parseChanType(parent *types.Package) types.Type { + dir := types.SendRecv + if p.tok == scanner.Ident { + p.expectKeyword("chan") + if p.tok == '<' { + p.expectSpecial("<-") + dir = types.SendOnly + } + } else { + p.expectSpecial("<-") + p.expectKeyword("chan") + dir = types.RecvOnly + } + elem := p.parseType(parent) + return types.NewChan(dir, elem) +} + +// Type = +// BasicType | TypeName | ArrayType | SliceType | StructType | +// PointerType | FuncType | InterfaceType | MapType | ChanType | +// "(" Type ")" . +// +// BasicType = ident . +// TypeName = ExportedName . +// SliceType = "[" "]" Type . +// PointerType = "*" Type . +// FuncType = "func" Signature . +// +func (p *parser) parseType(parent *types.Package) types.Type { + switch p.tok { + case scanner.Ident: + switch p.lit { + default: + return p.parseBasicType() + case "struct": + return p.parseStructType(parent) + case "func": + // FuncType + p.next() + return p.parseSignature(nil) + case "interface": + return p.parseInterfaceType(parent) + case "map": + return p.parseMapType(parent) + case "chan": + return p.parseChanType(parent) + } + case '@': + // TypeName + pkg, name := p.parseExportedName() + return declTypeName(pkg, name).Type() + case '[': + p.next() // look ahead + if p.tok == ']' { + // SliceType + p.next() + return types.NewSlice(p.parseType(parent)) + } + return p.parseArrayType(parent) + case '*': + // PointerType + p.next() + return types.NewPointer(p.parseType(parent)) + case '<': + return p.parseChanType(parent) + case '(': + // "(" Type ")" + p.next() + typ := p.parseType(parent) + p.expect(')') + return typ + } + p.errorf("expected type, got %s (%q)", scanner.TokenString(p.tok), p.lit) + return nil +} + +// ---------------------------------------------------------------------------- +// Declarations + +// ImportDecl = "import" PackageName PackageId . +// +func (p *parser) parseImportDecl() { + p.expectKeyword("import") + name := p.parsePackageName() + p.getPkg(p.parsePackageId(), name) +} + +// int_lit = [ "+" | "-" ] { "0" ... "9" } . +// +func (p *parser) parseInt() string { + s := "" + switch p.tok { + case '-': + s = "-" + p.next() + case '+': + p.next() + } + return s + p.expect(scanner.Int) +} + +// number = int_lit [ "p" int_lit ] . +// +func (p *parser) parseNumber() (typ *types.Basic, val constant.Value) { + // mantissa + mant := constant.MakeFromLiteral(p.parseInt(), token.INT, 0) + if mant == nil { + panic("invalid mantissa") + } + + if p.lit == "p" { + // exponent (base 2) + p.next() + exp, err := strconv.ParseInt(p.parseInt(), 10, 0) + if err != nil { + p.error(err) + } + if exp < 0 { + denom := constant.MakeInt64(1) + denom = constant.Shift(denom, token.SHL, uint(-exp)) + typ = types.Typ[types.UntypedFloat] + val = constant.BinaryOp(mant, token.QUO, denom) + return + } + if exp > 0 { + mant = constant.Shift(mant, token.SHL, uint(exp)) + } + typ = types.Typ[types.UntypedFloat] + val = mant + return + } + + typ = types.Typ[types.UntypedInt] + val = mant + return +} + +// ConstDecl = "const" ExportedName [ Type ] "=" Literal . +// Literal = bool_lit | int_lit | float_lit | complex_lit | rune_lit | string_lit . +// bool_lit = "true" | "false" . +// complex_lit = "(" float_lit "+" float_lit "i" ")" . +// rune_lit = "(" int_lit "+" int_lit ")" . +// string_lit = `"` { unicode_char } `"` . 
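A small worked example of the exported number notation handled by parseNumber above (the input is illustrative, not taken from real export data): given "3p-2", the mantissa is 3 and the base-2 exponent is -2, so the negative-exponent branch builds the denominator 1<<2 = 4 and yields the untyped float constant 3/4; a plain "12" has no "p" suffix and stays the untyped integer 12.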
+// +func (p *parser) parseConstDecl() { + p.expectKeyword("const") + pkg, name := p.parseExportedName() + + var typ0 types.Type + if p.tok != '=' { + // constant types are never structured - no need for parent type + typ0 = p.parseType(nil) + } + + p.expect('=') + var typ types.Type + var val constant.Value + switch p.tok { + case scanner.Ident: + // bool_lit + if p.lit != "true" && p.lit != "false" { + p.error("expected true or false") + } + typ = types.Typ[types.UntypedBool] + val = constant.MakeBool(p.lit == "true") + p.next() + + case '-', scanner.Int: + // int_lit + typ, val = p.parseNumber() + + case '(': + // complex_lit or rune_lit + p.next() + if p.tok == scanner.Char { + p.next() + p.expect('+') + typ = types.Typ[types.UntypedRune] + _, val = p.parseNumber() + p.expect(')') + break + } + _, re := p.parseNumber() + p.expect('+') + _, im := p.parseNumber() + p.expectKeyword("i") + p.expect(')') + typ = types.Typ[types.UntypedComplex] + val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + + case scanner.Char: + // rune_lit + typ = types.Typ[types.UntypedRune] + val = constant.MakeFromLiteral(p.lit, token.CHAR, 0) + p.next() + + case scanner.String: + // string_lit + typ = types.Typ[types.UntypedString] + val = constant.MakeFromLiteral(p.lit, token.STRING, 0) + p.next() + + default: + p.errorf("expected literal got %s", scanner.TokenString(p.tok)) + } + + if typ0 == nil { + typ0 = typ + } + + pkg.Scope().Insert(types.NewConst(token.NoPos, pkg, name, typ0, val)) +} + +// TypeDecl = "type" ExportedName Type . +// +func (p *parser) parseTypeDecl() { + p.expectKeyword("type") + pkg, name := p.parseExportedName() + obj := declTypeName(pkg, name) + + // The type object may have been imported before and thus already + // have a type associated with it. We still need to parse the type + // structure, but throw it away if the object already has a type. + // This ensures that all imports refer to the same type object for + // a given type declaration. + typ := p.parseType(pkg) + + if name := obj.Type().(*types.Named); name.Underlying() == nil { + name.SetUnderlying(typ) + } +} + +// VarDecl = "var" ExportedName Type . +// +func (p *parser) parseVarDecl() { + p.expectKeyword("var") + pkg, name := p.parseExportedName() + typ := p.parseType(pkg) + pkg.Scope().Insert(types.NewVar(token.NoPos, pkg, name, typ)) +} + +// Func = Signature [ Body ] . +// Body = "{" ... "}" . +// +func (p *parser) parseFunc(recv *types.Var) *types.Signature { + sig := p.parseSignature(recv) + if p.tok == '{' { + p.next() + for i := 1; i > 0; p.next() { + switch p.tok { + case '{': + i++ + case '}': + i-- + } + } + } + return sig +} + +// MethodDecl = "func" Receiver Name Func . +// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" . +// +func (p *parser) parseMethodDecl() { + // "func" already consumed + p.expect('(') + recv, _ := p.parseParameter() // receiver + p.expect(')') + + // determine receiver base type object + base := deref(recv.Type()).(*types.Named) + + // parse method name, signature, and possibly inlined body + _, name := p.parseName(nil, false) + sig := p.parseFunc(recv) + + // methods always belong to the same package as the base type object + pkg := base.Obj().Pkg() + + // add method to type unless type was imported before + // and method exists already + // TODO(gri) This leads to a quadratic algorithm - ok for now because method counts are small. + base.AddMethod(types.NewFunc(token.NoPos, pkg, name, sig)) +} + +// FuncDecl = "func" ExportedName Func . 
+// +func (p *parser) parseFuncDecl() { + // "func" already consumed + pkg, name := p.parseExportedName() + typ := p.parseFunc(nil) + pkg.Scope().Insert(types.NewFunc(token.NoPos, pkg, name, typ)) +} + +// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" . +// +func (p *parser) parseDecl() { + if p.tok == scanner.Ident { + switch p.lit { + case "import": + p.parseImportDecl() + case "const": + p.parseConstDecl() + case "type": + p.parseTypeDecl() + case "var": + p.parseVarDecl() + case "func": + p.next() // look ahead + if p.tok == '(' { + p.parseMethodDecl() + } else { + p.parseFuncDecl() + } + } + } + p.expect('\n') +} + +// ---------------------------------------------------------------------------- +// Export + +// Export = "PackageClause { Decl } "$$" . +// PackageClause = "package" PackageName [ "safe" ] "\n" . +// +func (p *parser) parseExport() *types.Package { + p.expectKeyword("package") + name := p.parsePackageName() + if p.tok == scanner.Ident && p.lit == "safe" { + // package was compiled with -u option - ignore + p.next() + } + p.expect('\n') + + pkg := p.getPkg(p.id, name) + + for p.tok != '$' && p.tok != scanner.EOF { + p.parseDecl() + } + + if ch := p.scanner.Peek(); p.tok != '$' || ch != '$' { + // don't call next()/expect() since reading past the + // export data may cause scanner errors (e.g. NUL chars) + p.errorf("expected '$$', got %s %c", scanner.TokenString(p.tok), ch) + } + + if n := p.scanner.ErrorCount; n != 0 { + p.errorf("expected no scanner errors, got %d", n) + } + + // Record all locally referenced packages as imports. + var imports []*types.Package + for id, pkg2 := range p.localPkgs { + if pkg2.Name() == "" { + p.errorf("%s package has no name", id) + } + if id == p.id { + continue // avoid self-edge + } + imports = append(imports, pkg2) + } + sort.Sort(byPath(imports)) + pkg.SetImports(imports) + + // package was imported completely and without errors + pkg.MarkComplete() + + return pkg +} + +type byPath []*types.Package + +func (a byPath) Len() int { return len(a) } +func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go new file mode 100644 index 000000000..4be32a2e5 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go @@ -0,0 +1,739 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed binary package export. +// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; +// see that file for specification of the format. + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "go/ast" + "go/constant" + "go/token" + "go/types" + "io" + "math/big" + "reflect" + "sort" +) + +// Current indexed export format version. Increase with each format change. +// 0: Go1.11 encoding +const iexportVersion = 0 + +// IExportData returns the binary export data for pkg. +// +// If no file set is provided, position info will be missing. +// The package path of the top-level package will not be recorded, +// so that calls to IImportData can override with a provided package path. 
+func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { + defer func() { + if e := recover(); e != nil { + if ierr, ok := e.(internalError); ok { + err = ierr + return + } + // Not an internal error; panic again. + panic(e) + } + }() + + p := iexporter{ + out: bytes.NewBuffer(nil), + fset: fset, + allPkgs: map[*types.Package]bool{}, + stringIndex: map[string]uint64{}, + declIndex: map[types.Object]uint64{}, + typIndex: map[types.Type]uint64{}, + localpkg: pkg, + } + + for i, pt := range predeclared() { + p.typIndex[pt] = uint64(i) + } + if len(p.typIndex) > predeclReserved { + panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved)) + } + + // Initialize work queue with exported declarations. + scope := pkg.Scope() + for _, name := range scope.Names() { + if ast.IsExported(name) { + p.pushDecl(scope.Lookup(name)) + } + } + + // Loop until no more work. + for !p.declTodo.empty() { + p.doDecl(p.declTodo.popHead()) + } + + // Append indices to data0 section. + dataLen := uint64(p.data0.Len()) + w := p.newWriter() + w.writeIndex(p.declIndex) + w.flush() + + // Assemble header. + var hdr intWriter + hdr.WriteByte('i') + hdr.uint64(iexportVersion) + hdr.uint64(uint64(p.strings.Len())) + hdr.uint64(dataLen) + + // Flush output. + io.Copy(p.out, &hdr) + io.Copy(p.out, &p.strings) + io.Copy(p.out, &p.data0) + + return p.out.Bytes(), nil +} + +// writeIndex writes out an object index. mainIndex indicates whether +// we're writing out the main index, which is also read by +// non-compiler tools and includes a complete package description +// (i.e., name and height). +func (w *exportWriter) writeIndex(index map[types.Object]uint64) { + // Build a map from packages to objects from that package. + pkgObjs := map[*types.Package][]types.Object{} + + // For the main index, make sure to include every package that + // we reference, even if we're not exporting (or reexporting) + // any symbols from it. + pkgObjs[w.p.localpkg] = nil + for pkg := range w.p.allPkgs { + pkgObjs[pkg] = nil + } + + for obj := range index { + pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], obj) + } + + var pkgs []*types.Package + for pkg, objs := range pkgObjs { + pkgs = append(pkgs, pkg) + + sort.Slice(objs, func(i, j int) bool { + return objs[i].Name() < objs[j].Name() + }) + } + + sort.Slice(pkgs, func(i, j int) bool { + return w.exportPath(pkgs[i]) < w.exportPath(pkgs[j]) + }) + + w.uint64(uint64(len(pkgs))) + for _, pkg := range pkgs { + w.string(w.exportPath(pkg)) + w.string(pkg.Name()) + w.uint64(uint64(0)) // package height is not needed for go/types + + objs := pkgObjs[pkg] + w.uint64(uint64(len(objs))) + for _, obj := range objs { + w.string(obj.Name()) + w.uint64(index[obj]) + } + } +} + +type iexporter struct { + fset *token.FileSet + out *bytes.Buffer + + localpkg *types.Package + + // allPkgs tracks all packages that have been referenced by + // the export data, so we can ensure to include them in the + // main index. + allPkgs map[*types.Package]bool + + declTodo objQueue + + strings intWriter + stringIndex map[string]uint64 + + data0 intWriter + declIndex map[types.Object]uint64 + typIndex map[types.Type]uint64 +} + +// stringOff returns the offset of s within the string section. +// If not already present, it's added to the end. 
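A minimal sketch of how the header assembled above can be decoded again (readIExportHeader is a hypothetical helper, not part of the vendored file; the leading 'i' byte is assumed to have been stripped already, as in Import's "$$B" case):

	func readIExportHeader(data []byte) (version, stringsLen, dataLen uint64, rest []byte) {
		r := bytes.NewReader(data)
		version, _ = binary.ReadUvarint(r)    // iexportVersion
		stringsLen, _ = binary.ReadUvarint(r) // length of the string section
		dataLen, _ = binary.ReadUvarint(r)    // length of the declaration data section
		rest = data[len(data)-r.Len():]       // strings, then declarations plus the main index
		return version, stringsLen, dataLen, rest
	}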
+func (p *iexporter) stringOff(s string) uint64 { + off, ok := p.stringIndex[s] + if !ok { + off = uint64(p.strings.Len()) + p.stringIndex[s] = off + + p.strings.uint64(uint64(len(s))) + p.strings.WriteString(s) + } + return off +} + +// pushDecl adds n to the declaration work queue, if not already present. +func (p *iexporter) pushDecl(obj types.Object) { + // Package unsafe is known to the compiler and predeclared. + assert(obj.Pkg() != types.Unsafe) + + if _, ok := p.declIndex[obj]; ok { + return + } + + p.declIndex[obj] = ^uint64(0) // mark n present in work queue + p.declTodo.pushTail(obj) +} + +// exportWriter handles writing out individual data section chunks. +type exportWriter struct { + p *iexporter + + data intWriter + currPkg *types.Package + prevFile string + prevLine int64 +} + +func (w *exportWriter) exportPath(pkg *types.Package) string { + if pkg == w.p.localpkg { + return "" + } + return pkg.Path() +} + +func (p *iexporter) doDecl(obj types.Object) { + w := p.newWriter() + w.setPkg(obj.Pkg(), false) + + switch obj := obj.(type) { + case *types.Var: + w.tag('V') + w.pos(obj.Pos()) + w.typ(obj.Type(), obj.Pkg()) + + case *types.Func: + sig, _ := obj.Type().(*types.Signature) + if sig.Recv() != nil { + panic(internalErrorf("unexpected method: %v", sig)) + } + w.tag('F') + w.pos(obj.Pos()) + w.signature(sig) + + case *types.Const: + w.tag('C') + w.pos(obj.Pos()) + w.value(obj.Type(), obj.Val()) + + case *types.TypeName: + if obj.IsAlias() { + w.tag('A') + w.pos(obj.Pos()) + w.typ(obj.Type(), obj.Pkg()) + break + } + + // Defined type. + w.tag('T') + w.pos(obj.Pos()) + + underlying := obj.Type().Underlying() + w.typ(underlying, obj.Pkg()) + + t := obj.Type() + if types.IsInterface(t) { + break + } + + named, ok := t.(*types.Named) + if !ok { + panic(internalErrorf("%s is not a defined type", t)) + } + + n := named.NumMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := named.Method(i) + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + w.param(sig.Recv()) + w.signature(sig) + } + + default: + panic(internalErrorf("unexpected object: %v", obj)) + } + + p.declIndex[obj] = w.flush() +} + +func (w *exportWriter) tag(tag byte) { + w.data.WriteByte(tag) +} + +func (w *exportWriter) pos(pos token.Pos) { + if w.p.fset == nil { + w.int64(0) + return + } + + p := w.p.fset.Position(pos) + file := p.Filename + line := int64(p.Line) + + // When file is the same as the last position (common case), + // we can save a few bytes by delta encoding just the line + // number. + // + // Note: Because data objects may be read out of order (or not + // at all), we can only apply delta encoding within a single + // object. This is handled implicitly by tracking prevFile and + // prevLine as fields of exportWriter. + + if file == w.prevFile { + delta := line - w.prevLine + w.int64(delta) + if delta == deltaNewFile { + w.int64(-1) + } + } else { + w.int64(deltaNewFile) + w.int64(line) // line >= 0 + w.string(file) + w.prevFile = file + } + w.prevLine = line +} + +func (w *exportWriter) pkg(pkg *types.Package) { + // Ensure any referenced packages are declared in the main index. + w.p.allPkgs[pkg] = true + + w.string(w.exportPath(pkg)) +} + +func (w *exportWriter) qualifiedIdent(obj types.Object) { + // Ensure any referenced declarations are written out too. 
+ w.p.pushDecl(obj) + + w.string(obj.Name()) + w.pkg(obj.Pkg()) +} + +func (w *exportWriter) typ(t types.Type, pkg *types.Package) { + w.data.uint64(w.p.typOff(t, pkg)) +} + +func (p *iexporter) newWriter() *exportWriter { + return &exportWriter{p: p} +} + +func (w *exportWriter) flush() uint64 { + off := uint64(w.p.data0.Len()) + io.Copy(&w.p.data0, &w.data) + return off +} + +func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 { + off, ok := p.typIndex[t] + if !ok { + w := p.newWriter() + w.doTyp(t, pkg) + off = predeclReserved + w.flush() + p.typIndex[t] = off + } + return off +} + +func (w *exportWriter) startType(k itag) { + w.data.uint64(uint64(k)) +} + +func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { + switch t := t.(type) { + case *types.Named: + w.startType(definedType) + w.qualifiedIdent(t.Obj()) + + case *types.Pointer: + w.startType(pointerType) + w.typ(t.Elem(), pkg) + + case *types.Slice: + w.startType(sliceType) + w.typ(t.Elem(), pkg) + + case *types.Array: + w.startType(arrayType) + w.uint64(uint64(t.Len())) + w.typ(t.Elem(), pkg) + + case *types.Chan: + w.startType(chanType) + // 1 RecvOnly; 2 SendOnly; 3 SendRecv + var dir uint64 + switch t.Dir() { + case types.RecvOnly: + dir = 1 + case types.SendOnly: + dir = 2 + case types.SendRecv: + dir = 3 + } + w.uint64(dir) + w.typ(t.Elem(), pkg) + + case *types.Map: + w.startType(mapType) + w.typ(t.Key(), pkg) + w.typ(t.Elem(), pkg) + + case *types.Signature: + w.startType(signatureType) + w.setPkg(pkg, true) + w.signature(t) + + case *types.Struct: + w.startType(structType) + w.setPkg(pkg, true) + + n := t.NumFields() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + f := t.Field(i) + w.pos(f.Pos()) + w.string(f.Name()) + w.typ(f.Type(), pkg) + w.bool(f.Anonymous()) + w.string(t.Tag(i)) // note (or tag) + } + + case *types.Interface: + w.startType(interfaceType) + w.setPkg(pkg, true) + + n := t.NumEmbeddeds() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + f := t.Embedded(i) + w.pos(f.Obj().Pos()) + w.typ(f.Obj().Type(), f.Obj().Pkg()) + } + + n = t.NumExplicitMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := t.ExplicitMethod(i) + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + w.signature(sig) + } + + default: + panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t))) + } +} + +func (w *exportWriter) setPkg(pkg *types.Package, write bool) { + if write { + w.pkg(pkg) + } + + w.currPkg = pkg +} + +func (w *exportWriter) signature(sig *types.Signature) { + w.paramList(sig.Params()) + w.paramList(sig.Results()) + if sig.Params().Len() > 0 { + w.bool(sig.Variadic()) + } +} + +func (w *exportWriter) paramList(tup *types.Tuple) { + n := tup.Len() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + w.param(tup.At(i)) + } +} + +func (w *exportWriter) param(obj types.Object) { + w.pos(obj.Pos()) + w.localIdent(obj) + w.typ(obj.Type(), obj.Pkg()) +} + +func (w *exportWriter) value(typ types.Type, v constant.Value) { + w.typ(typ, nil) + + switch v.Kind() { + case constant.Bool: + w.bool(constant.BoolVal(v)) + case constant.Int: + var i big.Int + if i64, exact := constant.Int64Val(v); exact { + i.SetInt64(i64) + } else if ui64, exact := constant.Uint64Val(v); exact { + i.SetUint64(ui64) + } else { + i.SetString(v.ExactString(), 10) + } + w.mpint(&i, typ) + case constant.Float: + f := constantToFloat(v) + w.mpfloat(f, typ) + case constant.Complex: + w.mpfloat(constantToFloat(constant.Real(v)), typ) + w.mpfloat(constantToFloat(constant.Imag(v)), 
typ) + case constant.String: + w.string(constant.StringVal(v)) + case constant.Unknown: + // package contains type errors + default: + panic(internalErrorf("unexpected value %v (%T)", v, v)) + } +} + +// constantToFloat converts a constant.Value with kind constant.Float to a +// big.Float. +func constantToFloat(x constant.Value) *big.Float { + assert(x.Kind() == constant.Float) + // Use the same floating-point precision (512) as cmd/compile + // (see Mpprec in cmd/compile/internal/gc/mpfloat.go). + const mpprec = 512 + var f big.Float + f.SetPrec(mpprec) + if v, exact := constant.Float64Val(x); exact { + // float64 + f.SetFloat64(v) + } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { + // TODO(gri): add big.Rat accessor to constant.Value. + n := valueToRat(num) + d := valueToRat(denom) + f.SetRat(n.Quo(n, d)) + } else { + // Value too large to represent as a fraction => inaccessible. + // TODO(gri): add big.Float accessor to constant.Value. + _, ok := f.SetString(x.ExactString()) + assert(ok) + } + return &f +} + +// mpint exports a multi-precision integer. +// +// For unsigned types, small values are written out as a single +// byte. Larger values are written out as a length-prefixed big-endian +// byte string, where the length prefix is encoded as its complement. +// For example, bytes 0, 1, and 2 directly represent the integer +// values 0, 1, and 2; while bytes 255, 254, and 253 indicate a 1-, +// 2-, and 3-byte big-endian string follow. +// +// Encoding for signed types use the same general approach as for +// unsigned types, except small values use zig-zag encoding and the +// bottom bit of length prefix byte for large values is reserved as a +// sign bit. +// +// The exact boundary between small and large encodings varies +// according to the maximum number of bytes needed to encode a value +// of type typ. As a special case, 8-bit types are always encoded as a +// single byte. +// +// TODO(mdempsky): Is this level of complexity really worthwhile? +func (w *exportWriter) mpint(x *big.Int, typ types.Type) { + basic, ok := typ.Underlying().(*types.Basic) + if !ok { + panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying())) + } + + signed, maxBytes := intSize(basic) + + negative := x.Sign() < 0 + if !signed && negative { + panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x)) + } + + b := x.Bytes() + if len(b) > 0 && b[0] == 0 { + panic(internalErrorf("leading zeros")) + } + if uint(len(b)) > maxBytes { + panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x)) + } + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + // Check if x can use small value encoding. + if len(b) <= 1 { + var ux uint + if len(b) == 1 { + ux = uint(b[0]) + } + if signed { + ux <<= 1 + if negative { + ux-- + } + } + if ux < maxSmall { + w.data.WriteByte(byte(ux)) + return + } + } + + n := 256 - uint(len(b)) + if signed { + n = 256 - 2*uint(len(b)) + if negative { + n |= 1 + } + } + if n < maxSmall || n >= 256 { + panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n)) + } + + w.data.WriteByte(byte(n)) + w.data.Write(b) +} + +// mpfloat exports a multi-precision floating point number. +// +// The number's value is decomposed into mantissa × 2**exponent, where +// mantissa is an integer. 
The value is written out as mantissa (as a +// multi-precision integer) and then the exponent, except exponent is +// omitted if mantissa is zero. +func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) { + if f.IsInf() { + panic("infinite constant") + } + + // Break into f = mant × 2**exp, with 0.5 <= mant < 1. + var mant big.Float + exp := int64(f.MantExp(&mant)) + + // Scale so that mant is an integer. + prec := mant.MinPrec() + mant.SetMantExp(&mant, int(prec)) + exp -= int64(prec) + + manti, acc := mant.Int(nil) + if acc != big.Exact { + panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc)) + } + w.mpint(manti, typ) + if manti.Sign() != 0 { + w.int64(exp) + } +} + +func (w *exportWriter) bool(b bool) bool { + var x uint64 + if b { + x = 1 + } + w.uint64(x) + return b +} + +func (w *exportWriter) int64(x int64) { w.data.int64(x) } +func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) } +func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) } + +func (w *exportWriter) localIdent(obj types.Object) { + // Anonymous parameters. + if obj == nil { + w.string("") + return + } + + name := obj.Name() + if name == "_" { + w.string("_") + return + } + + w.string(name) +} + +type intWriter struct { + bytes.Buffer +} + +func (w *intWriter) int64(x int64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutVarint(buf[:], x) + w.Write(buf[:n]) +} + +func (w *intWriter) uint64(x uint64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], x) + w.Write(buf[:n]) +} + +func assert(cond bool) { + if !cond { + panic("internal error: assertion failed") + } +} + +// The below is copied from go/src/cmd/compile/internal/gc/syntax.go. + +// objQueue is a FIFO queue of types.Object. The zero value of objQueue is +// a ready-to-use empty queue. +type objQueue struct { + ring []types.Object + head, tail int +} + +// empty returns true if q contains no Nodes. +func (q *objQueue) empty() bool { + return q.head == q.tail +} + +// pushTail appends n to the tail of the queue. +func (q *objQueue) pushTail(obj types.Object) { + if len(q.ring) == 0 { + q.ring = make([]types.Object, 16) + } else if q.head+len(q.ring) == q.tail { + // Grow the ring. + nring := make([]types.Object, len(q.ring)*2) + // Copy the old elements. + part := q.ring[q.head%len(q.ring):] + if q.tail-q.head <= len(part) { + part = part[:q.tail-q.head] + copy(nring, part) + } else { + pos := copy(nring, part) + copy(nring[pos:], q.ring[:q.tail%len(q.ring)]) + } + q.ring, q.head, q.tail = nring, 0, q.tail-q.head + } + + q.ring[q.tail%len(q.ring)] = obj + q.tail++ +} + +// popHead pops a node from the head of the queue. It panics if q is empty. +func (q *objQueue) popHead() types.Object { + if q.empty() { + panic("dequeue empty") + } + obj := q.ring[q.head%len(q.ring)] + q.head++ + return obj +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go new file mode 100644 index 000000000..a31a88026 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go @@ -0,0 +1,630 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed package import. +// See cmd/compile/internal/gc/iexport.go for the export data format. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. 
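Before the reader side below, a worked example of the integer encoding produced by the exporter's mpint above (values are illustrative): for an int64 constant, signed with at most 8 bytes, the small-value cutoff is 256 - 2*8 = 240, so 5 is written as the single zig-zag byte 10 and -3 as the single byte 5, while 1000 needs two bytes and is written as the prefix byte 252 (256 - 2*2, with the low bit clear because the value is positive) followed by the big-endian bytes 0x03 0xE8.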
+ +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "io" + "sort" +) + +type intReader struct { + *bytes.Reader + path string +} + +func (r *intReader) int64() int64 { + i, err := binary.ReadVarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +func (r *intReader) uint64() uint64 { + i, err := binary.ReadUvarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +const predeclReserved = 32 + +type itag uint64 + +const ( + // Types + definedType itag = iota + pointerType + sliceType + arrayType + chanType + mapType + signatureType + structType + interfaceType +) + +// IImportData imports a package from the serialized package data +// and returns the number of bytes consumed and a reference to the package. +// If the export data version is not recognized or the format is otherwise +// compromised, an error is returned. +func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + const currentVersion = 1 + version := int64(-1) + defer func() { + if e := recover(); e != nil { + if version > currentVersion { + err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) + } else { + err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + } + } + }() + + r := &intReader{bytes.NewReader(data), path} + + version = int64(r.uint64()) + switch version { + case currentVersion, 0: + default: + errorf("unknown iexport format version %d", version) + } + + sLen := int64(r.uint64()) + dLen := int64(r.uint64()) + + whence, _ := r.Seek(0, io.SeekCurrent) + stringData := data[whence : whence+sLen] + declData := data[whence+sLen : whence+sLen+dLen] + r.Seek(sLen+dLen, io.SeekCurrent) + + p := iimporter{ + ipath: path, + version: int(version), + + stringData: stringData, + stringCache: make(map[uint64]string), + pkgCache: make(map[uint64]*types.Package), + + declData: declData, + pkgIndex: make(map[*types.Package]map[string]uint64), + typCache: make(map[uint64]types.Type), + + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*token.File), + }, + } + + for i, pt := range predeclared() { + p.typCache[uint64(i)] = pt + } + + pkgList := make([]*types.Package, r.uint64()) + for i := range pkgList { + pkgPathOff := r.uint64() + pkgPath := p.stringAt(pkgPathOff) + pkgName := p.stringAt(r.uint64()) + _ = r.uint64() // package height; unused by go/types + + if pkgPath == "" { + pkgPath = path + } + pkg := imports[pkgPath] + if pkg == nil { + pkg = types.NewPackage(pkgPath, pkgName) + imports[pkgPath] = pkg + } else if pkg.Name() != pkgName { + errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path) + } + + p.pkgCache[pkgPathOff] = pkg + + nameIndex := make(map[string]uint64) + for nSyms := r.uint64(); nSyms > 0; nSyms-- { + name := p.stringAt(r.uint64()) + nameIndex[name] = r.uint64() + } + + p.pkgIndex[pkg] = nameIndex + pkgList[i] = pkg + } + if len(pkgList) == 0 { + errorf("no packages found for %s", path) + panic("unreachable") + } + p.ipkg = pkgList[0] + names := make([]string, 0, len(p.pkgIndex[p.ipkg])) + for name := range p.pkgIndex[p.ipkg] { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + p.doDecl(p.ipkg, name) + } + + for _, typ := range p.interfaceList { + typ.Complete() + } + + // record all referenced 
packages as imports + list := append(([]*types.Package)(nil), pkgList[1:]...) + sort.Sort(byPath(list)) + p.ipkg.SetImports(list) + + // package was imported completely and without errors + p.ipkg.MarkComplete() + + consumed, _ := r.Seek(0, io.SeekCurrent) + return int(consumed), p.ipkg, nil +} + +type iimporter struct { + ipath string + ipkg *types.Package + version int + + stringData []byte + stringCache map[uint64]string + pkgCache map[uint64]*types.Package + + declData []byte + pkgIndex map[*types.Package]map[string]uint64 + typCache map[uint64]types.Type + + fake fakeFileSet + interfaceList []*types.Interface +} + +func (p *iimporter) doDecl(pkg *types.Package, name string) { + // See if we've already imported this declaration. + if obj := pkg.Scope().Lookup(name); obj != nil { + return + } + + off, ok := p.pkgIndex[pkg][name] + if !ok { + errorf("%v.%v not in index", pkg, name) + } + + r := &importReader{p: p, currPkg: pkg} + r.declReader.Reset(p.declData[off:]) + + r.obj(name) +} + +func (p *iimporter) stringAt(off uint64) string { + if s, ok := p.stringCache[off]; ok { + return s + } + + slen, n := binary.Uvarint(p.stringData[off:]) + if n <= 0 { + errorf("varint failed") + } + spos := off + uint64(n) + s := string(p.stringData[spos : spos+slen]) + p.stringCache[off] = s + return s +} + +func (p *iimporter) pkgAt(off uint64) *types.Package { + if pkg, ok := p.pkgCache[off]; ok { + return pkg + } + path := p.stringAt(off) + if path == p.ipath { + return p.ipkg + } + errorf("missing package %q in %q", path, p.ipath) + return nil +} + +func (p *iimporter) typAt(off uint64, base *types.Named) types.Type { + if t, ok := p.typCache[off]; ok && (base == nil || !isInterface(t)) { + return t + } + + if off < predeclReserved { + errorf("predeclared type missing from cache: %v", off) + } + + r := &importReader{p: p} + r.declReader.Reset(p.declData[off-predeclReserved:]) + t := r.doType(base) + + if base == nil || !isInterface(t) { + p.typCache[off] = t + } + return t +} + +type importReader struct { + p *iimporter + declReader bytes.Reader + currPkg *types.Package + prevFile string + prevLine int64 + prevColumn int64 +} + +func (r *importReader) obj(name string) { + tag := r.byte() + pos := r.pos() + + switch tag { + case 'A': + typ := r.typ() + + r.declare(types.NewTypeName(pos, r.currPkg, name, typ)) + + case 'C': + typ, val := r.value() + + r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) + + case 'F': + sig := r.signature(nil) + + r.declare(types.NewFunc(pos, r.currPkg, name, sig)) + + case 'T': + // Types can be recursive. We need to setup a stub + // declaration before recursing. 
+ obj := types.NewTypeName(pos, r.currPkg, name, nil) + named := types.NewNamed(obj, nil, nil) + r.declare(obj) + + underlying := r.p.typAt(r.uint64(), named).Underlying() + named.SetUnderlying(underlying) + + if !isInterface(underlying) { + for n := r.uint64(); n > 0; n-- { + mpos := r.pos() + mname := r.ident() + recv := r.param() + msig := r.signature(recv) + + named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig)) + } + } + + case 'V': + typ := r.typ() + + r.declare(types.NewVar(pos, r.currPkg, name, typ)) + + default: + errorf("unexpected tag: %v", tag) + } +} + +func (r *importReader) declare(obj types.Object) { + obj.Pkg().Scope().Insert(obj) +} + +func (r *importReader) value() (typ types.Type, val constant.Value) { + typ = r.typ() + + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { + case types.IsBoolean: + val = constant.MakeBool(r.bool()) + + case types.IsString: + val = constant.MakeString(r.string()) + + case types.IsInteger: + val = r.mpint(b) + + case types.IsFloat: + val = r.mpfloat(b) + + case types.IsComplex: + re := r.mpfloat(b) + im := r.mpfloat(b) + val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + + default: + if b.Kind() == types.Invalid { + val = constant.MakeUnknown() + return + } + errorf("unexpected type %v", typ) // panics + panic("unreachable") + } + + return +} + +func intSize(b *types.Basic) (signed bool, maxBytes uint) { + if (b.Info() & types.IsUntyped) != 0 { + return true, 64 + } + + switch b.Kind() { + case types.Float32, types.Complex64: + return true, 3 + case types.Float64, types.Complex128: + return true, 7 + } + + signed = (b.Info() & types.IsUnsigned) == 0 + switch b.Kind() { + case types.Int8, types.Uint8: + maxBytes = 1 + case types.Int16, types.Uint16: + maxBytes = 2 + case types.Int32, types.Uint32: + maxBytes = 4 + default: + maxBytes = 8 + } + + return +} + +func (r *importReader) mpint(b *types.Basic) constant.Value { + signed, maxBytes := intSize(b) + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + n, _ := r.declReader.ReadByte() + if uint(n) < maxSmall { + v := int64(n) + if signed { + v >>= 1 + if n&1 != 0 { + v = ^v + } + } + return constant.MakeInt64(v) + } + + v := -n + if signed { + v = -(n &^ 1) >> 1 + } + if v < 1 || uint(v) > maxBytes { + errorf("weird decoding: %v, %v => %v", n, signed, v) + } + + buf := make([]byte, v) + io.ReadFull(&r.declReader, buf) + + // convert to little endian + // TODO(gri) go/constant should have a more direct conversion function + // (e.g., once it supports a big.Float based implementation) + for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 { + buf[i], buf[j] = buf[j], buf[i] + } + + x := constant.MakeFromBytes(buf) + if signed && n&1 != 0 { + x = constant.UnaryOp(token.SUB, x, 0) + } + return x +} + +func (r *importReader) mpfloat(b *types.Basic) constant.Value { + x := r.mpint(b) + if constant.Sign(x) == 0 { + return x + } + + exp := r.int64() + switch { + case exp > 0: + x = constant.Shift(x, token.SHL, uint(exp)) + case exp < 0: + d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) + x = constant.BinaryOp(x, token.QUO, d) + } + return x +} + +func (r *importReader) ident() string { + return r.string() +} + +func (r *importReader) qualifiedIdent() (*types.Package, string) { + name := r.string() + pkg := r.pkg() + return pkg, name +} + +func (r *importReader) pos() token.Pos { + if r.p.version >= 1 { + r.posv1() + } else { + r.posv0() + } + + if r.prevFile == "" && 
r.prevLine == 0 && r.prevColumn == 0 { + return token.NoPos + } + return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn)) +} + +func (r *importReader) posv0() { + delta := r.int64() + if delta != deltaNewFile { + r.prevLine += delta + } else if l := r.int64(); l == -1 { + r.prevLine += deltaNewFile + } else { + r.prevFile = r.string() + r.prevLine = l + } +} + +func (r *importReader) posv1() { + delta := r.int64() + r.prevColumn += delta >> 1 + if delta&1 != 0 { + delta = r.int64() + r.prevLine += delta >> 1 + if delta&1 != 0 { + r.prevFile = r.string() + } + } +} + +func (r *importReader) typ() types.Type { + return r.p.typAt(r.uint64(), nil) +} + +func isInterface(t types.Type) bool { + _, ok := t.(*types.Interface) + return ok +} + +func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) } +func (r *importReader) string() string { return r.p.stringAt(r.uint64()) } + +func (r *importReader) doType(base *types.Named) types.Type { + switch k := r.kind(); k { + default: + errorf("unexpected kind tag in %q: %v", r.p.ipath, k) + return nil + + case definedType: + pkg, name := r.qualifiedIdent() + r.p.doDecl(pkg, name) + return pkg.Scope().Lookup(name).(*types.TypeName).Type() + case pointerType: + return types.NewPointer(r.typ()) + case sliceType: + return types.NewSlice(r.typ()) + case arrayType: + n := r.uint64() + return types.NewArray(r.typ(), int64(n)) + case chanType: + dir := chanDir(int(r.uint64())) + return types.NewChan(dir, r.typ()) + case mapType: + return types.NewMap(r.typ(), r.typ()) + case signatureType: + r.currPkg = r.pkg() + return r.signature(nil) + + case structType: + r.currPkg = r.pkg() + + fields := make([]*types.Var, r.uint64()) + tags := make([]string, len(fields)) + for i := range fields { + fpos := r.pos() + fname := r.ident() + ftyp := r.typ() + emb := r.bool() + tag := r.string() + + fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + tags[i] = tag + } + return types.NewStruct(fields, tags) + + case interfaceType: + r.currPkg = r.pkg() + + embeddeds := make([]types.Type, r.uint64()) + for i := range embeddeds { + _ = r.pos() + embeddeds[i] = r.typ() + } + + methods := make([]*types.Func, r.uint64()) + for i := range methods { + mpos := r.pos() + mname := r.ident() + + // TODO(mdempsky): Matches bimport.go, but I + // don't agree with this. + var recv *types.Var + if base != nil { + recv = types.NewVar(token.NoPos, r.currPkg, "", base) + } + + msig := r.signature(recv) + methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig) + } + + typ := newInterface(methods, embeddeds) + r.p.interfaceList = append(r.p.interfaceList, typ) + return typ + } +} + +func (r *importReader) kind() itag { + return itag(r.uint64()) +} + +func (r *importReader) signature(recv *types.Var) *types.Signature { + params := r.paramList() + results := r.paramList() + variadic := params.Len() > 0 && r.bool() + return types.NewSignature(recv, params, results, variadic) +} + +func (r *importReader) paramList() *types.Tuple { + xs := make([]*types.Var, r.uint64()) + for i := range xs { + xs[i] = r.param() + } + return types.NewTuple(xs...) 
+} + +func (r *importReader) param() *types.Var { + pos := r.pos() + name := r.ident() + typ := r.typ() + return types.NewParam(pos, r.currPkg, name, typ) +} + +func (r *importReader) bool() bool { + return r.uint64() != 0 +} + +func (r *importReader) int64() int64 { + n, err := binary.ReadVarint(&r.declReader) + if err != nil { + errorf("readVarint: %v", err) + } + return n +} + +func (r *importReader) uint64() uint64 { + n, err := binary.ReadUvarint(&r.declReader) + if err != nil { + errorf("readUvarint: %v", err) + } + return n +} + +func (r *importReader) byte() byte { + x, err := r.declReader.ReadByte() + if err != nil { + errorf("declReader.ReadByte: %v", err) + } + return x +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go new file mode 100644 index 000000000..463f25227 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go @@ -0,0 +1,21 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.11 + +package gcimporter + +import "go/types" + +func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { + named := make([]*types.Named, len(embeddeds)) + for i, e := range embeddeds { + var ok bool + named[i], ok = e.(*types.Named) + if !ok { + panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11") + } + } + return types.NewInterface(methods, named) +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go new file mode 100644 index 000000000..ab28b95cb --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.11 + +package gcimporter + +import "go/types" + +func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { + return types.NewInterfaceType(methods, embeddeds) +} diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go new file mode 100644 index 000000000..db0c9a7ea --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go @@ -0,0 +1,174 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packagesdriver fetches type sizes for go/packages and go/analysis. +package packagesdriver + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "go/types" + "log" + "os" + "os/exec" + "strings" + "time" +) + +var debug = false + +// GetSizes returns the sizes used by the underlying driver with the given parameters. +func GetSizes(ctx context.Context, buildFlags, env []string, dir string, usesExportData bool) (types.Sizes, error) { + // TODO(matloob): Clean this up. This code is mostly a copy of packages.findExternalDriver. 
+ const toolPrefix = "GOPACKAGESDRIVER=" + tool := "" + for _, env := range env { + if val := strings.TrimPrefix(env, toolPrefix); val != env { + tool = val + } + } + + if tool == "" { + var err error + tool, err = exec.LookPath("gopackagesdriver") + if err != nil { + // We did not find the driver, so use "go list". + tool = "off" + } + } + + if tool == "off" { + return GetSizesGolist(ctx, buildFlags, env, dir, usesExportData) + } + + req, err := json.Marshal(struct { + Command string `json:"command"` + Env []string `json:"env"` + BuildFlags []string `json:"build_flags"` + }{ + Command: "sizes", + Env: env, + BuildFlags: buildFlags, + }) + if err != nil { + return nil, fmt.Errorf("failed to encode message to driver tool: %v", err) + } + + buf := new(bytes.Buffer) + cmd := exec.CommandContext(ctx, tool) + cmd.Dir = dir + cmd.Env = env + cmd.Stdin = bytes.NewReader(req) + cmd.Stdout = buf + cmd.Stderr = new(bytes.Buffer) + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr) + } + var response struct { + // Sizes, if not nil, is the types.Sizes to use when type checking. + Sizes *types.StdSizes + } + if err := json.Unmarshal(buf.Bytes(), &response); err != nil { + return nil, err + } + return response.Sizes, nil +} + +func GetSizesGolist(ctx context.Context, buildFlags, env []string, dir string, usesExportData bool) (types.Sizes, error) { + args := []string{"list", "-f", "{{context.GOARCH}} {{context.Compiler}}"} + args = append(args, buildFlags...) + args = append(args, "--", "unsafe") + stdout, stderr, err := invokeGo(ctx, env, dir, usesExportData, args...) + var goarch, compiler string + if err != nil { + if strings.Contains(err.Error(), "cannot find main module") { + // User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc. + // TODO(matloob): Is this a problem in practice? + envout, _, enverr := invokeGo(ctx, env, dir, usesExportData, "env", "GOARCH") + if enverr != nil { + return nil, err + } + goarch = strings.TrimSpace(envout.String()) + compiler = "gc" + } else { + return nil, err + } + } else { + fields := strings.Fields(stdout.String()) + if len(fields) < 2 { + return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \" \" from stdout of go command:\n%s\ndir: %s\nstdout: <<%s>>\nstderr: <<%s>>", + cmdDebugStr(env, args...), dir, stdout.String(), stderr.String()) + } + goarch = fields[0] + compiler = fields[1] + } + return types.SizesFor(compiler, goarch), nil +} + +// invokeGo returns the stdout and stderr of a go command invocation. +func invokeGo(ctx context.Context, env []string, dir string, usesExportData bool, args ...string) (*bytes.Buffer, *bytes.Buffer, error) { + if debug { + defer func(start time.Time) { log.Printf("%s for %v", time.Since(start), cmdDebugStr(env, args...)) }(time.Now()) + } + stdout := new(bytes.Buffer) + stderr := new(bytes.Buffer) + cmd := exec.CommandContext(ctx, "go", args...) + // On darwin the cwd gets resolved to the real path, which breaks anything that + // expects the working directory to keep the original path, including the + // go command when dealing with modules. + // The Go stdlib has a special feature where if the cwd and the PWD are the + // same node then it trusts the PWD, so by setting it in the env for the child + // process we fix up all the paths returned by the go command. 
+ cmd.Env = append(append([]string{}, env...), "PWD="+dir) + cmd.Dir = dir + cmd.Stdout = stdout + cmd.Stderr = stderr + if err := cmd.Run(); err != nil { + exitErr, ok := err.(*exec.ExitError) + if !ok { + // Catastrophic error: + // - executable not found + // - context cancellation + return nil, nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err) + } + + // Export mode entails a build. + // If that build fails, errors appear on stderr + // (despite the -e flag) and the Export field is blank. + // Do not fail in that case. + if !usesExportData { + return nil, nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr) + } + } + + // As of writing, go list -export prints some non-fatal compilation + // errors to stderr, even with -e set. We would prefer that it put + // them in the Package.Error JSON (see https://golang.org/issue/26319). + // In the meantime, there's nowhere good to put them, but they can + // be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS + // is set. + if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" { + fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(env, args...), stderr) + } + + // debugging + if false { + fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(env, args...), stdout) + } + + return stdout, stderr, nil +} + +func cmdDebugStr(envlist []string, args ...string) string { + env := make(map[string]string) + for _, kv := range envlist { + split := strings.Split(kv, "=") + k, v := split[0], split[1] + env[k] = v + } + + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], args) +} diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go new file mode 100644 index 000000000..4bfe28a51 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -0,0 +1,221 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package packages loads Go packages for inspection and analysis. + +The Load function takes as input a list of patterns and return a list of Package +structs describing individual packages matched by those patterns. +The LoadMode controls the amount of detail in the loaded packages. + +Load passes most patterns directly to the underlying build tool, +but all patterns with the prefix "query=", where query is a +non-empty string of letters from [a-z], are reserved and may be +interpreted as query operators. + +Two query operators are currently supported: "file" and "pattern". + +The query "file=path/to/file.go" matches the package or packages enclosing +the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go" +might return the packages "fmt" and "fmt [fmt.test]". + +The query "pattern=string" causes "string" to be passed directly to +the underlying build tool. In most cases this is unnecessary, +but an application can use Load("pattern=" + x) as an escaping mechanism +to ensure that x is not interpreted as a query operator if it contains '='. + +All other query operators are reserved for future use and currently +cause Load to report an error. 
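Editorial aside (not part of the vendored file): the paragraphs above describe the Load entry point and the "file=" query operator. The following minimal sketch shows that usage with the public go/packages API; the file path and the choice of LoadFiles mode are illustrative assumptions, and error handling is kept minimal.

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	// LoadFiles collects minimal information: package IDs, names, and file lists.
	cfg := &packages.Config{Mode: packages.LoadFiles}
	// A "file=" query asks for the package(s) enclosing the given source file.
	// The path here is purely illustrative.
	pkgs, err := packages.Load(cfg, "file=path/to/file.go")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pkgs {
		fmt.Println(p.ID, p.GoFiles)
	}
}

As the documentation notes, a nil Config is equivalent to the zero Config; passing an explicit Config, as above, is only needed to change the mode or other loader settings.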
+ +The Package struct provides basic information about the package, including + + - ID, a unique identifier for the package in the returned set; + - GoFiles, the names of the package's Go source files; + - Imports, a map from source import strings to the Packages they name; + - Types, the type information for the package's exported symbols; + - Syntax, the parsed syntax trees for the package's source code; and + - TypeInfo, the result of a complete type-check of the package syntax trees. + +(See the documentation for type Package for the complete list of fields +and more detailed descriptions.) + +For example, + + Load(nil, "bytes", "unicode...") + +returns four Package structs describing the standard library packages +bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern +can match multiple packages and that a package might be matched by +multiple patterns: in general it is not possible to determine which +packages correspond to which patterns. + +Note that the list returned by Load contains only the packages matched +by the patterns. Their dependencies can be found by walking the import +graph using the Imports fields. + +The Load function can be configured by passing a pointer to a Config as +the first argument. A nil Config is equivalent to the zero Config, which +causes Load to run in LoadFiles mode, collecting minimal information. +See the documentation for type Config for details. + +As noted earlier, the Config.Mode controls the amount of detail +reported about the loaded packages. See the documentation for type LoadMode +for details. + +Most tools should pass their command-line arguments (after any flags) +uninterpreted to the loader, so that the loader can interpret them +according to the conventions of the underlying build system. +See the Example function for typical usage. + +*/ +package packages // import "golang.org/x/tools/go/packages" + +/* + +Motivation and design considerations + +The new package's design solves problems addressed by two existing +packages: go/build, which locates and describes packages, and +golang.org/x/tools/go/loader, which loads, parses and type-checks them. +The go/build.Package structure encodes too much of the 'go build' way +of organizing projects, leaving us in need of a data type that describes a +package of Go source code independent of the underlying build system. +We wanted something that works equally well with go build and vgo, and +also other build systems such as Bazel and Blaze, making it possible to +construct analysis tools that work in all these environments. +Tools such as errcheck and staticcheck were essentially unavailable to +the Go community at Google, and some of Google's internal tools for Go +are unavailable externally. +This new package provides a uniform way to obtain package metadata by +querying each of these build systems, optionally supporting their +preferred command-line notations for packages, so that tools integrate +neatly with users' build environments. The Metadata query function +executes an external query tool appropriate to the current workspace. + +Loading packages always returns the complete import graph "all the way down", +even if all you want is information about a single package, because the query +mechanisms of all the build systems we currently support ({go,vgo} list, and +blaze/bazel aspect-based query) cannot provide detailed information +about one package without visiting all its dependencies too, so there is +no additional asymptotic cost to providing transitive information. 
+(This property might not be true of a hypothetical 5th build system.) + +In calls to TypeCheck, all initial packages, and any package that +transitively depends on one of them, must be loaded from source. +Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from +source; D may be loaded from export data, and E may not be loaded at all +(though it's possible that D's export data mentions it, so a +types.Package may be created for it and exposed.) + +The old loader had a feature to suppress type-checking of function +bodies on a per-package basis, primarily intended to reduce the work of +obtaining type information for imported packages. Now that imports are +satisfied by export data, the optimization no longer seems necessary. + +Despite some early attempts, the old loader did not exploit export data, +instead always using the equivalent of WholeProgram mode. This was due +to the complexity of mixing source and export data packages (now +resolved by the upward traversal mentioned above), and because export data +files were nearly always missing or stale. Now that 'go build' supports +caching, all the underlying build systems can guarantee to produce +export data in a reasonable (amortized) time. + +Test "main" packages synthesized by the build system are now reported as +first-class packages, avoiding the need for clients (such as go/ssa) to +reinvent this generation logic. + +One way in which go/packages is simpler than the old loader is in its +treatment of in-package tests. In-package tests are packages that +consist of all the files of the library under test, plus the test files. +The old loader constructed in-package tests by a two-phase process of +mutation called "augmentation": first it would construct and type check +all the ordinary library packages and type-check the packages that +depend on them; then it would add more (test) files to the package and +type-check again. This two-phase approach had four major problems: +1) in processing the tests, the loader modified the library package, + leaving no way for a client application to see both the test + package and the library package; one would mutate into the other. +2) because test files can declare additional methods on types defined in + the library portion of the package, the dispatch of method calls in + the library portion was affected by the presence of the test files. + This should have been a clue that the packages were logically + different. +3) this model of "augmentation" assumed at most one in-package test + per library package, which is true of projects using 'go build', + but not other build systems. +4) because of the two-phase nature of test processing, all packages that + import the library package had to be processed before augmentation, + forcing a "one-shot" API and preventing the client from calling Load + in several times in sequence as is now possible in WholeProgram mode. + (TypeCheck mode has a similar one-shot restriction for a different reason.) + +Early drafts of this package supported "multi-shot" operation. +Although it allowed clients to make a sequence of calls (or concurrent +calls) to Load, building up the graph of Packages incrementally, +it was of marginal value: it complicated the API +(since it allowed some options to vary across calls but not others), +it complicated the implementation, +it cannot be made to work in Types mode, as explained above, +and it was less efficient than making one combined call (when this is possible). 
+Among the clients we have inspected, none made multiple calls to load +but could not be easily and satisfactorily modified to make only a single call. +However, applications changes may be required. +For example, the ssadump command loads the user-specified packages +and in addition the runtime package. It is tempting to simply append +"runtime" to the user-provided list, but that does not work if the user +specified an ad-hoc package such as [a.go b.go]. +Instead, ssadump no longer requests the runtime package, +but seeks it among the dependencies of the user-specified packages, +and emits an error if it is not found. + +Overlays: The Overlay field in the Config allows providing alternate contents +for Go source files, by providing a mapping from file path to contents. +go/packages will pull in new imports added in overlay files when go/packages +is run in LoadImports mode or greater. +Overlay support for the go list driver isn't complete yet: if the file doesn't +exist on disk, it will only be recognized in an overlay if it is a non-test file +and the package would be reported even without the overlay. + +Questions & Tasks + +- Add GOARCH/GOOS? + They are not portable concepts, but could be made portable. + Our goal has been to allow users to express themselves using the conventions + of the underlying build system: if the build system honors GOARCH + during a build and during a metadata query, then so should + applications built atop that query mechanism. + Conversely, if the target architecture of the build is determined by + command-line flags, the application can pass the relevant + flags through to the build system using a command such as: + myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin" + However, this approach is low-level, unwieldy, and non-portable. + GOOS and GOARCH seem important enough to warrant a dedicated option. + +- How should we handle partial failures such as a mixture of good and + malformed patterns, existing and non-existent packages, successful and + failed builds, import failures, import cycles, and so on, in a call to + Load? + +- Support bazel, blaze, and go1.10 list, not just go1.11 list. + +- Handle (and test) various partial success cases, e.g. + a mixture of good packages and: + invalid patterns + nonexistent packages + empty packages + packages with malformed package or import declarations + unreadable files + import cycles + other parse errors + type errors + Make sure we record errors at the correct place in the graph. + +- Missing packages among initial arguments are not reported. + Return bogus packages for them, like golist does. + +- "undeclared name" errors (for example) are reported out of source file + order. I suspect this is due to the breadth-first resolution now used + by go/types. Is that a bug? Discuss with gri. + +*/ diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go new file mode 100644 index 000000000..8c8473fd0 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -0,0 +1,101 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file enables an external tool to intercept package requests. +// If the tool is present then its results are used in preference to +// the go list command. 
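Editorial aside (not part of the vendored file): external.go below describes a driver protocol in which a binary, named by GOPACKAGESDRIVER or found as gopackagesdriver on PATH, reads a JSON-serialized driverRequest on stdin and writes a driverResponse on stdout, with the patterns passed as command-line arguments. The stub below is a hedged sketch of that shape; the "request" type name is mine, it mirrors only part of the driverRequest struct defined below, and the response field names (Roots, Packages) are assumptions based on the driverResponse definition in packages.go, which is not shown here.

package main

import (
	"encoding/json"
	"os"
)

// request mirrors part of the driverRequest struct defined below; unknown
// fields in the incoming JSON are simply ignored by encoding/json.
type request struct {
	Mode       int      `json:"mode"`
	Env        []string `json:"env"`
	BuildFlags []string `json:"build_flags"`
	Tests      bool     `json:"tests"`
}

func main() {
	var req request
	if err := json.NewDecoder(os.Stdin).Decode(&req); err != nil {
		os.Exit(1)
	}
	// The patterns to resolve arrive as command-line arguments.
	_ = os.Args[1:]

	// A real driver would query its build system here; this stub reports an
	// empty package graph using the assumed driverResponse field names.
	resp := struct {
		Roots    []string
		Packages []interface{}
	}{Roots: []string{}, Packages: []interface{}{}}
	json.NewEncoder(os.Stdout).Encode(resp)
}

A driver like this would be picked up by findExternalDriver (defined below) when GOPACKAGESDRIVER points at it, and its output used in place of the go list results.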
+ +package packages + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "os/exec" + "strings" +) + +// The Driver Protocol +// +// The driver, given the inputs to a call to Load, returns metadata about the packages specified. +// This allows for different build systems to support go/packages by telling go/packages how the +// packages' source is organized. +// The driver is a binary, either specified by the GOPACKAGESDRIVER environment variable or in +// the path as gopackagesdriver. It's given the inputs to load in its argv. See the package +// documentation in doc.go for the full description of the patterns that need to be supported. +// A driver receives as a JSON-serialized driverRequest struct in standard input and will +// produce a JSON-serialized driverResponse (see definition in packages.go) in its standard output. + +// driverRequest is used to provide the portion of Load's Config that is needed by a driver. +type driverRequest struct { + Mode LoadMode `json:"mode"` + // Env specifies the environment the underlying build system should be run in. + Env []string `json:"env"` + // BuildFlags are flags that should be passed to the underlying build system. + BuildFlags []string `json:"build_flags"` + // Tests specifies whether the patterns should also return test packages. + Tests bool `json:"tests"` + // Overlay maps file paths (relative to the driver's working directory) to the byte contents + // of overlay files. + Overlay map[string][]byte `json:"overlay"` +} + +// findExternalDriver returns the file path of a tool that supplies +// the build system package structure, or "" if not found." +// If GOPACKAGESDRIVER is set in the environment findExternalTool returns its +// value, otherwise it searches for a binary named gopackagesdriver on the PATH. +func findExternalDriver(cfg *Config) driver { + const toolPrefix = "GOPACKAGESDRIVER=" + tool := "" + for _, env := range cfg.Env { + if val := strings.TrimPrefix(env, toolPrefix); val != env { + tool = val + } + } + if tool != "" && tool == "off" { + return nil + } + if tool == "" { + var err error + tool, err = exec.LookPath("gopackagesdriver") + if err != nil { + return nil + } + } + return func(cfg *Config, words ...string) (*driverResponse, error) { + req, err := json.Marshal(driverRequest{ + Mode: cfg.Mode, + Env: cfg.Env, + BuildFlags: cfg.BuildFlags, + Tests: cfg.Tests, + Overlay: cfg.Overlay, + }) + if err != nil { + return nil, fmt.Errorf("failed to encode message to driver tool: %v", err) + } + + buf := new(bytes.Buffer) + stderr := new(bytes.Buffer) + cmd := exec.CommandContext(cfg.Context, tool, words...) + cmd.Dir = cfg.Dir + cmd.Env = cfg.Env + cmd.Stdin = bytes.NewReader(req) + cmd.Stdout = buf + cmd.Stderr = stderr + + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr) + } + if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" { + fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, words...), stderr) + } + + var response driverResponse + if err := json.Unmarshal(buf.Bytes(), &response); err != nil { + return nil, err + } + return &response, nil + } +} diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go new file mode 100644 index 000000000..2ac7c02a3 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -0,0 +1,1075 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "bytes" + "encoding/json" + "fmt" + "go/types" + "io/ioutil" + "log" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "regexp" + "strconv" + "strings" + "sync" + "time" + "unicode" + + "golang.org/x/tools/go/internal/packagesdriver" + "golang.org/x/tools/internal/gopathwalk" + "golang.org/x/tools/internal/semver" +) + +// debug controls verbose logging. +var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG")) + +// A goTooOldError reports that the go command +// found by exec.LookPath is too old to use the new go list behavior. +type goTooOldError struct { + error +} + +// responseDeduper wraps a driverResponse, deduplicating its contents. +type responseDeduper struct { + seenRoots map[string]bool + seenPackages map[string]*Package + dr *driverResponse +} + +// init fills in r with a driverResponse. +func (r *responseDeduper) init(dr *driverResponse) { + r.dr = dr + r.seenRoots = map[string]bool{} + r.seenPackages = map[string]*Package{} + for _, pkg := range dr.Packages { + r.seenPackages[pkg.ID] = pkg + } + for _, root := range dr.Roots { + r.seenRoots[root] = true + } +} + +func (r *responseDeduper) addPackage(p *Package) { + if r.seenPackages[p.ID] != nil { + return + } + r.seenPackages[p.ID] = p + r.dr.Packages = append(r.dr.Packages, p) +} + +func (r *responseDeduper) addRoot(id string) { + if r.seenRoots[id] { + return + } + r.seenRoots[id] = true + r.dr.Roots = append(r.dr.Roots, id) +} + +// goInfo contains global information from the go tool. +type goInfo struct { + rootDirs map[string]string + env goEnv +} + +type goEnv struct { + modulesOn bool +} + +func determineEnv(cfg *Config) goEnv { + buf, err := invokeGo(cfg, "env", "GOMOD") + if err != nil { + return goEnv{} + } + gomod := bytes.TrimSpace(buf.Bytes()) + + env := goEnv{} + env.modulesOn = len(gomod) > 0 + return env +} + +// goListDriver uses the go list command to interpret the patterns and produce +// the build system package structure. +// See driver for more details. +func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { + var sizes types.Sizes + var sizeserr error + var sizeswg sync.WaitGroup + if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { + sizeswg.Add(1) + go func() { + sizes, sizeserr = getSizes(cfg) + sizeswg.Done() + }() + } + defer sizeswg.Wait() + + // start fetching rootDirs + var info goInfo + var rootDirsReady, envReady = make(chan struct{}), make(chan struct{}) + go func() { + info.rootDirs = determineRootDirs(cfg) + close(rootDirsReady) + }() + go func() { + info.env = determineEnv(cfg) + close(envReady) + }() + getGoInfo := func() *goInfo { + <-rootDirsReady + <-envReady + return &info + } + + // Ensure that we don't leak goroutines: Load is synchronous, so callers will + // not expect it to access the fields of cfg after the call returns. + defer getGoInfo() + + // always pass getGoInfo to golistDriver + golistDriver := func(cfg *Config, patterns ...string) (*driverResponse, error) { + return golistDriver(cfg, getGoInfo, patterns...) + } + + // Determine files requested in contains patterns + var containFiles []string + var packagesNamed []string + restPatterns := make([]string, 0, len(patterns)) + // Extract file= and other [querytype]= patterns. Report an error if querytype + // doesn't exist. 
+extractQueries: + for _, pattern := range patterns { + eqidx := strings.Index(pattern, "=") + if eqidx < 0 { + restPatterns = append(restPatterns, pattern) + } else { + query, value := pattern[:eqidx], pattern[eqidx+len("="):] + switch query { + case "file": + containFiles = append(containFiles, value) + case "pattern": + restPatterns = append(restPatterns, value) + case "iamashamedtousethedisabledqueryname": + packagesNamed = append(packagesNamed, value) + case "": // not a reserved query + restPatterns = append(restPatterns, pattern) + default: + for _, rune := range query { + if rune < 'a' || rune > 'z' { // not a reserved query + restPatterns = append(restPatterns, pattern) + continue extractQueries + } + } + // Reject all other patterns containing "=" + return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern) + } + } + } + + response := &responseDeduper{} + var err error + + // See if we have any patterns to pass through to go list. Zero initial + // patterns also requires a go list call, since it's the equivalent of + // ".". + if len(restPatterns) > 0 || len(patterns) == 0 { + dr, err := golistDriver(cfg, restPatterns...) + if err != nil { + return nil, err + } + response.init(dr) + } else { + response.init(&driverResponse{}) + } + + sizeswg.Wait() + if sizeserr != nil { + return nil, sizeserr + } + // types.SizesFor always returns nil or a *types.StdSizes + response.dr.Sizes, _ = sizes.(*types.StdSizes) + + var containsCandidates []string + + if len(containFiles) != 0 { + if err := runContainsQueries(cfg, golistDriver, response, containFiles, getGoInfo); err != nil { + return nil, err + } + } + + if len(packagesNamed) != 0 { + if err := runNamedQueries(cfg, golistDriver, response, packagesNamed); err != nil { + return nil, err + } + } + + modifiedPkgs, needPkgs, err := processGolistOverlay(cfg, response, getGoInfo) + if err != nil { + return nil, err + } + if len(containFiles) > 0 { + containsCandidates = append(containsCandidates, modifiedPkgs...) + containsCandidates = append(containsCandidates, needPkgs...) + } + if err := addNeededOverlayPackages(cfg, golistDriver, response, needPkgs, getGoInfo); err != nil { + return nil, err + } + // Check candidate packages for containFiles. + if len(containFiles) > 0 { + for _, id := range containsCandidates { + pkg, ok := response.seenPackages[id] + if !ok { + response.addPackage(&Package{ + ID: id, + Errors: []Error{ + { + Kind: ListError, + Msg: fmt.Sprintf("package %s expected but not seen", id), + }, + }, + }) + continue + } + for _, f := range containFiles { + for _, g := range pkg.GoFiles { + if sameFile(f, g) { + response.addRoot(id) + } + } + } + } + } + + return response.dr, nil +} + +func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDeduper, pkgs []string, getGoInfo func() *goInfo) error { + if len(pkgs) == 0 { + return nil + } + dr, err := driver(cfg, pkgs...) + if err != nil { + return err + } + for _, pkg := range dr.Packages { + response.addPackage(pkg) + } + _, needPkgs, err := processGolistOverlay(cfg, response, getGoInfo) + if err != nil { + return err + } + return addNeededOverlayPackages(cfg, driver, response, needPkgs, getGoInfo) +} + +func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, queries []string, goInfo func() *goInfo) error { + for _, query := range queries { + // TODO(matloob): Do only one query per directory. 
+ fdir := filepath.Dir(query) + // Pass absolute path of directory to go list so that it knows to treat it as a directory, + // not a package path. + pattern, err := filepath.Abs(fdir) + if err != nil { + return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err) + } + dirResponse, err := driver(cfg, pattern) + if err != nil || (len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].Errors) == 1) { + // There was an error loading the package. Try to load the file as an ad-hoc package. + // Usually the error will appear in a returned package, but may not if we're in modules mode + // and the ad-hoc is located outside a module. + var queryErr error + dirResponse, queryErr = driver(cfg, query) + if queryErr != nil { + // Return the original error if the attempt to fall back failed. + return err + } + // If we get nothing back from `go list`, try to make this file into its own ad-hoc package. + if len(dirResponse.Packages) == 0 && queryErr == nil { + dirResponse.Packages = append(dirResponse.Packages, &Package{ + ID: "command-line-arguments", + PkgPath: query, + GoFiles: []string{query}, + CompiledGoFiles: []string{query}, + Imports: make(map[string]*Package), + }) + dirResponse.Roots = append(dirResponse.Roots, "command-line-arguments") + } + // Special case to handle issue #33482: + // If this is a file= query for ad-hoc packages where the file only exists on an overlay, + // and exists outside of a module, add the file in for the package. + if len(dirResponse.Packages) == 1 && (dirResponse.Packages[0].ID == "command-line-arguments" || + filepath.ToSlash(dirResponse.Packages[0].PkgPath) == filepath.ToSlash(query)) { + if len(dirResponse.Packages[0].GoFiles) == 0 { + filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath + // TODO(matloob): check if the file is outside of a root dir? + for path := range cfg.Overlay { + if path == filename { + dirResponse.Packages[0].Errors = nil + dirResponse.Packages[0].GoFiles = []string{path} + dirResponse.Packages[0].CompiledGoFiles = []string{path} + } + } + } + } + } + isRoot := make(map[string]bool, len(dirResponse.Roots)) + for _, root := range dirResponse.Roots { + isRoot[root] = true + } + for _, pkg := range dirResponse.Packages { + // Add any new packages to the main set + // We don't bother to filter packages that will be dropped by the changes of roots, + // that will happen anyway during graph construction outside this function. + // Over-reporting packages is not a problem. + response.addPackage(pkg) + // if the package was not a root one, it cannot have the file + if !isRoot[pkg.ID] { + continue + } + for _, pkgFile := range pkg.GoFiles { + if filepath.Base(query) == filepath.Base(pkgFile) { + response.addRoot(pkg.ID) + break + } + } + } + } + return nil +} + +// modCacheRegexp splits a path in a module cache into module, module version, and package. +var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`) + +func runNamedQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error { + // calling `go env` isn't free; bail out if there's nothing to do. + if len(queries) == 0 { + return nil + } + // Determine which directories are relevant to scan. + roots, modRoot, err := roots(cfg) + if err != nil { + return err + } + + // Scan the selected directories. Simple matches, from GOPATH/GOROOT + // or the local module, can simply be "go list"ed. Matches from the + // module cache need special treatment. 
+ var matchesMu sync.Mutex + var simpleMatches, modCacheMatches []string + add := func(root gopathwalk.Root, dir string) { + // Walk calls this concurrently; protect the result slices. + matchesMu.Lock() + defer matchesMu.Unlock() + + path := dir + if dir != root.Path { + path = dir[len(root.Path)+1:] + } + if pathMatchesQueries(path, queries) { + switch root.Type { + case gopathwalk.RootModuleCache: + modCacheMatches = append(modCacheMatches, path) + case gopathwalk.RootCurrentModule: + // We'd need to read go.mod to find the full + // import path. Relative's easier. + rel, err := filepath.Rel(cfg.Dir, dir) + if err != nil { + // This ought to be impossible, since + // we found dir in the current module. + panic(err) + } + simpleMatches = append(simpleMatches, "./"+rel) + case gopathwalk.RootGOPATH, gopathwalk.RootGOROOT: + simpleMatches = append(simpleMatches, path) + } + } + } + + startWalk := time.Now() + gopathwalk.Walk(roots, add, gopathwalk.Options{ModulesEnabled: modRoot != "", Debug: debug}) + cfg.Logf("%v for walk", time.Since(startWalk)) + + // Weird special case: the top-level package in a module will be in + // whatever directory the user checked the repository out into. It's + // more reasonable for that to not match the package name. So, if there + // are any Go files in the mod root, query it just to be safe. + if modRoot != "" { + rel, err := filepath.Rel(cfg.Dir, modRoot) + if err != nil { + panic(err) // See above. + } + + files, err := ioutil.ReadDir(modRoot) + if err != nil { + panic(err) // See above. + } + + for _, f := range files { + if strings.HasSuffix(f.Name(), ".go") { + simpleMatches = append(simpleMatches, rel) + break + } + } + } + + addResponse := func(r *driverResponse) { + for _, pkg := range r.Packages { + response.addPackage(pkg) + for _, name := range queries { + if pkg.Name == name { + response.addRoot(pkg.ID) + break + } + } + } + } + + if len(simpleMatches) != 0 { + resp, err := driver(cfg, simpleMatches...) + if err != nil { + return err + } + addResponse(resp) + } + + // Module cache matches are tricky. We want to avoid downloading new + // versions of things, so we need to use the ones present in the cache. + // go list doesn't accept version specifiers, so we have to write out a + // temporary module, and do the list in that module. + if len(modCacheMatches) != 0 { + // Collect all the matches, deduplicating by major version + // and preferring the newest. + type modInfo struct { + mod string + major string + } + mods := make(map[modInfo]string) + var imports []string + for _, modPath := range modCacheMatches { + matches := modCacheRegexp.FindStringSubmatch(modPath) + mod, ver := filepath.ToSlash(matches[1]), matches[2] + importPath := filepath.ToSlash(filepath.Join(matches[1], matches[3])) + + major := semver.Major(ver) + if prevVer, ok := mods[modInfo{mod, major}]; !ok || semver.Compare(ver, prevVer) > 0 { + mods[modInfo{mod, major}] = ver + } + + imports = append(imports, importPath) + } + + // Build the temporary module. + var gomod bytes.Buffer + gomod.WriteString("module modquery\nrequire (\n") + for mod, version := range mods { + gomod.WriteString("\t" + mod.mod + " " + version + "\n") + } + gomod.WriteString(")\n") + + tmpCfg := *cfg + + // We're only trying to look at stuff in the module cache, so + // disable the network. This should speed things up, and has + // prevented errors in at least one case, #28518. + tmpCfg.Env = append([]string{"GOPROXY=off"}, cfg.Env...) 
+ + var err error + tmpCfg.Dir, err = ioutil.TempDir("", "gopackages-modquery") + if err != nil { + return err + } + defer os.RemoveAll(tmpCfg.Dir) + + if err := ioutil.WriteFile(filepath.Join(tmpCfg.Dir, "go.mod"), gomod.Bytes(), 0777); err != nil { + return fmt.Errorf("writing go.mod for module cache query: %v", err) + } + + // Run the query, using the import paths calculated from the matches above. + resp, err := driver(&tmpCfg, imports...) + if err != nil { + return fmt.Errorf("querying module cache matches: %v", err) + } + addResponse(resp) + } + + return nil +} + +func getSizes(cfg *Config) (types.Sizes, error) { + return packagesdriver.GetSizesGolist(cfg.Context, cfg.BuildFlags, cfg.Env, cfg.Dir, usesExportData(cfg)) +} + +// roots selects the appropriate paths to walk based on the passed-in configuration, +// particularly the environment and the presence of a go.mod in cfg.Dir's parents. +func roots(cfg *Config) ([]gopathwalk.Root, string, error) { + stdout, err := invokeGo(cfg, "env", "GOROOT", "GOPATH", "GOMOD") + if err != nil { + return nil, "", err + } + + fields := strings.Split(stdout.String(), "\n") + if len(fields) != 4 || len(fields[3]) != 0 { + return nil, "", fmt.Errorf("go env returned unexpected output: %q", stdout.String()) + } + goroot, gopath, gomod := fields[0], filepath.SplitList(fields[1]), fields[2] + var modDir string + if gomod != "" { + modDir = filepath.Dir(gomod) + } + + var roots []gopathwalk.Root + // Always add GOROOT. + roots = append(roots, gopathwalk.Root{ + Path: filepath.Join(goroot, "/src"), + Type: gopathwalk.RootGOROOT, + }) + // If modules are enabled, scan the module dir. + if modDir != "" { + roots = append(roots, gopathwalk.Root{ + Path: modDir, + Type: gopathwalk.RootCurrentModule, + }) + } + // Add either GOPATH/src or GOPATH/pkg/mod, depending on module mode. + for _, p := range gopath { + if modDir != "" { + roots = append(roots, gopathwalk.Root{ + Path: filepath.Join(p, "/pkg/mod"), + Type: gopathwalk.RootModuleCache, + }) + } else { + roots = append(roots, gopathwalk.Root{ + Path: filepath.Join(p, "/src"), + Type: gopathwalk.RootGOPATH, + }) + } + } + + return roots, modDir, nil +} + +// These functions were copied from goimports. See further documentation there. + +// pathMatchesQueries is adapted from pkgIsCandidate. +// TODO: is it reasonable to do Contains here, rather than an exact match on a path component? +func pathMatchesQueries(path string, queries []string) bool { + lastTwo := lastTwoComponents(path) + for _, query := range queries { + if strings.Contains(lastTwo, query) { + return true + } + if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(query) { + lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) + if strings.Contains(lastTwo, query) { + return true + } + } + } + return false +} + +// lastTwoComponents returns at most the last two path components +// of v, using either / or \ as the path separator. 
+func lastTwoComponents(v string) string { + nslash := 0 + for i := len(v) - 1; i >= 0; i-- { + if v[i] == '/' || v[i] == '\\' { + nslash++ + if nslash == 2 { + return v[i:] + } + } + } + return v +} + +func hasHyphenOrUpperASCII(s string) bool { + for i := 0; i < len(s); i++ { + b := s[i] + if b == '-' || ('A' <= b && b <= 'Z') { + return true + } + } + return false +} + +func lowerASCIIAndRemoveHyphen(s string) (ret string) { + buf := make([]byte, 0, len(s)) + for i := 0; i < len(s); i++ { + b := s[i] + switch { + case b == '-': + continue + case 'A' <= b && b <= 'Z': + buf = append(buf, b+('a'-'A')) + default: + buf = append(buf, b) + } + } + return string(buf) +} + +// Fields must match go list; +// see $GOROOT/src/cmd/go/internal/load/pkg.go. +type jsonPackage struct { + ImportPath string + Dir string + Name string + Export string + GoFiles []string + CompiledGoFiles []string + CFiles []string + CgoFiles []string + CXXFiles []string + MFiles []string + HFiles []string + FFiles []string + SFiles []string + SwigFiles []string + SwigCXXFiles []string + SysoFiles []string + Imports []string + ImportMap map[string]string + Deps []string + TestGoFiles []string + TestImports []string + XTestGoFiles []string + XTestImports []string + ForTest string // q in a "p [q.test]" package, else "" + DepOnly bool + + Error *jsonPackageError +} + +type jsonPackageError struct { + ImportStack []string + Pos string + Err string +} + +func otherFiles(p *jsonPackage) [][]string { + return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles} +} + +// golistDriver uses the "go list" command to expand the pattern +// words and return metadata for the specified packages. dir may be +// "" and env may be nil, as per os/exec.Command. +func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driverResponse, error) { + // go list uses the following identifiers in ImportPath and Imports: + // + // "p" -- importable package or main (command) + // "q.test" -- q's test executable + // "p [q.test]" -- variant of p as built for q's test executable + // "q_test [q.test]" -- q's external test package + // + // The packages p that are built differently for a test q.test + // are q itself, plus any helpers used by the external test q_test, + // typically including "testing" and all its dependencies. + + // Run "go list" for complete + // information on the specified packages. + buf, err := invokeGo(cfg, golistargs(cfg, words)...) + if err != nil { + return nil, err + } + seen := make(map[string]*jsonPackage) + // Decode the JSON and convert it to Package form. + var response driverResponse + for dec := json.NewDecoder(buf); dec.More(); { + p := new(jsonPackage) + if err := dec.Decode(p); err != nil { + return nil, fmt.Errorf("JSON decoding failed: %v", err) + } + + if p.ImportPath == "" { + // The documentation for go list says that “[e]rroneous packages will have + // a non-empty ImportPath”. If for some reason it comes back empty, we + // prefer to error out rather than silently discarding data or handing + // back a package without any way to refer to it. + if p.Error != nil { + return nil, Error{ + Pos: p.Error.Pos, + Msg: p.Error.Err, + } + } + return nil, fmt.Errorf("package missing import path: %+v", p) + } + + // Work around https://golang.org/issue/33157: + // go list -e, when given an absolute path, will find the package contained at + // that directory. 
But when no package exists there, it will return a fake package + // with an error and the ImportPath set to the absolute path provided to go list. + // Try to convert that absolute path to what its package path would be if it's + // contained in a known module or GOPATH entry. This will allow the package to be + // properly "reclaimed" when overlays are processed. + if filepath.IsAbs(p.ImportPath) && p.Error != nil { + pkgPath, ok := getPkgPath(cfg, p.ImportPath, rootsDirs) + if ok { + p.ImportPath = pkgPath + } + } + + if old, found := seen[p.ImportPath]; found { + if !reflect.DeepEqual(p, old) { + return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) + } + // skip the duplicate + continue + } + seen[p.ImportPath] = p + + pkg := &Package{ + Name: p.Name, + ID: p.ImportPath, + GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), + CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), + OtherFiles: absJoin(p.Dir, otherFiles(p)...), + } + + // Work around https://golang.org/issue/28749: + // cmd/go puts assembly, C, and C++ files in CompiledGoFiles. + // Filter out any elements of CompiledGoFiles that are also in OtherFiles. + // We have to keep this workaround in place until go1.12 is a distant memory. + if len(pkg.OtherFiles) > 0 { + other := make(map[string]bool, len(pkg.OtherFiles)) + for _, f := range pkg.OtherFiles { + other[f] = true + } + + out := pkg.CompiledGoFiles[:0] + for _, f := range pkg.CompiledGoFiles { + if other[f] { + continue + } + out = append(out, f) + } + pkg.CompiledGoFiles = out + } + + // Extract the PkgPath from the package's ID. + if i := strings.IndexByte(pkg.ID, ' '); i >= 0 { + pkg.PkgPath = pkg.ID[:i] + } else { + pkg.PkgPath = pkg.ID + } + + if pkg.PkgPath == "unsafe" { + pkg.GoFiles = nil // ignore fake unsafe.go file + } + + // Assume go list emits only absolute paths for Dir. + if p.Dir != "" && !filepath.IsAbs(p.Dir) { + log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir) + } + + if p.Export != "" && !filepath.IsAbs(p.Export) { + pkg.ExportFile = filepath.Join(p.Dir, p.Export) + } else { + pkg.ExportFile = p.Export + } + + // imports + // + // Imports contains the IDs of all imported packages. + // ImportsMap records (path, ID) only where they differ. + ids := make(map[string]bool) + for _, id := range p.Imports { + ids[id] = true + } + pkg.Imports = make(map[string]*Package) + for path, id := range p.ImportMap { + pkg.Imports[path] = &Package{ID: id} // non-identity import + delete(ids, id) + } + for id := range ids { + if id == "C" { + continue + } + + pkg.Imports[id] = &Package{ID: id} // identity import + } + if !p.DepOnly { + response.Roots = append(response.Roots, pkg.ID) + } + + // Work around for pre-go.1.11 versions of go list. + // TODO(matloob): they should be handled by the fallback. + // Can we delete this? + if len(pkg.CompiledGoFiles) == 0 { + pkg.CompiledGoFiles = pkg.GoFiles + } + + if p.Error != nil { + msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363. + // Address golang.org/issue/35964 by appending import stack to error message. 
+ if msg == "import cycle not allowed" && len(p.Error.ImportStack) != 0 { + msg += fmt.Sprintf(": import stack: %v", p.Error.ImportStack) + } + pkg.Errors = append(pkg.Errors, Error{ + Pos: p.Error.Pos, + Msg: msg, + Kind: ListError, + }) + } + + response.Packages = append(response.Packages, pkg) + } + + return &response, nil +} + +// getPkgPath finds the package path of a directory if it's relative to a root directory. +func getPkgPath(cfg *Config, dir string, goInfo func() *goInfo) (string, bool) { + absDir, err := filepath.Abs(dir) + if err != nil { + cfg.Logf("error getting absolute path of %s: %v", dir, err) + return "", false + } + for rdir, rpath := range goInfo().rootDirs { + absRdir, err := filepath.Abs(rdir) + if err != nil { + cfg.Logf("error getting absolute path of %s: %v", rdir, err) + continue + } + // Make sure that the directory is in the module, + // to avoid creating a path relative to another module. + if !strings.HasPrefix(absDir, absRdir) { + cfg.Logf("%s does not have prefix %s", absDir, absRdir) + continue + } + // TODO(matloob): This doesn't properly handle symlinks. + r, err := filepath.Rel(rdir, dir) + if err != nil { + continue + } + if rpath != "" { + // We choose only one root even though the directory even it can belong in multiple modules + // or GOPATH entries. This is okay because we only need to work with absolute dirs when a + // file is missing from disk, for instance when gopls calls go/packages in an overlay. + // Once the file is saved, gopls, or the next invocation of the tool will get the correct + // result straight from golist. + // TODO(matloob): Implement module tiebreaking? + return path.Join(rpath, filepath.ToSlash(r)), true + } + return filepath.ToSlash(r), true + } + return "", false +} + +// absJoin absolutizes and flattens the lists of files. +func absJoin(dir string, fileses ...[]string) (res []string) { + for _, files := range fileses { + for _, file := range files { + if !filepath.IsAbs(file) { + file = filepath.Join(dir, file) + } + res = append(res, file) + } + } + return res +} + +func golistargs(cfg *Config, words []string) []string { + const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo + fullargs := []string{ + "list", "-e", "-json", + fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypesInfo|NeedTypesSizes) != 0), + fmt.Sprintf("-test=%t", cfg.Tests), + fmt.Sprintf("-export=%t", usesExportData(cfg)), + fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0), + // go list doesn't let you pass -test and -find together, + // probably because you'd just get the TestMain. + fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0), + } + fullargs = append(fullargs, cfg.BuildFlags...) + fullargs = append(fullargs, "--") + fullargs = append(fullargs, words...) + return fullargs +} + +// invokeGo returns the stdout of a go command invocation. +func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { + stdout := new(bytes.Buffer) + stderr := new(bytes.Buffer) + cmd := exec.CommandContext(cfg.Context, "go", args...) + // On darwin the cwd gets resolved to the real path, which breaks anything that + // expects the working directory to keep the original path, including the + // go command when dealing with modules. + // The Go stdlib has a special feature where if the cwd and the PWD are the + // same node then it trusts the PWD, so by setting it in the env for the child + // process we fix up all the paths returned by the go command. 
+ cmd.Env = append(append([]string{}, cfg.Env...), "PWD="+cfg.Dir) + cmd.Dir = cfg.Dir + cmd.Stdout = stdout + cmd.Stderr = stderr + defer func(start time.Time) { + cfg.Logf("%s for %v, stderr: <<%s>> stdout: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, args...), stderr, stdout) + }(time.Now()) + + if err := cmd.Run(); err != nil { + // Check for 'go' executable not being found. + if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { + return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound) + } + + exitErr, ok := err.(*exec.ExitError) + if !ok { + // Catastrophic error: + // - context cancellation + return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err) + } + + // Old go version? + if strings.Contains(stderr.String(), "flag provided but not defined") { + return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)} + } + + // Related to #24854 + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "unexpected directory layout") { + return nil, fmt.Errorf("%s", stderr.String()) + } + + // Is there an error running the C compiler in cgo? This will be reported in the "Error" field + // and should be suppressed by go list -e. + // + // This condition is not perfect yet because the error message can include other error messages than runtime/cgo. + isPkgPathRune := func(r rune) bool { + // From https://golang.org/ref/spec#Import_declarations: + // Implementation restriction: A compiler may restrict ImportPaths to non-empty strings + // using only characters belonging to Unicode's L, M, N, P, and S general categories + // (the Graphic characters without spaces) and may also exclude the + // characters !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character U+FFFD. + return unicode.IsOneOf([]*unicode.RangeTable{unicode.L, unicode.M, unicode.N, unicode.P, unicode.S}, r) && + !strings.ContainsRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r) + } + if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") { + if strings.HasPrefix(strings.TrimLeftFunc(stderr.String()[len("# "):], isPkgPathRune), "\n") { + return stdout, nil + } + } + + // This error only appears in stderr. See golang.org/cl/166398 for a fix in go list to show + // the error in the Err section of stdout in case -e option is provided. + // This fix is provided for backwards compatibility. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Similar to the previous error, but currently lacks a fix in Go. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must all be in one directory") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Backwards compatibility for Go 1.11 because 1.12 and 1.13 put the directory in the ImportPath. + // If the package doesn't exist, put the absolute path of the directory into the error message, + // as Go 1.13 list does. 
+ const noSuchDirectory = "no such directory" + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), noSuchDirectory) { + errstr := stderr.String() + abspath := strings.TrimSpace(errstr[strings.Index(errstr, noSuchDirectory)+len(noSuchDirectory):]) + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + abspath, strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist. + // Note that the error message we look for in this case is different that the one looked for above. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Workaround for #34273. go list -e with GO111MODULE=on has incorrect behavior when listing a + // directory outside any module. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside available modules") { + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + // TODO(matloob): command-line-arguments isn't correct here. + "command-line-arguments", strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Another variation of the previous error + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside module root") { + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + // TODO(matloob): command-line-arguments isn't correct here. + "command-line-arguments", strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Workaround for an instance of golang.org/issue/26755: go list -e will return a non-zero exit + // status if there's a dependency on a package that doesn't exist. But it should return + // a zero exit status and set an error on that package. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no Go files in") { + // Don't clobber stdout if `go list` actually returned something. + if len(stdout.String()) > 0 { + return stdout, nil + } + // try to extract package name from string + stderrStr := stderr.String() + var importPath string + colon := strings.Index(stderrStr, ":") + if colon > 0 && strings.HasPrefix(stderrStr, "go build ") { + importPath = stderrStr[len("go build "):colon] + } + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + importPath, strings.Trim(stderrStr, "\n")) + return bytes.NewBufferString(output), nil + } + + // Export mode entails a build. + // If that build fails, errors appear on stderr + // (despite the -e flag) and the Export field is blank. + // Do not fail in that case. + // The same is true if an ad-hoc package given to go list doesn't exist. + // TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when + // packages don't exist or a build fails. + if !usesExportData(cfg) && !containsGoFile(args) { + return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr) + } + } + + // As of writing, go list -export prints some non-fatal compilation + // errors to stderr, even with -e set. We would prefer that it put + // them in the Package.Error JSON (see https://golang.org/issue/26319). 
+ // In the meantime, there's nowhere good to put them, but they can + // be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS + // is set. + if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" { + fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, args...), stderr) + } + return stdout, nil +} + +func containsGoFile(s []string) bool { + for _, f := range s { + if strings.HasSuffix(f, ".go") { + return true + } + } + return false +} + +func cmdDebugStr(cmd *exec.Cmd, args ...string) string { + env := make(map[string]string) + for _, kv := range cmd.Env { + split := strings.Split(kv, "=") + k, v := split[0], split[1] + env[k] = v + } + var quotedArgs []string + for _, arg := range args { + quotedArgs = append(quotedArgs, strconv.Quote(arg)) + } + + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %s", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], strings.Join(quotedArgs, " ")) +} diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go new file mode 100644 index 000000000..a7de62299 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -0,0 +1,293 @@ +package packages + +import ( + "bytes" + "encoding/json" + "fmt" + "go/parser" + "go/token" + "path/filepath" + "strconv" + "strings" +) + +// processGolistOverlay provides rudimentary support for adding +// files that don't exist on disk to an overlay. The results can be +// sometimes incorrect. +// TODO(matloob): Handle unsupported cases, including the following: +// - determining the correct package to add given a new import path +func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func() *goInfo) (modifiedPkgs, needPkgs []string, err error) { + havePkgs := make(map[string]string) // importPath -> non-test package ID + needPkgsSet := make(map[string]bool) + modifiedPkgsSet := make(map[string]bool) + + for _, pkg := range response.dr.Packages { + // This is an approximation of import path to id. This can be + // wrong for tests, vendored packages, and a number of other cases. + havePkgs[pkg.PkgPath] = pkg.ID + } + + // If no new imports are added, it is safe to avoid loading any needPkgs. + // Otherwise, it's hard to tell which package is actually being loaded + // (due to vendoring) and whether any modified package will show up + // in the transitive set of dependencies (because new imports are added, + // potentially modifying the transitive set of dependencies). + var overlayAddsImports bool + + for opath, contents := range cfg.Overlay { + base := filepath.Base(opath) + dir := filepath.Dir(opath) + var pkg *Package // if opath belongs to both a package and its test variant, this will be the test variant + var testVariantOf *Package // if opath is a test file, this is the package it is testing + var fileExists bool + isTestFile := strings.HasSuffix(opath, "_test.go") + pkgName, ok := extractPackageName(opath, contents) + if !ok { + // Don't bother adding a file that doesn't even have a parsable package statement + // to the overlay. + continue + } + nextPackage: + for _, p := range response.dr.Packages { + if pkgName != p.Name && p.ID != "command-line-arguments" { + continue + } + for _, f := range p.GoFiles { + if !sameFile(filepath.Dir(f), dir) { + continue + } + // Make sure to capture information on the package's test variant, if needed. 
+ if isTestFile && !hasTestFiles(p) { + // TODO(matloob): Are there packages other than the 'production' variant + // of a package that this can match? This shouldn't match the test main package + // because the file is generated in another directory. + testVariantOf = p + continue nextPackage + } + if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath { + // If we've already seen the test variant, + // make sure to label which package it is a test variant of. + if hasTestFiles(pkg) { + testVariantOf = p + continue nextPackage + } + // If we have already seen the package of which this is a test variant. + if hasTestFiles(p) { + testVariantOf = pkg + } + } + pkg = p + if filepath.Base(f) == base { + fileExists = true + } + } + } + // The overlay could have included an entirely new package. + if pkg == nil { + // Try to find the module or gopath dir the file is contained in. + // Then for modules, add the module opath to the beginning. + pkgPath, ok := getPkgPath(cfg, dir, rootDirs) + if !ok { + break + } + isXTest := strings.HasSuffix(pkgName, "_test") + if isXTest { + pkgPath += "_test" + } + id := pkgPath + if isTestFile && !isXTest { + id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath) + } + // Try to reclaim a package with the same id if it exists in the response. + for _, p := range response.dr.Packages { + if reclaimPackage(p, id, opath, contents) { + pkg = p + break + } + } + // Otherwise, create a new package + if pkg == nil { + pkg = &Package{PkgPath: pkgPath, ID: id, Name: pkgName, Imports: make(map[string]*Package)} + response.addPackage(pkg) + havePkgs[pkg.PkgPath] = id + // Add the production package's sources for a test variant. + if isTestFile && !isXTest && testVariantOf != nil { + pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...) + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...) + } + } + } + if !fileExists { + pkg.GoFiles = append(pkg.GoFiles, opath) + // TODO(matloob): Adding the file to CompiledGoFiles can exhibit the wrong behavior + // if the file will be ignored due to its build tags. + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, opath) + modifiedPkgsSet[pkg.ID] = true + } + imports, err := extractImports(opath, contents) + if err != nil { + // Let the parser or type checker report errors later. + continue + } + for _, imp := range imports { + _, found := pkg.Imports[imp] + if !found { + overlayAddsImports = true + // TODO(matloob): Handle cases when the following block isn't correct. + // These include imports of vendored packages, etc. + id, ok := havePkgs[imp] + if !ok { + id = imp + } + pkg.Imports[imp] = &Package{ID: id} + // Add dependencies to the non-test variant version of this package as wel. + if testVariantOf != nil { + testVariantOf.Imports[imp] = &Package{ID: id} + } + } + } + continue + } + + // toPkgPath tries to guess the package path given the id. + // This isn't always correct -- it's certainly wrong for + // vendored packages' paths. + toPkgPath := func(id string) string { + // TODO(matloob): Handle vendor paths. + i := strings.IndexByte(id, ' ') + if i >= 0 { + return id[:i] + } + return id + } + + // Do another pass now that new packages have been created to determine the + // set of missing packages. 
+ for _, pkg := range response.dr.Packages { + for _, imp := range pkg.Imports { + pkgPath := toPkgPath(imp.ID) + if _, ok := havePkgs[pkgPath]; !ok { + needPkgsSet[pkgPath] = true + } + } + } + + if overlayAddsImports { + needPkgs = make([]string, 0, len(needPkgsSet)) + for pkg := range needPkgsSet { + needPkgs = append(needPkgs, pkg) + } + } + modifiedPkgs = make([]string, 0, len(modifiedPkgsSet)) + for pkg := range modifiedPkgsSet { + modifiedPkgs = append(modifiedPkgs, pkg) + } + return modifiedPkgs, needPkgs, err +} + +func hasTestFiles(p *Package) bool { + for _, f := range p.GoFiles { + if strings.HasSuffix(f, "_test.go") { + return true + } + } + return false +} + +// determineRootDirs returns a mapping from directories code can be contained in to the +// corresponding import path prefixes of those directories. +// Its result is used to try to determine the import path for a package containing +// an overlay file. +func determineRootDirs(cfg *Config) map[string]string { + // Assume modules first: + out, err := invokeGo(cfg, "list", "-m", "-json", "all") + if err != nil { + return determineRootDirsGOPATH(cfg) + } + m := map[string]string{} + type jsonMod struct{ Path, Dir string } + for dec := json.NewDecoder(out); dec.More(); { + mod := new(jsonMod) + if err := dec.Decode(mod); err != nil { + return m // Give up and return an empty map. Package won't be found for overlay. + } + if mod.Dir != "" && mod.Path != "" { + // This is a valid module; add it to the map. + m[mod.Dir] = mod.Path + } + } + return m +} + +func determineRootDirsGOPATH(cfg *Config) map[string]string { + m := map[string]string{} + out, err := invokeGo(cfg, "env", "GOPATH") + if err != nil { + // Could not determine root dir mapping. Everything is best-effort, so just return an empty map. + // When we try to find the import path for a directory, there will be no root-dir match and + // we'll give up. + return m + } + for _, p := range filepath.SplitList(string(bytes.TrimSpace(out.Bytes()))) { + m[filepath.Join(p, "src")] = "" + } + return m +} + +func extractImports(filename string, contents []byte) ([]string, error) { + f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.ImportsOnly) // TODO(matloob): reuse fileset? + if err != nil { + return nil, err + } + var res []string + for _, imp := range f.Imports { + quotedPath := imp.Path.Value + path, err := strconv.Unquote(quotedPath) + if err != nil { + return nil, err + } + res = append(res, path) + } + return res, nil +} + +// reclaimPackage attempts to reuse a package that failed to load in an overlay. +// +// If the package has errors and has no Name, GoFiles, or Imports, +// then it's possible that it doesn't yet exist on disk. +func reclaimPackage(pkg *Package, id string, filename string, contents []byte) bool { + // TODO(rstambler): Check the message of the actual error? + // It differs between $GOPATH and module mode. + if pkg.ID != id { + return false + } + if len(pkg.Errors) != 1 { + return false + } + if pkg.Name != "" || pkg.ExportFile != "" { + return false + } + if len(pkg.GoFiles) > 0 || len(pkg.CompiledGoFiles) > 0 || len(pkg.OtherFiles) > 0 { + return false + } + if len(pkg.Imports) > 0 { + return false + } + pkgName, ok := extractPackageName(filename, contents) + if !ok { + return false + } + pkg.Name = pkgName + pkg.Errors = nil + return true +} + +func extractPackageName(filename string, contents []byte) (string, bool) { + // TODO(rstambler): Check the message of the actual error? 
+ // It differs between $GOPATH and module mode. + f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.PackageClauseOnly) // TODO(matloob): reuse fileset? + if err != nil { + return "", false + } + return f.Name.Name, true +} diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go new file mode 100644 index 000000000..aff94a3fe --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -0,0 +1,57 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "fmt" + "strings" +) + +var allModes = []LoadMode{ + NeedName, + NeedFiles, + NeedCompiledGoFiles, + NeedImports, + NeedDeps, + NeedExportsFile, + NeedTypes, + NeedSyntax, + NeedTypesInfo, + NeedTypesSizes, +} + +var modeStrings = []string{ + "NeedName", + "NeedFiles", + "NeedCompiledGoFiles", + "NeedImports", + "NeedDeps", + "NeedExportsFile", + "NeedTypes", + "NeedSyntax", + "NeedTypesInfo", + "NeedTypesSizes", +} + +func (mod LoadMode) String() string { + m := mod + if m == 0 { + return fmt.Sprintf("LoadMode(0)") + } + var out []string + for i, x := range allModes { + if x > m { + break + } + if (m & x) != 0 { + out = append(out, modeStrings[i]) + m = m ^ x + } + } + if m != 0 { + out = append(out, "Unknown") + } + return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|")) +} diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go new file mode 100644 index 000000000..f98a0bdca --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -0,0 +1,1116 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +// See doc.go for package documentation and implementation notes. + +import ( + "context" + "encoding/json" + "fmt" + "go/ast" + "go/parser" + "go/scanner" + "go/token" + "go/types" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + "sync" + + "golang.org/x/tools/go/gcexportdata" +) + +// A LoadMode controls the amount of detail to return when loading. +// The bits below can be combined to specify which fields should be +// filled in the result packages. +// The zero value is a special case, equivalent to combining +// the NeedName, NeedFiles, and NeedCompiledGoFiles bits. +// ID and Errors (if present) will always be filled. +// Load may return more information than requested. +type LoadMode int + +const ( + // NeedName adds Name and PkgPath. + NeedName LoadMode = 1 << iota + + // NeedFiles adds GoFiles and OtherFiles. + NeedFiles + + // NeedCompiledGoFiles adds CompiledGoFiles. + NeedCompiledGoFiles + + // NeedImports adds Imports. If NeedDeps is not set, the Imports field will contain + // "placeholder" Packages with only the ID set. + NeedImports + + // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. + NeedDeps + + // NeedExportsFile adds ExportsFile. + NeedExportsFile + + // NeedTypes adds Types, Fset, and IllTyped. + NeedTypes + + // NeedSyntax adds Syntax. + NeedSyntax + + // NeedTypesInfo adds TypesInfo. + NeedTypesInfo + + // NeedTypesSizes adds TypesSizes. + NeedTypesSizes +) + +const ( + // Deprecated: LoadFiles exists for historical compatibility + // and should not be used. 
Please directly specify the needed fields using the Need values. + LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles + + // Deprecated: LoadImports exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadImports = LoadFiles | NeedImports + + // Deprecated: LoadTypes exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadTypes = LoadImports | NeedTypes | NeedTypesSizes + + // Deprecated: LoadSyntax exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo + + // Deprecated: LoadAllSyntax exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadAllSyntax = LoadSyntax | NeedDeps +) + +// A Config specifies details about how packages should be loaded. +// The zero value is a valid configuration. +// Calls to Load do not modify this struct. +type Config struct { + // Mode controls the level of information returned for each package. + Mode LoadMode + + // Context specifies the context for the load operation. + // If the context is cancelled, the loader may stop early + // and return an ErrCancelled error. + // If Context is nil, the load cannot be cancelled. + Context context.Context + + // Logf is the logger for the config. + // If the user provides a logger, debug logging is enabled. + // If the GOPACKAGESDEBUG environment variable is set to true, + // but the logger is nil, default to log.Printf. + Logf func(format string, args ...interface{}) + + // Dir is the directory in which to run the build system's query tool + // that provides information about the packages. + // If Dir is empty, the tool is run in the current directory. + Dir string + + // Env is the environment to use when invoking the build system's query tool. + // If Env is nil, the current environment is used. + // As in os/exec's Cmd, only the last value in the slice for + // each environment key is used. To specify the setting of only + // a few variables, append to the current environment, as in: + // + // opt.Env = append(os.Environ(), "GOOS=plan9", "GOARCH=386") + // + Env []string + + // BuildFlags is a list of command-line flags to be passed through to + // the build system's query tool. + BuildFlags []string + + // Fset provides source position information for syntax trees and types. + // If Fset is nil, Load will use a new fileset, but preserve Fset's value. + Fset *token.FileSet + + // ParseFile is called to read and parse each file + // when preparing a package's type-checked syntax tree. + // It must be safe to call ParseFile simultaneously from multiple goroutines. + // If ParseFile is nil, the loader will uses parser.ParseFile. + // + // ParseFile should parse the source from src and use filename only for + // recording position information. + // + // An application may supply a custom implementation of ParseFile + // to change the effective file contents or the behavior of the parser, + // or to modify the syntax tree. For example, selectively eliminating + // unwanted function bodies can significantly accelerate type checking. 
+ ParseFile func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) + + // If Tests is set, the loader includes not just the packages + // matching a particular pattern but also any related test packages, + // including test-only variants of the package and the test executable. + // + // For example, when using the go command, loading "fmt" with Tests=true + // returns four packages, with IDs "fmt" (the standard package), + // "fmt [fmt.test]" (the package as compiled for the test), + // "fmt_test" (the test functions from source files in package fmt_test), + // and "fmt.test" (the test binary). + // + // In build systems with explicit names for tests, + // setting Tests may have no effect. + Tests bool + + // Overlay provides a mapping of absolute file paths to file contents. + // If the file with the given path already exists, the parser will use the + // alternative file contents provided by the map. + // + // Overlays provide incomplete support for when a given file doesn't + // already exist on disk. See the package doc above for more details. + Overlay map[string][]byte +} + +// driver is the type for functions that query the build system for the +// packages named by the patterns. +type driver func(cfg *Config, patterns ...string) (*driverResponse, error) + +// driverResponse contains the results for a driver query. +type driverResponse struct { + // Sizes, if not nil, is the types.Sizes to use when type checking. + Sizes *types.StdSizes + + // Roots is the set of package IDs that make up the root packages. + // We have to encode this separately because when we encode a single package + // we cannot know if it is one of the roots as that requires knowledge of the + // graph it is part of. + Roots []string `json:",omitempty"` + + // Packages is the full set of packages in the graph. + // The packages are not connected into a graph. + // The Imports if populated will be stubs that only have their ID set. + // Imports will be connected and then type and syntax information added in a + // later pass (see refine). + Packages []*Package +} + +// Load loads and returns the Go packages named by the given patterns. +// +// Config specifies loading options; +// nil behaves the same as an empty Config. +// +// Load returns an error if any of the patterns was invalid +// as defined by the underlying build system. +// It may return an empty list of packages without an error, +// for instance for an empty expansion of a valid wildcard. +// Errors associated with a particular package are recorded in the +// corresponding Package's Errors list, and do not cause Load to +// return an error. Clients may need to handle such errors before +// proceeding with further analysis. The PrintErrors function is +// provided for convenient display of all errors. +func Load(cfg *Config, patterns ...string) ([]*Package, error) { + l := newLoader(cfg) + response, err := defaultDriver(&l.Config, patterns...) + if err != nil { + return nil, err + } + l.sizes = response.Sizes + return l.refine(response.Roots, response.Packages...) +} + +// defaultDriver is a driver that looks for an external driver binary, and if +// it does not find it falls back to the built in go list driver. +func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) { + driver := findExternalDriver(cfg) + if driver == nil { + driver = goListDriver + } + return driver(cfg, patterns...) +} + +// A Package describes a loaded Go package. 
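Config and Load above form the public surface most clients use. The following is a minimal sketch of a typical caller, shown for illustration only; the "fmt" pattern is a placeholder and the snippet is not part of the vendored package.

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		// Request only the fields needed; see the LoadMode bits above.
		Mode: packages.NeedName | packages.NeedFiles | packages.NeedImports |
			packages.NeedTypes | packages.NeedSyntax,
	}
	pkgs, err := packages.Load(cfg, "fmt")
	if err != nil {
		log.Fatal(err) // invalid pattern or driver failure
	}
	// Problems in individual packages are attached to each Package,
	// not returned by Load, so check them separately.
	if packages.PrintErrors(pkgs) > 0 {
		log.Fatal("packages contain errors")
	}
	for _, p := range pkgs {
		fmt.Println(p.ID, p.PkgPath, len(p.GoFiles), "files")
	}
}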
+type Package struct { + // ID is a unique identifier for a package, + // in a syntax provided by the underlying build system. + // + // Because the syntax varies based on the build system, + // clients should treat IDs as opaque and not attempt to + // interpret them. + ID string + + // Name is the package name as it appears in the package source code. + Name string + + // PkgPath is the package path as used by the go/types package. + PkgPath string + + // Errors contains any errors encountered querying the metadata + // of the package, or while parsing or type-checking its files. + Errors []Error + + // GoFiles lists the absolute file paths of the package's Go source files. + GoFiles []string + + // CompiledGoFiles lists the absolute file paths of the package's source + // files that were presented to the compiler. + // This may differ from GoFiles if files are processed before compilation. + CompiledGoFiles []string + + // OtherFiles lists the absolute file paths of the package's non-Go source files, + // including assembly, C, C++, Fortran, Objective-C, SWIG, and so on. + OtherFiles []string + + // ExportFile is the absolute path to a file containing type + // information for the package as provided by the build system. + ExportFile string + + // Imports maps import paths appearing in the package's Go source files + // to corresponding loaded Packages. + Imports map[string]*Package + + // Types provides type information for the package. + // The NeedTypes LoadMode bit sets this field for packages matching the + // patterns; type information for dependencies may be missing or incomplete, + // unless NeedDeps and NeedImports are also set. + Types *types.Package + + // Fset provides position information for Types, TypesInfo, and Syntax. + // It is set only when Types is set. + Fset *token.FileSet + + // IllTyped indicates whether the package or any dependency contains errors. + // It is set only when Types is set. + IllTyped bool + + // Syntax is the package's syntax trees, for the files listed in CompiledGoFiles. + // + // The NeedSyntax LoadMode bit populates this field for packages matching the patterns. + // If NeedDeps and NeedImports are also set, this field will also be populated + // for dependencies. + Syntax []*ast.File + + // TypesInfo provides type information about the package's syntax trees. + // It is set only when Syntax is set. + TypesInfo *types.Info + + // TypesSizes provides the effective size function for types in TypesInfo. + TypesSizes types.Sizes +} + +// An Error describes a problem with a package's metadata, syntax, or types. +type Error struct { + Pos string // "file:line:col" or "file:line" or "" or "-" + Msg string + Kind ErrorKind +} + +// ErrorKind describes the source of the error, allowing the user to +// differentiate between errors generated by the driver, the parser, or the +// type-checker. +type ErrorKind int + +const ( + UnknownError ErrorKind = iota + ListError + ParseError + TypeError +) + +func (err Error) Error() string { + pos := err.Pos + if pos == "" { + pos = "-" // like token.Position{}.String() + } + return pos + ": " + err.Msg +} + +// flatPackage is the JSON form of Package +// It drops all the type and syntax fields, and transforms the Imports +// +// TODO(adonovan): identify this struct with Package, effectively +// publishing the JSON protocol. 
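The Error and ErrorKind types above let a caller tell metadata, parse, and type problems apart. Below is a hedged sketch of consuming them; the reportErrors helper and its package name are assumed, and pkgs is expected to come from an earlier packages.Load call.

package example

import (
	"fmt"

	"golang.org/x/tools/go/packages"
)

// reportErrors classifies the errors attached to loaded packages by their Kind.
// pkgs is assumed to be the result of an earlier packages.Load call.
func reportErrors(pkgs []*packages.Package) {
	for _, p := range pkgs {
		for _, e := range p.Errors {
			switch e.Kind {
			case packages.ListError:
				fmt.Printf("metadata error in %s: %v\n", p.ID, e)
			case packages.ParseError:
				fmt.Printf("parse error in %s: %v\n", p.ID, e)
			case packages.TypeError:
				fmt.Printf("type error in %s: %v\n", p.ID, e)
			default:
				fmt.Printf("other error in %s: %v\n", p.ID, e)
			}
		}
	}
}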
+type flatPackage struct { + ID string + Name string `json:",omitempty"` + PkgPath string `json:",omitempty"` + Errors []Error `json:",omitempty"` + GoFiles []string `json:",omitempty"` + CompiledGoFiles []string `json:",omitempty"` + OtherFiles []string `json:",omitempty"` + ExportFile string `json:",omitempty"` + Imports map[string]string `json:",omitempty"` +} + +// MarshalJSON returns the Package in its JSON form. +// For the most part, the structure fields are written out unmodified, and +// the type and syntax fields are skipped. +// The imports are written out as just a map of path to package id. +// The errors are written using a custom type that tries to preserve the +// structure of error types we know about. +// +// This method exists to enable support for additional build systems. It is +// not intended for use by clients of the API and we may change the format. +func (p *Package) MarshalJSON() ([]byte, error) { + flat := &flatPackage{ + ID: p.ID, + Name: p.Name, + PkgPath: p.PkgPath, + Errors: p.Errors, + GoFiles: p.GoFiles, + CompiledGoFiles: p.CompiledGoFiles, + OtherFiles: p.OtherFiles, + ExportFile: p.ExportFile, + } + if len(p.Imports) > 0 { + flat.Imports = make(map[string]string, len(p.Imports)) + for path, ipkg := range p.Imports { + flat.Imports[path] = ipkg.ID + } + } + return json.Marshal(flat) +} + +// UnmarshalJSON reads in a Package from its JSON format. +// See MarshalJSON for details about the format accepted. +func (p *Package) UnmarshalJSON(b []byte) error { + flat := &flatPackage{} + if err := json.Unmarshal(b, &flat); err != nil { + return err + } + *p = Package{ + ID: flat.ID, + Name: flat.Name, + PkgPath: flat.PkgPath, + Errors: flat.Errors, + GoFiles: flat.GoFiles, + CompiledGoFiles: flat.CompiledGoFiles, + OtherFiles: flat.OtherFiles, + ExportFile: flat.ExportFile, + } + if len(flat.Imports) > 0 { + p.Imports = make(map[string]*Package, len(flat.Imports)) + for path, id := range flat.Imports { + p.Imports[path] = &Package{ID: id} + } + } + return nil +} + +func (p *Package) String() string { return p.ID } + +// loaderPackage augments Package with state used during the loading phase +type loaderPackage struct { + *Package + importErrors map[string]error // maps each bad import to its error + loadOnce sync.Once + color uint8 // for cycle detection + needsrc bool // load from source (Mode >= LoadTypes) + needtypes bool // type information is either requested or depended on + initial bool // package was matched by a pattern +} + +// loader holds the working state of a single call to load. +type loader struct { + pkgs map[string]*loaderPackage + Config + sizes types.Sizes + parseCache map[string]*parseValue + parseCacheMu sync.Mutex + exportMu sync.Mutex // enforces mutual exclusion of exportdata operations + + // Config.Mode contains the implied mode (see impliedLoadMode). + // Implied mode contains all the fields we need the data for. + // In requestedMode there are the actually requested fields. + // We'll zero them out before returning packages to the user. + // This makes it easier for us to get the conditions where + // we need certain modes right. + requestedMode LoadMode +} + +type parseValue struct { + f *ast.File + err error + ready chan struct{} +} + +func newLoader(cfg *Config) *loader { + ld := &loader{ + parseCache: map[string]*parseValue{}, + } + if cfg != nil { + ld.Config = *cfg + // If the user has provided a logger, use it. 
+ ld.Config.Logf = cfg.Logf + } + if ld.Config.Logf == nil { + // If the GOPACKAGESDEBUG environment variable is set to true, + // but the user has not provided a logger, default to log.Printf. + if debug { + ld.Config.Logf = log.Printf + } else { + ld.Config.Logf = func(format string, args ...interface{}) {} + } + } + if ld.Config.Mode == 0 { + ld.Config.Mode = NeedName | NeedFiles | NeedCompiledGoFiles // Preserve zero behavior of Mode for backwards compatibility. + } + if ld.Config.Env == nil { + ld.Config.Env = os.Environ() + } + if ld.Context == nil { + ld.Context = context.Background() + } + if ld.Dir == "" { + if dir, err := os.Getwd(); err == nil { + ld.Dir = dir + } + } + + // Save the actually requested fields. We'll zero them out before returning packages to the user. + ld.requestedMode = ld.Mode + ld.Mode = impliedLoadMode(ld.Mode) + + if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { + if ld.Fset == nil { + ld.Fset = token.NewFileSet() + } + + // ParseFile is required even in LoadTypes mode + // because we load source if export data is missing. + if ld.ParseFile == nil { + ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) { + const mode = parser.AllErrors | parser.ParseComments + return parser.ParseFile(fset, filename, src, mode) + } + } + } + + return ld +} + +// refine connects the supplied packages into a graph and then adds type and +// and syntax information as requested by the LoadMode. +func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { + rootMap := make(map[string]int, len(roots)) + for i, root := range roots { + rootMap[root] = i + } + ld.pkgs = make(map[string]*loaderPackage) + // first pass, fixup and build the map and roots + var initial = make([]*loaderPackage, len(roots)) + for _, pkg := range list { + rootIndex := -1 + if i, found := rootMap[pkg.ID]; found { + rootIndex = i + } + lpkg := &loaderPackage{ + Package: pkg, + needtypes: (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && ld.Mode&NeedDeps != 0 && rootIndex < 0) || rootIndex >= 0, + needsrc: (ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && ld.Mode&NeedDeps != 0 && rootIndex < 0) || rootIndex >= 0 || + len(ld.Overlay) > 0 || // Overlays can invalidate export data. TODO(matloob): make this check fine-grained based on dependencies on overlaid files + pkg.ExportFile == "" && pkg.PkgPath != "unsafe", + } + ld.pkgs[lpkg.ID] = lpkg + if rootIndex >= 0 { + initial[rootIndex] = lpkg + lpkg.initial = true + } + } + for i, root := range roots { + if initial[i] == nil { + return nil, fmt.Errorf("root package %v is missing", root) + } + } + + // Materialize the import graph. + + const ( + white = 0 // new + grey = 1 // in progress + black = 2 // complete + ) + + // visit traverses the import graph, depth-first, + // and materializes the graph as Packages.Imports. + // + // Valid imports are saved in the Packages.Import map. + // Invalid imports (cycles and missing nodes) are saved in the importErrors map. + // Thus, even in the presence of both kinds of errors, the Import graph remains a DAG. + // + // visit returns whether the package needs src or has a transitive + // dependency on a package that does. These are the only packages + // for which we load source code. 
+ var stack []*loaderPackage + var visit func(lpkg *loaderPackage) bool + var srcPkgs []*loaderPackage + visit = func(lpkg *loaderPackage) bool { + switch lpkg.color { + case black: + return lpkg.needsrc + case grey: + panic("internal error: grey node") + } + lpkg.color = grey + stack = append(stack, lpkg) // push + stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports + // If NeedImports isn't set, the imports fields will all be zeroed out. + if ld.Mode&NeedImports != 0 { + lpkg.Imports = make(map[string]*Package, len(stubs)) + for importPath, ipkg := range stubs { + var importErr error + imp := ld.pkgs[ipkg.ID] + if imp == nil { + // (includes package "C" when DisableCgo) + importErr = fmt.Errorf("missing package: %q", ipkg.ID) + } else if imp.color == grey { + importErr = fmt.Errorf("import cycle: %s", stack) + } + if importErr != nil { + if lpkg.importErrors == nil { + lpkg.importErrors = make(map[string]error) + } + lpkg.importErrors[importPath] = importErr + continue + } + + if visit(imp) { + lpkg.needsrc = true + } + lpkg.Imports[importPath] = imp.Package + } + } + if lpkg.needsrc { + srcPkgs = append(srcPkgs, lpkg) + } + if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes + } + stack = stack[:len(stack)-1] // pop + lpkg.color = black + + return lpkg.needsrc + } + + if ld.Mode&NeedImports == 0 { + // We do this to drop the stub import packages that we are not even going to try to resolve. + for _, lpkg := range initial { + lpkg.Imports = nil + } + } else { + // For each initial package, create its import DAG. + for _, lpkg := range initial { + visit(lpkg) + } + } + if ld.Mode&NeedImports != 0 && ld.Mode&NeedTypes != 0 { + for _, lpkg := range srcPkgs { + // Complete type information is required for the + // immediate dependencies of each source package. + for _, ipkg := range lpkg.Imports { + imp := ld.pkgs[ipkg.ID] + imp.needtypes = true + } + } + } + // Load type data and syntax if needed, starting at + // the initial packages (roots of the import DAG). + if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { + var wg sync.WaitGroup + for _, lpkg := range initial { + wg.Add(1) + go func(lpkg *loaderPackage) { + ld.loadRecursive(lpkg) + wg.Done() + }(lpkg) + } + wg.Wait() + } + + result := make([]*Package, len(initial)) + for i, lpkg := range initial { + result[i] = lpkg.Package + } + for i := range ld.pkgs { + // Clear all unrequested fields, for extra de-Hyrum-ization. + if ld.requestedMode&NeedName == 0 { + ld.pkgs[i].Name = "" + ld.pkgs[i].PkgPath = "" + } + if ld.requestedMode&NeedFiles == 0 { + ld.pkgs[i].GoFiles = nil + ld.pkgs[i].OtherFiles = nil + } + if ld.requestedMode&NeedCompiledGoFiles == 0 { + ld.pkgs[i].CompiledGoFiles = nil + } + if ld.requestedMode&NeedImports == 0 { + ld.pkgs[i].Imports = nil + } + if ld.requestedMode&NeedExportsFile == 0 { + ld.pkgs[i].ExportFile = "" + } + if ld.requestedMode&NeedTypes == 0 { + ld.pkgs[i].Types = nil + ld.pkgs[i].Fset = nil + ld.pkgs[i].IllTyped = false + } + if ld.requestedMode&NeedSyntax == 0 { + ld.pkgs[i].Syntax = nil + } + if ld.requestedMode&NeedTypesInfo == 0 { + ld.pkgs[i].TypesInfo = nil + } + if ld.requestedMode&NeedTypesSizes == 0 { + ld.pkgs[i].TypesSizes = nil + } + } + + return result, nil +} + +// loadRecursive loads the specified package and its dependencies, +// recursively, in parallel, in topological order. +// It is atomic and idempotent. +// Precondition: ld.Mode&NeedTypes. 
+func (ld *loader) loadRecursive(lpkg *loaderPackage) { + lpkg.loadOnce.Do(func() { + // Load the direct dependencies, in parallel. + var wg sync.WaitGroup + for _, ipkg := range lpkg.Imports { + imp := ld.pkgs[ipkg.ID] + wg.Add(1) + go func(imp *loaderPackage) { + ld.loadRecursive(imp) + wg.Done() + }(imp) + } + wg.Wait() + ld.loadPackage(lpkg) + }) +} + +// loadPackage loads the specified package. +// It must be called only once per Package, +// after immediate dependencies are loaded. +// Precondition: ld.Mode & NeedTypes. +func (ld *loader) loadPackage(lpkg *loaderPackage) { + if lpkg.PkgPath == "unsafe" { + // Fill in the blanks to avoid surprises. + lpkg.Types = types.Unsafe + lpkg.Fset = ld.Fset + lpkg.Syntax = []*ast.File{} + lpkg.TypesInfo = new(types.Info) + lpkg.TypesSizes = ld.sizes + return + } + + // Call NewPackage directly with explicit name. + // This avoids skew between golist and go/types when the files' + // package declarations are inconsistent. + lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name) + lpkg.Fset = ld.Fset + + // Subtle: we populate all Types fields with an empty Package + // before loading export data so that export data processing + // never has to create a types.Package for an indirect dependency, + // which would then require that such created packages be explicitly + // inserted back into the Import graph as a final step after export data loading. + // The Diamond test exercises this case. + if !lpkg.needtypes && !lpkg.needsrc { + return + } + if !lpkg.needsrc { + ld.loadFromExportData(lpkg) + return // not a source package, don't get syntax trees + } + + appendError := func(err error) { + // Convert various error types into the one true Error. + var errs []Error + switch err := err.(type) { + case Error: + // from driver + errs = append(errs, err) + + case *os.PathError: + // from parser + errs = append(errs, Error{ + Pos: err.Path + ":1", + Msg: err.Err.Error(), + Kind: ParseError, + }) + + case scanner.ErrorList: + // from parser + for _, err := range err { + errs = append(errs, Error{ + Pos: err.Pos.String(), + Msg: err.Msg, + Kind: ParseError, + }) + } + + case types.Error: + // from type checker + errs = append(errs, Error{ + Pos: err.Fset.Position(err.Pos).String(), + Msg: err.Msg, + Kind: TypeError, + }) + + default: + // unexpected impoverished error from parser? + errs = append(errs, Error{ + Pos: "-", + Msg: err.Error(), + Kind: UnknownError, + }) + + // If you see this error message, please file a bug. + log.Printf("internal error: error %q (%T) without position", err, err) + } + + lpkg.Errors = append(lpkg.Errors, errs...) + } + + if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" { + // The config requested loading sources and types, but sources are missing. + // Add an error to the package and fall back to loading from export data. 
+ appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError}) + ld.loadFromExportData(lpkg) + return // can't get syntax trees for this package + } + + files, errs := ld.parseFiles(lpkg.CompiledGoFiles) + for _, err := range errs { + appendError(err) + } + + lpkg.Syntax = files + if ld.Config.Mode&NeedTypes == 0 { + return + } + + lpkg.TypesInfo = &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + lpkg.TypesSizes = ld.sizes + + importer := importerFunc(func(path string) (*types.Package, error) { + if path == "unsafe" { + return types.Unsafe, nil + } + + // The imports map is keyed by import path. + ipkg := lpkg.Imports[path] + if ipkg == nil { + if err := lpkg.importErrors[path]; err != nil { + return nil, err + } + // There was skew between the metadata and the + // import declarations, likely due to an edit + // race, or because the ParseFile feature was + // used to supply alternative file contents. + return nil, fmt.Errorf("no metadata for %s", path) + } + + if ipkg.Types != nil && ipkg.Types.Complete() { + return ipkg.Types, nil + } + log.Fatalf("internal error: package %q without types was imported from %q", path, lpkg) + panic("unreachable") + }) + + // type-check + tc := &types.Config{ + Importer: importer, + + // Type-check bodies of functions only in non-initial packages. + // Example: for import graph A->B->C and initial packages {A,C}, + // we can ignore function bodies in B. + IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial, + + Error: appendError, + Sizes: ld.sizes, + } + types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) + + lpkg.importErrors = nil // no longer needed + + // If !Cgo, the type-checker uses FakeImportC mode, so + // it doesn't invoke the importer for import "C", + // nor report an error for the import, + // or for any undefined C.f reference. + // We must detect this explicitly and correctly + // mark the package as IllTyped (by reporting an error). + // TODO(adonovan): if these errors are annoying, + // we could just set IllTyped quietly. + if tc.FakeImportC { + outer: + for _, f := range lpkg.Syntax { + for _, imp := range f.Imports { + if imp.Path.Value == `"C"` { + err := types.Error{Fset: ld.Fset, Pos: imp.Pos(), Msg: `import "C" ignored`} + appendError(err) + break outer + } + } + } + } + + // Record accumulated errors. + illTyped := len(lpkg.Errors) > 0 + if !illTyped { + for _, imp := range lpkg.Imports { + if imp.IllTyped { + illTyped = true + break + } + } + } + lpkg.IllTyped = illTyped +} + +// An importFunc is an implementation of the single-method +// types.Importer interface based on a function value. +type importerFunc func(path string) (*types.Package, error) + +func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } + +// We use a counting semaphore to limit +// the number of parallel I/O calls per process. 
+var ioLimit = make(chan bool, 20) + +func (ld *loader) parseFile(filename string) (*ast.File, error) { + ld.parseCacheMu.Lock() + v, ok := ld.parseCache[filename] + if ok { + // cache hit + ld.parseCacheMu.Unlock() + <-v.ready + } else { + // cache miss + v = &parseValue{ready: make(chan struct{})} + ld.parseCache[filename] = v + ld.parseCacheMu.Unlock() + + var src []byte + for f, contents := range ld.Config.Overlay { + if sameFile(f, filename) { + src = contents + } + } + var err error + if src == nil { + ioLimit <- true // wait + src, err = ioutil.ReadFile(filename) + <-ioLimit // signal + } + if err != nil { + v.err = err + } else { + v.f, v.err = ld.ParseFile(ld.Fset, filename, src) + } + + close(v.ready) + } + return v.f, v.err +} + +// parseFiles reads and parses the Go source files and returns the ASTs +// of the ones that could be at least partially parsed, along with a +// list of I/O and parse errors encountered. +// +// Because files are scanned in parallel, the token.Pos +// positions of the resulting ast.Files are not ordered. +// +func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { + var wg sync.WaitGroup + n := len(filenames) + parsed := make([]*ast.File, n) + errors := make([]error, n) + for i, file := range filenames { + if ld.Config.Context.Err() != nil { + parsed[i] = nil + errors[i] = ld.Config.Context.Err() + continue + } + wg.Add(1) + go func(i int, filename string) { + parsed[i], errors[i] = ld.parseFile(filename) + wg.Done() + }(i, file) + } + wg.Wait() + + // Eliminate nils, preserving order. + var o int + for _, f := range parsed { + if f != nil { + parsed[o] = f + o++ + } + } + parsed = parsed[:o] + + o = 0 + for _, err := range errors { + if err != nil { + errors[o] = err + o++ + } + } + errors = errors[:o] + + return parsed, errors +} + +// sameFile returns true if x and y have the same basename and denote +// the same file. +// +func sameFile(x, y string) bool { + if x == y { + // It could be the case that y doesn't exist. + // For instance, it may be an overlay file that + // hasn't been written to disk. To handle that case + // let x == y through. (We added the exact absolute path + // string to the CompiledGoFiles list, so the unwritten + // overlay case implies x==y.) + return true + } + if strings.EqualFold(filepath.Base(x), filepath.Base(y)) { // (optimisation) + if xi, err := os.Stat(x); err == nil { + if yi, err := os.Stat(y); err == nil { + return os.SameFile(xi, yi) + } + } + } + return false +} + +// loadFromExportData returns type information for the specified +// package, loading it from an export data file on the first request. +func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error) { + if lpkg.PkgPath == "" { + log.Fatalf("internal error: Package %s has no PkgPath", lpkg) + } + + // Because gcexportdata.Read has the potential to create or + // modify the types.Package for each node in the transitive + // closure of dependencies of lpkg, all exportdata operations + // must be sequential. (Finer-grained locking would require + // changes to the gcexportdata API.) + // + // The exportMu lock guards the Package.Pkg field and the + // types.Package it points to, for each Package in the graph. + // + // Not all accesses to Package.Pkg need to be protected by exportMu: + // graph ordering ensures that direct dependencies of source + // packages are fully loaded before the importer reads their Pkg field. 
+ ld.exportMu.Lock() + defer ld.exportMu.Unlock() + + if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() { + return tpkg, nil // cache hit + } + + lpkg.IllTyped = true // fail safe + + if lpkg.ExportFile == "" { + // Errors while building export data will have been printed to stderr. + return nil, fmt.Errorf("no export data file") + } + f, err := os.Open(lpkg.ExportFile) + if err != nil { + return nil, err + } + defer f.Close() + + // Read gc export data. + // + // We don't currently support gccgo export data because all + // underlying workspaces use the gc toolchain. (Even build + // systems that support gccgo don't use it for workspace + // queries.) + r, err := gcexportdata.NewReader(f) + if err != nil { + return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + + // Build the view. + // + // The gcexportdata machinery has no concept of package ID. + // It identifies packages by their PkgPath, which although not + // globally unique is unique within the scope of one invocation + // of the linker, type-checker, or gcexportdata. + // + // So, we must build a PkgPath-keyed view of the global + // (conceptually ID-keyed) cache of packages and pass it to + // gcexportdata. The view must contain every existing + // package that might possibly be mentioned by the + // current package---its transitive closure. + // + // In loadPackage, we unconditionally create a types.Package for + // each dependency so that export data loading does not + // create new ones. + // + // TODO(adonovan): it would be simpler and more efficient + // if the export data machinery invoked a callback to + // get-or-create a package instead of a map. + // + view := make(map[string]*types.Package) // view seen by gcexportdata + seen := make(map[*loaderPackage]bool) // all visited packages + var visit func(pkgs map[string]*Package) + visit = func(pkgs map[string]*Package) { + for _, p := range pkgs { + lpkg := ld.pkgs[p.ID] + if !seen[lpkg] { + seen[lpkg] = true + view[lpkg.PkgPath] = lpkg.Types + visit(lpkg.Imports) + } + } + } + visit(lpkg.Imports) + + viewLen := len(view) + 1 // adding the self package + // Parse the export data. + // (May modify incomplete packages in view but not create new ones.) + tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath) + if err != nil { + return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + if viewLen != len(view) { + log.Fatalf("Unexpected package creation during export data loading") + } + + lpkg.Types = tpkg + lpkg.IllTyped = false + + return tpkg, nil +} + +// impliedLoadMode returns loadMode with its dependencies. +func impliedLoadMode(loadMode LoadMode) LoadMode { + if loadMode&NeedTypesInfo != 0 && loadMode&NeedImports == 0 { + // If NeedTypesInfo, go/packages needs to do typechecking itself so it can + // associate type info with the AST. To do so, we need the export data + // for dependencies, which means we need to ask for the direct dependencies. + // NeedImports is used to ask for the direct dependencies. + loadMode |= NeedImports + } + + if loadMode&NeedDeps != 0 && loadMode&NeedImports == 0 { + // With NeedDeps we need to load at least direct dependencies. + // NeedImports is used to ask for the direct dependencies. 
+ loadMode |= NeedImports + } + + return loadMode +} + +func usesExportData(cfg *Config) bool { + return cfg.Mode&NeedExportsFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 +} diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go new file mode 100644 index 000000000..b13cb081f --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/visit.go @@ -0,0 +1,55 @@ +package packages + +import ( + "fmt" + "os" + "sort" +) + +// Visit visits all the packages in the import graph whose roots are +// pkgs, calling the optional pre function the first time each package +// is encountered (preorder), and the optional post function after a +// package's dependencies have been visited (postorder). +// The boolean result of pre(pkg) determines whether +// the imports of package pkg are visited. +func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { + seen := make(map[*Package]bool) + var visit func(*Package) + visit = func(pkg *Package) { + if !seen[pkg] { + seen[pkg] = true + + if pre == nil || pre(pkg) { + paths := make([]string, 0, len(pkg.Imports)) + for path := range pkg.Imports { + paths = append(paths, path) + } + sort.Strings(paths) // Imports is a map, this makes visit stable + for _, path := range paths { + visit(pkg.Imports[path]) + } + } + + if post != nil { + post(pkg) + } + } + } + for _, pkg := range pkgs { + visit(pkg) + } +} + +// PrintErrors prints to os.Stderr the accumulated errors of all +// packages in the import graph rooted at pkgs, dependencies first. +// PrintErrors returns the number of errors printed. +func PrintErrors(pkgs []*Package) int { + var n int + Visit(pkgs, nil, func(pkg *Package) { + for _, err := range pkg.Errors { + fmt.Fprintln(os.Stderr, err) + n++ + } + }) + return n +} diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go new file mode 100644 index 000000000..882e3b3d8 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -0,0 +1,523 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package objectpath defines a naming scheme for types.Objects +// (that is, named entities in Go programs) relative to their enclosing +// package. +// +// Type-checker objects are canonical, so they are usually identified by +// their address in memory (a pointer), but a pointer has meaning only +// within one address space. By contrast, objectpath names allow the +// identity of an object to be sent from one program to another, +// establishing a correspondence between types.Object variables that are +// distinct but logically equivalent. +// +// A single object may have multiple paths. In this example, +// type A struct{ X int } +// type B A +// the field X has two paths due to its membership of both A and B. +// The For(obj) function always returns one of these paths, arbitrarily +// but consistently. +package objectpath + +import ( + "fmt" + "strconv" + "strings" + + "go/types" +) + +// A Path is an opaque name that identifies a types.Object +// relative to its package. Conceptually, the name consists of a +// sequence of destructuring operations applied to the package scope +// to obtain the original object. +// The name does not include the package itself. 
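Visit and PrintErrors above are the exported helpers for walking the import graph. A short illustrative use that prints every package after its dependencies; the "fmt" pattern is a placeholder and this client code is not part of the vendored file.

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		Mode: packages.NeedName | packages.NeedImports | packages.NeedDeps,
	}
	pkgs, err := packages.Load(cfg, "fmt")
	if err != nil {
		log.Fatal(err)
	}
	// Post-order: each package is printed after all of its dependencies.
	packages.Visit(pkgs, nil, func(p *packages.Package) {
		fmt.Println(p.ID)
	})
}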
+type Path string + +// Encoding +// +// An object path is a textual and (with training) human-readable encoding +// of a sequence of destructuring operators, starting from a types.Package. +// The sequences represent a path through the package/object/type graph. +// We classify these operators by their type: +// +// PO package->object Package.Scope.Lookup +// OT object->type Object.Type +// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU] +// TO type->object Type.{At,Field,Method,Obj} [AFMO] +// +// All valid paths start with a package and end at an object +// and thus may be defined by the regular language: +// +// objectpath = PO (OT TT* TO)* +// +// The concrete encoding follows directly: +// - The only PO operator is Package.Scope.Lookup, which requires an identifier. +// - The only OT operator is Object.Type, +// which we encode as '.' because dot cannot appear in an identifier. +// - The TT operators are encoded as [EKPRU]. +// - The OT operators are encoded as [AFMO]; +// three of these (At,Field,Method) require an integer operand, +// which is encoded as a string of decimal digits. +// These indices are stable across different representations +// of the same package, even source and export data. +// +// In the example below, +// +// package p +// +// type T interface { +// f() (a string, b struct{ X int }) +// } +// +// field X has the path "T.UM0.RA1.F0", +// representing the following sequence of operations: +// +// p.Lookup("T") T +// .Type().Underlying().Method(0). f +// .Type().Results().At(1) b +// .Type().Field(0) X +// +// The encoding is not maximally compact---every R or P is +// followed by an A, for example---but this simplifies the +// encoder and decoder. +// +const ( + // object->type operators + opType = '.' // .Type() (Object) + + // type->type operators + opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) + opKey = 'K' // .Key() (Map) + opParams = 'P' // .Params() (Signature) + opResults = 'R' // .Results() (Signature) + opUnderlying = 'U' // .Underlying() (Named) + + // type->object operators + opAt = 'A' // .At(i) (Tuple) + opField = 'F' // .Field(i) (Struct) + opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) + opObj = 'O' // .Obj() (Named) +) + +// The For function returns the path to an object relative to its package, +// or an error if the object is not accessible from the package's Scope. +// +// The For function guarantees to return a path only for the following objects: +// - package-level types +// - exported package-level non-types +// - methods +// - parameter and result variables +// - struct fields +// These objects are sufficient to define the API of their package. +// The objects described by a package's export data are drawn from this set. +// +// For does not return a path for predeclared names, imported package +// names, local names, and unexported package-level names (except +// types). +// +// Example: given this definition, +// +// package p +// +// type T interface { +// f() (a string, b struct{ X int }) +// } +// +// For(X) would return a path that denotes the following sequence of operations: +// +// p.Scope().Lookup("T") (TypeName T) +// .Type().Underlying().Method(0). (method Func f) +// .Type().Results().At(1) (field Var b) +// .Type().Field(0) (field Var X) +// +// where p is the package (*types.Package) to which X belongs. +func For(obj types.Object) (Path, error) { + pkg := obj.Pkg() + + // This table lists the cases of interest. 
+ // + // Object Action + // ------ ------ + // nil reject + // builtin reject + // pkgname reject + // label reject + // var + // package-level accept + // func param/result accept + // local reject + // struct field accept + // const + // package-level accept + // local reject + // func + // package-level accept + // init functions reject + // concrete method accept + // interface method accept + // type + // package-level accept + // local reject + // + // The only accessible package-level objects are members of pkg itself. + // + // The cases are handled in four steps: + // + // 1. reject nil and builtin + // 2. accept package-level objects + // 3. reject obviously invalid objects + // 4. search the API for the path to the param/result/field/method. + + // 1. reference to nil or builtin? + if pkg == nil { + return "", fmt.Errorf("predeclared %s has no path", obj) + } + scope := pkg.Scope() + + // 2. package-level object? + if scope.Lookup(obj.Name()) == obj { + // Only exported objects (and non-exported types) have a path. + // Non-exported types may be referenced by other objects. + if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() { + return "", fmt.Errorf("no path for non-exported %v", obj) + } + return Path(obj.Name()), nil + } + + // 3. Not a package-level object. + // Reject obviously non-viable cases. + switch obj := obj.(type) { + case *types.Const, // Only package-level constants have a path. + *types.TypeName, // Only package-level types have a path. + *types.Label, // Labels are function-local. + *types.PkgName: // PkgNames are file-local. + return "", fmt.Errorf("no path for %v", obj) + + case *types.Var: + // Could be: + // - a field (obj.IsField()) + // - a func parameter or result + // - a local var. + // Sadly there is no way to distinguish + // a param/result from a local + // so we must proceed to the find. + + case *types.Func: + // A func, if not package-level, must be a method. + if recv := obj.Type().(*types.Signature).Recv(); recv == nil { + return "", fmt.Errorf("func is not a method: %v", obj) + } + // TODO(adonovan): opt: if the method is concrete, + // do a specialized version of the rest of this function so + // that it's O(1) not O(|scope|). Basically 'find' is needed + // only for struct fields and interface methods. + + default: + panic(obj) + } + + // 4. Search the API for the path to the var (field/param/result) or method. + + // First inspect package-level named types. + // In the presence of path aliases, these give + // the best paths because non-types may + // refer to types, but not the reverse. + empty := make([]byte, 0, 48) // initial space + for _, name := range scope.Names() { + o := scope.Lookup(name) + tname, ok := o.(*types.TypeName) + if !ok { + continue // handle non-types in second pass + } + + path := append(empty, name...) + path = append(path, opType) + + T := o.Type() + + if tname.IsAlias() { + // type alias + if r := find(obj, T, path); r != nil { + return Path(r), nil + } + } else { + // defined (named) type + if r := find(obj, T.Underlying(), append(path, opUnderlying)); r != nil { + return Path(r), nil + } + } + } + + // Then inspect everything else: + // non-types, and declared methods of defined types. + for _, name := range scope.Names() { + o := scope.Lookup(name) + path := append(empty, name...) 
+ if _, ok := o.(*types.TypeName); !ok { + if o.Exported() { + // exported non-type (const, var, func) + if r := find(obj, o.Type(), append(path, opType)); r != nil { + return Path(r), nil + } + } + continue + } + + // Inspect declared methods of defined types. + if T, ok := o.Type().(*types.Named); ok { + path = append(path, opType) + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return Path(path2), nil // found declared method + } + if r := find(obj, m.Type(), append(path2, opType)); r != nil { + return Path(r), nil + } + } + } + } + + return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path()) +} + +func appendOpArg(path []byte, op byte, arg int) []byte { + path = append(path, op) + path = strconv.AppendInt(path, int64(arg), 10) + return path +} + +// find finds obj within type T, returning the path to it, or nil if not found. +func find(obj types.Object, T types.Type, path []byte) []byte { + switch T := T.(type) { + case *types.Basic, *types.Named: + // Named types belonging to pkg were handled already, + // so T must belong to another package. No path. + return nil + case *types.Pointer: + return find(obj, T.Elem(), append(path, opElem)) + case *types.Slice: + return find(obj, T.Elem(), append(path, opElem)) + case *types.Array: + return find(obj, T.Elem(), append(path, opElem)) + case *types.Chan: + return find(obj, T.Elem(), append(path, opElem)) + case *types.Map: + if r := find(obj, T.Key(), append(path, opKey)); r != nil { + return r + } + return find(obj, T.Elem(), append(path, opElem)) + case *types.Signature: + if r := find(obj, T.Params(), append(path, opParams)); r != nil { + return r + } + return find(obj, T.Results(), append(path, opResults)) + case *types.Struct: + for i := 0; i < T.NumFields(); i++ { + f := T.Field(i) + path2 := appendOpArg(path, opField, i) + if f == obj { + return path2 // found field var + } + if r := find(obj, f.Type(), append(path2, opType)); r != nil { + return r + } + } + return nil + case *types.Tuple: + for i := 0; i < T.Len(); i++ { + v := T.At(i) + path2 := appendOpArg(path, opAt, i) + if v == obj { + return path2 // found param/result var + } + if r := find(obj, v.Type(), append(path2, opType)); r != nil { + return r + } + } + return nil + case *types.Interface: + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return path2 // found interface method + } + if r := find(obj, m.Type(), append(path2, opType)); r != nil { + return r + } + } + return nil + } + panic(T) +} + +// Object returns the object denoted by path p within the package pkg. +func Object(pkg *types.Package, p Path) (types.Object, error) { + if p == "" { + return nil, fmt.Errorf("empty path") + } + + pathstr := string(p) + var pkgobj, suffix string + if dot := strings.IndexByte(pathstr, opType); dot < 0 { + pkgobj = pathstr + } else { + pkgobj = pathstr[:dot] + suffix = pathstr[dot:] // suffix starts with "." + } + + obj := pkg.Scope().Lookup(pkgobj) + if obj == nil { + return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj) + } + + // abstraction of *types.{Pointer,Slice,Array,Chan,Map} + type hasElem interface { + Elem() types.Type + } + // abstraction of *types.{Interface,Named} + type hasMethods interface { + Method(int) *types.Func + NumMethods() int + } + + // The loop state is the pair (t, obj), + // exactly one of which is non-nil, initially obj. + // All suffixes start with '.' 
(the only object->type operation), + // followed by optional type->type operations, + // then a type->object operation. + // The cycle then repeats. + var t types.Type + for suffix != "" { + code := suffix[0] + suffix = suffix[1:] + + // Codes [AFM] have an integer operand. + var index int + switch code { + case opAt, opField, opMethod: + rest := strings.TrimLeft(suffix, "0123456789") + numerals := suffix[:len(suffix)-len(rest)] + suffix = rest + i, err := strconv.Atoi(numerals) + if err != nil { + return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code) + } + index = int(i) + case opObj: + // no operand + default: + // The suffix must end with a type->object operation. + if suffix == "" { + return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code) + } + } + + if code == opType { + if t != nil { + return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType) + } + t = obj.Type() + obj = nil + continue + } + + if t == nil { + return nil, fmt.Errorf("invalid path: code %q in object context", code) + } + + // Inv: t != nil, obj == nil + + switch code { + case opElem: + hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t) + } + t = hasElem.Elem() + + case opKey: + mapType, ok := t.(*types.Map) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t) + } + t = mapType.Key() + + case opParams: + sig, ok := t.(*types.Signature) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + t = sig.Params() + + case opResults: + sig, ok := t.(*types.Signature) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + t = sig.Results() + + case opUnderlying: + named, ok := t.(*types.Named) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %s, want named)", code, t, t) + } + t = named.Underlying() + + case opAt: + tuple, ok := t.(*types.Tuple) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %s, want tuple)", code, t, t) + } + if n := tuple.Len(); index >= n { + return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) + } + obj = tuple.At(index) + t = nil + + case opField: + structType, ok := t.(*types.Struct) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t) + } + if n := structType.NumFields(); index >= n { + return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n) + } + obj = structType.Field(index) + t = nil + + case opMethod: + hasMethods, ok := t.(hasMethods) // Interface or Named + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %s, want interface or named)", code, t, t) + } + if n := hasMethods.NumMethods(); index >= n { + return nil, fmt.Errorf("method index %d out of range [0-%d)", index, n) + } + obj = hasMethods.Method(index) + t = nil + + case opObj: + named, ok := t.(*types.Named) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %s, want named)", code, t, t) + } + obj = named.Obj() + t = nil + + default: + return nil, fmt.Errorf("invalid path: unknown code %q", code) + } + } + + if obj.Pkg() != pkg { + return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj) + } + + return obj, nil // success +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go 
new file mode 100644 index 000000000..38f596daf --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/callee.go @@ -0,0 +1,46 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/go/ast/astutil" +) + +// Callee returns the named target of a function call, if any: +// a function, method, builtin, or variable. +func Callee(info *types.Info, call *ast.CallExpr) types.Object { + var obj types.Object + switch fun := astutil.Unparen(call.Fun).(type) { + case *ast.Ident: + obj = info.Uses[fun] // type, var, builtin, or declared func + case *ast.SelectorExpr: + if sel, ok := info.Selections[fun]; ok { + obj = sel.Obj() // method or field + } else { + obj = info.Uses[fun.Sel] // qualified identifier? + } + } + if _, ok := obj.(*types.TypeName); ok { + return nil // T(x) is a conversion, not a call + } + return obj +} + +// StaticCallee returns the target (function or method) of a static +// function call, if any. It returns nil for calls to builtins. +func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func { + if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) { + return f + } + return nil +} + +func interfaceMethod(f *types.Func) bool { + recv := f.Type().(*types.Signature).Recv() + return recv != nil && types.IsInterface(recv.Type()) +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/imports.go b/vendor/golang.org/x/tools/go/types/typeutil/imports.go new file mode 100644 index 000000000..9c441dba9 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/imports.go @@ -0,0 +1,31 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +import "go/types" + +// Dependencies returns all dependencies of the specified packages. +// +// Dependent packages appear in topological order: if package P imports +// package Q, Q appears earlier than P in the result. +// The algorithm follows import statements in the order they +// appear in the source code, so the result is a total order. +// +func Dependencies(pkgs ...*types.Package) []*types.Package { + var result []*types.Package + seen := make(map[*types.Package]bool) + var visit func(pkgs []*types.Package) + visit = func(pkgs []*types.Package) { + for _, p := range pkgs { + if !seen[p] { + seen[p] = true + visit(p.Imports()) + result = append(result, p) + } + } + } + visit(pkgs) + return result +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go new file mode 100644 index 000000000..c7f754500 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -0,0 +1,313 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeutil defines various utilities for types, such as Map, +// a mapping from types.Type to interface{} values. +package typeutil // import "golang.org/x/tools/go/types/typeutil" + +import ( + "bytes" + "fmt" + "go/types" + "reflect" +) + +// Map is a hash-table-based mapping from types (types.Type) to +// arbitrary interface{} values. The concrete types that implement +// the Type interface are pointers. 
Since they are not canonicalized, +// == cannot be used to check for equivalence, and thus we cannot +// simply use a Go map. +// +// Just as with map[K]V, a nil *Map is a valid empty map. +// +// Not thread-safe. +// +type Map struct { + hasher Hasher // shared by many Maps + table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused + length int // number of map entries +} + +// entry is an entry (key/value association) in a hash bucket. +type entry struct { + key types.Type + value interface{} +} + +// SetHasher sets the hasher used by Map. +// +// All Hashers are functionally equivalent but contain internal state +// used to cache the results of hashing previously seen types. +// +// A single Hasher created by MakeHasher() may be shared among many +// Maps. This is recommended if the instances have many keys in +// common, as it will amortize the cost of hash computation. +// +// A Hasher may grow without bound as new types are seen. Even when a +// type is deleted from the map, the Hasher never shrinks, since other +// types in the map may reference the deleted type indirectly. +// +// Hashers are not thread-safe, and read-only operations such as +// Map.Lookup require updates to the hasher, so a full Mutex lock (not a +// read-lock) is require around all Map operations if a shared +// hasher is accessed from multiple threads. +// +// If SetHasher is not called, the Map will create a private hasher at +// the first call to Insert. +// +func (m *Map) SetHasher(hasher Hasher) { + m.hasher = hasher +} + +// Delete removes the entry with the given key, if any. +// It returns true if the entry was found. +// +func (m *Map) Delete(key types.Type) bool { + if m != nil && m.table != nil { + hash := m.hasher.Hash(key) + bucket := m.table[hash] + for i, e := range bucket { + if e.key != nil && types.Identical(key, e.key) { + // We can't compact the bucket as it + // would disturb iterators. + bucket[i] = entry{} + m.length-- + return true + } + } + } + return false +} + +// At returns the map entry for the given key. +// The result is nil if the entry is not present. +// +func (m *Map) At(key types.Type) interface{} { + if m != nil && m.table != nil { + for _, e := range m.table[m.hasher.Hash(key)] { + if e.key != nil && types.Identical(key, e.key) { + return e.value + } + } + } + return nil +} + +// Set sets the map entry for key to val, +// and returns the previous entry, if any. +func (m *Map) Set(key types.Type, value interface{}) (prev interface{}) { + if m.table != nil { + hash := m.hasher.Hash(key) + bucket := m.table[hash] + var hole *entry + for i, e := range bucket { + if e.key == nil { + hole = &bucket[i] + } else if types.Identical(key, e.key) { + prev = e.value + bucket[i].value = value + return + } + } + + if hole != nil { + *hole = entry{key, value} // overwrite deleted entry + } else { + m.table[hash] = append(bucket, entry{key, value}) + } + } else { + if m.hasher.memo == nil { + m.hasher = MakeHasher() + } + hash := m.hasher.Hash(key) + m.table = map[uint32][]entry{hash: {entry{key, value}}} + } + + m.length++ + return +} + +// Len returns the number of map entries. +func (m *Map) Len() int { + if m != nil { + return m.length + } + return 0 +} + +// Iterate calls function f on each entry in the map in unspecified order. 
+// +// If f should mutate the map, Iterate provides the same guarantees as +// Go maps: if f deletes a map entry that Iterate has not yet reached, +// f will not be invoked for it, but if f inserts a map entry that +// Iterate has not yet reached, whether or not f will be invoked for +// it is unspecified. +// +func (m *Map) Iterate(f func(key types.Type, value interface{})) { + if m != nil { + for _, bucket := range m.table { + for _, e := range bucket { + if e.key != nil { + f(e.key, e.value) + } + } + } + } +} + +// Keys returns a new slice containing the set of map keys. +// The order is unspecified. +func (m *Map) Keys() []types.Type { + keys := make([]types.Type, 0, m.Len()) + m.Iterate(func(key types.Type, _ interface{}) { + keys = append(keys, key) + }) + return keys +} + +func (m *Map) toString(values bool) string { + if m == nil { + return "{}" + } + var buf bytes.Buffer + fmt.Fprint(&buf, "{") + sep := "" + m.Iterate(func(key types.Type, value interface{}) { + fmt.Fprint(&buf, sep) + sep = ", " + fmt.Fprint(&buf, key) + if values { + fmt.Fprintf(&buf, ": %q", value) + } + }) + fmt.Fprint(&buf, "}") + return buf.String() +} + +// String returns a string representation of the map's entries. +// Values are printed using fmt.Sprintf("%v", v). +// Order is unspecified. +// +func (m *Map) String() string { + return m.toString(true) +} + +// KeysString returns a string representation of the map's key set. +// Order is unspecified. +// +func (m *Map) KeysString() string { + return m.toString(false) +} + +//////////////////////////////////////////////////////////////////////// +// Hasher + +// A Hasher maps each type to its hash value. +// For efficiency, a hasher uses memoization; thus its memory +// footprint grows monotonically over time. +// Hashers are not thread-safe. +// Hashers have reference semantics. +// Call MakeHasher to create a Hasher. +type Hasher struct { + memo map[types.Type]uint32 +} + +// MakeHasher returns a new Hasher instance. +func MakeHasher() Hasher { + return Hasher{make(map[types.Type]uint32)} +} + +// Hash computes a hash value for the given type t such that +// Identical(t, t') => Hash(t) == Hash(t'). +func (h Hasher) Hash(t types.Type) uint32 { + hash, ok := h.memo[t] + if !ok { + hash = h.hashFor(t) + h.memo[t] = hash + } + return hash +} + +// hashString computes the Fowler–Noll–Vo hash of s. +func hashString(s string) uint32 { + var h uint32 + for i := 0; i < len(s); i++ { + h ^= uint32(s[i]) + h *= 16777619 + } + return h +} + +// hashFor computes the hash of t. +func (h Hasher) hashFor(t types.Type) uint32 { + // See Identical for rationale. + switch t := t.(type) { + case *types.Basic: + return uint32(t.Kind()) + + case *types.Array: + return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem()) + + case *types.Slice: + return 9049 + 2*h.Hash(t.Elem()) + + case *types.Struct: + var hash uint32 = 9059 + for i, n := 0, t.NumFields(); i < n; i++ { + f := t.Field(i) + if f.Anonymous() { + hash += 8861 + } + hash += hashString(t.Tag(i)) + hash += hashString(f.Name()) // (ignore f.Pkg) + hash += h.Hash(f.Type()) + } + return hash + + case *types.Pointer: + return 9067 + 2*h.Hash(t.Elem()) + + case *types.Signature: + var hash uint32 = 9091 + if t.Variadic() { + hash *= 8863 + } + return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) + + case *types.Interface: + var hash uint32 = 9103 + for i, n := 0, t.NumMethods(); i < n; i++ { + // See go/types.identicalMethods for rationale. + // Method order is not significant. + // Ignore m.Pkg(). 
+ m := t.Method(i) + hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type()) + } + return hash + + case *types.Map: + return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem()) + + case *types.Chan: + return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem()) + + case *types.Named: + // Not safe with a copying GC; objects may move. + return uint32(reflect.ValueOf(t.Obj()).Pointer()) + + case *types.Tuple: + return h.hashTuple(t) + } + panic(t) +} + +func (h Hasher) hashTuple(tuple *types.Tuple) uint32 { + // See go/types.identicalTypes for rationale. + n := tuple.Len() + var hash uint32 = 9137 + 2*uint32(n) + for i := 0; i < n; i++ { + hash += 3 * h.Hash(tuple.At(i).Type()) + } + return hash +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go new file mode 100644 index 000000000..32084610f --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go @@ -0,0 +1,72 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements a cache of method sets. + +package typeutil + +import ( + "go/types" + "sync" +) + +// A MethodSetCache records the method set of each type T for which +// MethodSet(T) is called so that repeat queries are fast. +// The zero value is a ready-to-use cache instance. +type MethodSetCache struct { + mu sync.Mutex + named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N + others map[types.Type]*types.MethodSet // all other types +} + +// MethodSet returns the method set of type T. It is thread-safe. +// +// If cache is nil, this function is equivalent to types.NewMethodSet(T). +// Utility functions can thus expose an optional *MethodSetCache +// parameter to clients that care about performance. +// +func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet { + if cache == nil { + return types.NewMethodSet(T) + } + cache.mu.Lock() + defer cache.mu.Unlock() + + switch T := T.(type) { + case *types.Named: + return cache.lookupNamed(T).value + + case *types.Pointer: + if N, ok := T.Elem().(*types.Named); ok { + return cache.lookupNamed(N).pointer + } + } + + // all other types + // (The map uses pointer equivalence, not type identity.) + mset := cache.others[T] + if mset == nil { + mset = types.NewMethodSet(T) + if cache.others == nil { + cache.others = make(map[types.Type]*types.MethodSet) + } + cache.others[T] = mset + } + return mset +} + +func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } { + if cache.named == nil { + cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet }) + } + // Avoid recomputing mset(*T) for each distinct Pointer + // instance whose underlying type is a named type. + msets, ok := cache.named[named] + if !ok { + msets.value = types.NewMethodSet(named) + msets.pointer = types.NewMethodSet(types.NewPointer(named)) + cache.named[named] = msets + } + return msets +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/ui.go b/vendor/golang.org/x/tools/go/types/typeutil/ui.go new file mode 100644 index 000000000..9849c24ce --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/ui.go @@ -0,0 +1,52 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package typeutil + +// This file defines utilities for user interfaces that display types. + +import "go/types" + +// IntuitiveMethodSet returns the intuitive method set of a type T, +// which is the set of methods you can call on an addressable value of +// that type. +// +// The result always contains MethodSet(T), and is exactly MethodSet(T) +// for interface types and for pointer-to-concrete types. +// For all other concrete types T, the result additionally +// contains each method belonging to *T if there is no identically +// named method on T itself. +// +// This corresponds to user intuition about method sets; +// this function is intended only for user interfaces. +// +// The order of the result is as for types.MethodSet(T). +// +func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection { + isPointerToConcrete := func(T types.Type) bool { + ptr, ok := T.(*types.Pointer) + return ok && !types.IsInterface(ptr.Elem()) + } + + var result []*types.Selection + mset := msets.MethodSet(T) + if types.IsInterface(T) || isPointerToConcrete(T) { + for i, n := 0, mset.Len(); i < n; i++ { + result = append(result, mset.At(i)) + } + } else { + // T is some other concrete type. + // Report methods of T and *T, preferring those of T. + pmset := msets.MethodSet(types.NewPointer(T)) + for i, n := 0, pmset.Len(); i < n; i++ { + meth := pmset.At(i) + if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil { + meth = m + } + result = append(result, meth) + } + + } + return result +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go new file mode 100644 index 000000000..7219c8e9f --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go @@ -0,0 +1,196 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fastwalk provides a faster version of filepath.Walk for file system +// scanning tools. +package fastwalk + +import ( + "errors" + "os" + "path/filepath" + "runtime" + "sync" +) + +// TraverseLink is used as a return value from WalkFuncs to indicate that the +// symlink named in the call may be traversed. +var TraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory") + +// SkipFiles is a used as a return value from WalkFuncs to indicate that the +// callback should not be called for any other files in the current directory. +// Child directories will still be traversed. +var SkipFiles = errors.New("fastwalk: skip remaining files in directory") + +// Walk is a faster implementation of filepath.Walk. +// +// filepath.Walk's design necessarily calls os.Lstat on each file, +// even if the caller needs less info. +// Many tools need only the type of each file. +// On some platforms, this information is provided directly by the readdir +// system call, avoiding the need to stat each file individually. +// fastwalk_unix.go contains a fork of the syscall routines. +// +// See golang.org/issue/16399 +// +// Walk walks the file tree rooted at root, calling walkFn for +// each file or directory in the tree, including root. +// +// If fastWalk returns filepath.SkipDir, the directory is skipped. +// +// Unlike filepath.Walk: +// * file stat calls must be done by the user. +// The only provided metadata is the file type, which does not include +// any permission bits. +// * multiple goroutines stat the filesystem concurrently. 
The provided +// walkFn must be safe for concurrent use. +// * fastWalk can follow symlinks if walkFn returns the TraverseLink +// sentinel error. It is the walkFn's responsibility to prevent +// fastWalk from going into symlink cycles. +func Walk(root string, walkFn func(path string, typ os.FileMode) error) error { + // TODO(bradfitz): make numWorkers configurable? We used a + // minimum of 4 to give the kernel more info about multiple + // things we want, in hopes its I/O scheduling can take + // advantage of that. Hopefully most are in cache. Maybe 4 is + // even too low of a minimum. Profile more. + numWorkers := 4 + if n := runtime.NumCPU(); n > numWorkers { + numWorkers = n + } + + // Make sure to wait for all workers to finish, otherwise + // walkFn could still be called after returning. This Wait call + // runs after close(e.donec) below. + var wg sync.WaitGroup + defer wg.Wait() + + w := &walker{ + fn: walkFn, + enqueuec: make(chan walkItem, numWorkers), // buffered for performance + workc: make(chan walkItem, numWorkers), // buffered for performance + donec: make(chan struct{}), + + // buffered for correctness & not leaking goroutines: + resc: make(chan error, numWorkers), + } + defer close(w.donec) + + for i := 0; i < numWorkers; i++ { + wg.Add(1) + go w.doWork(&wg) + } + todo := []walkItem{{dir: root}} + out := 0 + for { + workc := w.workc + var workItem walkItem + if len(todo) == 0 { + workc = nil + } else { + workItem = todo[len(todo)-1] + } + select { + case workc <- workItem: + todo = todo[:len(todo)-1] + out++ + case it := <-w.enqueuec: + todo = append(todo, it) + case err := <-w.resc: + out-- + if err != nil { + return err + } + if out == 0 && len(todo) == 0 { + // It's safe to quit here, as long as the buffered + // enqueue channel isn't also readable, which might + // happen if the worker sends both another unit of + // work and its result before the other select was + // scheduled and both w.resc and w.enqueuec were + // readable. + select { + case it := <-w.enqueuec: + todo = append(todo, it) + default: + return nil + } + } + } + } +} + +// doWork reads directories as instructed (via workc) and runs the +// user's callback function. +func (w *walker) doWork(wg *sync.WaitGroup) { + defer wg.Done() + for { + select { + case <-w.donec: + return + case it := <-w.workc: + select { + case <-w.donec: + return + case w.resc <- w.walk(it.dir, !it.callbackDone): + } + } + } +} + +type walker struct { + fn func(path string, typ os.FileMode) error + + donec chan struct{} // closed on fastWalk's return + workc chan walkItem // to workers + enqueuec chan walkItem // from workers + resc chan error // from workers +} + +type walkItem struct { + dir string + callbackDone bool // callback already called; don't do it again +} + +func (w *walker) enqueue(it walkItem) { + select { + case w.enqueuec <- it: + case <-w.donec: + } +} + +func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error { + joined := dirName + string(os.PathSeparator) + baseName + if typ == os.ModeDir { + w.enqueue(walkItem{dir: joined}) + return nil + } + + err := w.fn(joined, typ) + if typ == os.ModeSymlink { + if err == TraverseLink { + // Set callbackDone so we don't call it twice for both the + // symlink-as-symlink and the symlink-as-directory later: + w.enqueue(walkItem{dir: joined, callbackDone: true}) + return nil + } + if err == filepath.SkipDir { + // Permit SkipDir on symlinks too. 
+ return nil + } + } + return err +} + +func (w *walker) walk(root string, runUserCallback bool) error { + if runUserCallback { + err := w.fn(root, os.ModeDir) + if err == filepath.SkipDir { + return nil + } + if err != nil { + return err + } + } + + return readDir(root, w.onDirEnt) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go new file mode 100644 index 000000000..ccffec5ad --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go @@ -0,0 +1,13 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd + +package fastwalk + +import "syscall" + +func direntInode(dirent *syscall.Dirent) uint64 { + return uint64(dirent.Fileno) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go new file mode 100644 index 000000000..ab7fbc0a9 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin +// +build !appengine + +package fastwalk + +import "syscall" + +func direntInode(dirent *syscall.Dirent) uint64 { + return uint64(dirent.Ino) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go new file mode 100644 index 000000000..a3b26a7ba --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd openbsd netbsd + +package fastwalk + +import "syscall" + +func direntNamlen(dirent *syscall.Dirent) uint64 { + return uint64(dirent.Namlen) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go new file mode 100644 index 000000000..e880d358b --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build linux +// +build !appengine + +package fastwalk + +import ( + "bytes" + "syscall" + "unsafe" +) + +func direntNamlen(dirent *syscall.Dirent) uint64 { + const fixedHdr = uint16(unsafe.Offsetof(syscall.Dirent{}.Name)) + nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0])) + const nameBufLen = uint16(len(nameBuf)) + limit := dirent.Reclen - fixedHdr + if limit > nameBufLen { + limit = nameBufLen + } + nameLen := bytes.IndexByte(nameBuf[:limit], 0) + if nameLen < 0 { + panic("failed to find terminating 0 byte in dirent") + } + return uint64(nameLen) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go new file mode 100644 index 000000000..a906b8759 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go @@ -0,0 +1,37 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine !linux,!darwin,!freebsd,!openbsd,!netbsd + +package fastwalk + +import ( + "io/ioutil" + "os" +) + +// readDir calls fn for each directory entry in dirName. +// It does not descend into directories or follow symlinks. +// If fn returns a non-nil error, readDir returns with that error +// immediately. +func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { + fis, err := ioutil.ReadDir(dirName) + if err != nil { + return err + } + skipFiles := false + for _, fi := range fis { + if fi.Mode().IsRegular() && skipFiles { + continue + } + if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil { + if err == SkipFiles { + skipFiles = true + continue + } + return err + } + } + return nil +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go new file mode 100644 index 000000000..3369b1a0b --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go @@ -0,0 +1,127 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin freebsd openbsd netbsd +// +build !appengine + +package fastwalk + +import ( + "fmt" + "os" + "syscall" + "unsafe" +) + +const blockSize = 8 << 10 + +// unknownFileMode is a sentinel (and bogus) os.FileMode +// value used to represent a syscall.DT_UNKNOWN Dirent.Type. +const unknownFileMode os.FileMode = os.ModeNamedPipe | os.ModeSocket | os.ModeDevice + +func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { + fd, err := syscall.Open(dirName, 0, 0) + if err != nil { + return &os.PathError{Op: "open", Path: dirName, Err: err} + } + defer syscall.Close(fd) + + // The buffer must be at least a block long. + buf := make([]byte, blockSize) // stack-allocated; doesn't escape + bufp := 0 // starting read position in buf + nbuf := 0 // end valid data in buf + skipFiles := false + for { + if bufp >= nbuf { + bufp = 0 + nbuf, err = syscall.ReadDirent(fd, buf) + if err != nil { + return os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + return nil + } + } + consumed, name, typ := parseDirEnt(buf[bufp:nbuf]) + bufp += consumed + if name == "" || name == "." || name == ".." 
{ + continue + } + // Fallback for filesystems (like old XFS) that don't + // support Dirent.Type and have DT_UNKNOWN (0) there + // instead. + if typ == unknownFileMode { + fi, err := os.Lstat(dirName + "/" + name) + if err != nil { + // It got deleted in the meantime. + if os.IsNotExist(err) { + continue + } + return err + } + typ = fi.Mode() & os.ModeType + } + if skipFiles && typ.IsRegular() { + continue + } + if err := fn(dirName, name, typ); err != nil { + if err == SkipFiles { + skipFiles = true + continue + } + return err + } + } +} + +func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) { + // golang.org/issue/15653 + dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) + if v := unsafe.Offsetof(dirent.Reclen) + unsafe.Sizeof(dirent.Reclen); uintptr(len(buf)) < v { + panic(fmt.Sprintf("buf size of %d smaller than dirent header size %d", len(buf), v)) + } + if len(buf) < int(dirent.Reclen) { + panic(fmt.Sprintf("buf size %d < record length %d", len(buf), dirent.Reclen)) + } + consumed = int(dirent.Reclen) + if direntInode(dirent) == 0 { // File absent in directory. + return + } + switch dirent.Type { + case syscall.DT_REG: + typ = 0 + case syscall.DT_DIR: + typ = os.ModeDir + case syscall.DT_LNK: + typ = os.ModeSymlink + case syscall.DT_BLK: + typ = os.ModeDevice + case syscall.DT_FIFO: + typ = os.ModeNamedPipe + case syscall.DT_SOCK: + typ = os.ModeSocket + case syscall.DT_UNKNOWN: + typ = unknownFileMode + default: + // Skip weird things. + // It's probably a DT_WHT (http://lwn.net/Articles/325369/) + // or something. Revisit if/when this package is moved outside + // of goimports. goimports only cares about regular files, + // symlinks, and directories. + return + } + + nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0])) + nameLen := direntNamlen(dirent) + + // Special cases for common things: + if nameLen == 1 && nameBuf[0] == '.' { + name = "." + } else if nameLen == 2 && nameBuf[0] == '.' && nameBuf[1] == '.' { + name = ".." + } else { + name = string(nameBuf[:nameLen]) + } + return +} diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go new file mode 100644 index 000000000..d06756228 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -0,0 +1,273 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gopathwalk is like filepath.Walk but specialized for finding Go +// packages, particularly in $GOPATH and $GOROOT. +package gopathwalk + +import ( + "bufio" + "bytes" + "fmt" + "go/build" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/tools/internal/fastwalk" +) + +// Options controls the behavior of a Walk call. +type Options struct { + Debug bool // Enable debug logging + ModulesEnabled bool // Search module caches. Also disables legacy goimports ignore rules. +} + +// RootType indicates the type of a Root. +type RootType int + +const ( + RootUnknown RootType = iota + RootGOROOT + RootGOPATH + RootCurrentModule + RootModuleCache + RootOther +) + +// A Root is a starting point for a Walk. +type Root struct { + Path string + Type RootType +} + +// SrcDirsRoots returns the roots from build.Default.SrcDirs(). Not modules-compatible. 
+func SrcDirsRoots(ctx *build.Context) []Root { + var roots []Root + roots = append(roots, Root{filepath.Join(ctx.GOROOT, "src"), RootGOROOT}) + for _, p := range filepath.SplitList(ctx.GOPATH) { + roots = append(roots, Root{filepath.Join(p, "src"), RootGOPATH}) + } + return roots +} + +// Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. +// For each package found, add will be called (concurrently) with the absolute +// paths of the containing source directory and the package directory. +// add will be called concurrently. +func Walk(roots []Root, add func(root Root, dir string), opts Options) { + WalkSkip(roots, add, func(Root, string) bool { return false }, opts) +} + +// WalkSkip walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. +// For each package found, add will be called (concurrently) with the absolute +// paths of the containing source directory and the package directory. +// For each directory that will be scanned, skip will be called (concurrently) +// with the absolute paths of the containing source directory and the directory. +// If skip returns false on a directory it will be processed. +// add will be called concurrently. +// skip will be called concurrently. +func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root, dir string) bool, opts Options) { + for _, root := range roots { + walkDir(root, add, skip, opts) + } +} + +// walkDir creates a walker and starts fastwalk with this walker. +func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) { + if _, err := os.Stat(root.Path); os.IsNotExist(err) { + if opts.Debug { + log.Printf("skipping nonexistent directory: %v", root.Path) + } + return + } + start := time.Now() + if opts.Debug { + log.Printf("gopathwalk: scanning %s", root.Path) + } + w := &walker{ + root: root, + add: add, + skip: skip, + opts: opts, + } + w.init() + if err := fastwalk.Walk(root.Path, w.walk); err != nil { + log.Printf("gopathwalk: scanning directory %v: %v", root.Path, err) + } + + if opts.Debug { + log.Printf("gopathwalk: scanned %s in %v", root.Path, time.Since(start)) + } +} + +// walker is the callback for fastwalk.Walk. +type walker struct { + root Root // The source directory to scan. + add func(Root, string) // The callback that will be invoked for every possible Go package dir. + skip func(Root, string) bool // The callback that will be invoked for every dir. dir is skipped if it returns true. + opts Options // Options passed to Walk by the user. + + ignoredDirs []os.FileInfo // The ignored directories, loaded from .goimportsignore files. +} + +// init initializes the walker based on its Options +func (w *walker) init() { + var ignoredPaths []string + if w.root.Type == RootModuleCache { + ignoredPaths = []string{"cache"} + } + if !w.opts.ModulesEnabled && w.root.Type == RootGOPATH { + ignoredPaths = w.getIgnoredDirs(w.root.Path) + ignoredPaths = append(ignoredPaths, "v", "mod") + } + + for _, p := range ignoredPaths { + full := filepath.Join(w.root.Path, p) + if fi, err := os.Stat(full); err == nil { + w.ignoredDirs = append(w.ignoredDirs, fi) + if w.opts.Debug { + log.Printf("Directory added to ignore list: %s", full) + } + } else if w.opts.Debug { + log.Printf("Error statting ignored directory: %v", err) + } + } +} + +// getIgnoredDirs reads an optional config file at /.goimportsignore +// of relative directories to ignore when scanning for go files. +// The provided path is one of the $GOPATH entries with "src" appended. 
+func (w *walker) getIgnoredDirs(path string) []string { + file := filepath.Join(path, ".goimportsignore") + slurp, err := ioutil.ReadFile(file) + if w.opts.Debug { + if err != nil { + log.Print(err) + } else { + log.Printf("Read %s", file) + } + } + if err != nil { + return nil + } + + var ignoredDirs []string + bs := bufio.NewScanner(bytes.NewReader(slurp)) + for bs.Scan() { + line := strings.TrimSpace(bs.Text()) + if line == "" || strings.HasPrefix(line, "#") { + continue + } + ignoredDirs = append(ignoredDirs, line) + } + return ignoredDirs +} + +// shouldSkipDir reports whether the file should be skipped or not. +func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool { + for _, ignoredDir := range w.ignoredDirs { + if os.SameFile(fi, ignoredDir) { + return true + } + } + if w.skip != nil { + // Check with the user specified callback. + return w.skip(w.root, dir) + } + return false +} + +// walk walks through the given path. +func (w *walker) walk(path string, typ os.FileMode) error { + dir := filepath.Dir(path) + if typ.IsRegular() { + if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) { + // Doesn't make sense to have regular files + // directly in your $GOPATH/src or $GOROOT/src. + return fastwalk.SkipFiles + } + if !strings.HasSuffix(path, ".go") { + return nil + } + + w.add(w.root, dir) + return fastwalk.SkipFiles + } + if typ == os.ModeDir { + base := filepath.Base(path) + if base == "" || base[0] == '.' || base[0] == '_' || + base == "testdata" || + (w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") || + (!w.opts.ModulesEnabled && base == "node_modules") { + return filepath.SkipDir + } + fi, err := os.Lstat(path) + if err == nil && w.shouldSkipDir(fi, path) { + return filepath.SkipDir + } + return nil + } + if typ == os.ModeSymlink { + base := filepath.Base(path) + if strings.HasPrefix(base, ".#") { + // Emacs noise. + return nil + } + fi, err := os.Lstat(path) + if err != nil { + // Just ignore it. + return nil + } + if w.shouldTraverse(dir, fi) { + return fastwalk.TraverseLink + } + } + return nil +} + +// shouldTraverse reports whether the symlink fi, found in dir, +// should be followed. It makes sure symlinks were never visited +// before to avoid symlink loops. +func (w *walker) shouldTraverse(dir string, fi os.FileInfo) bool { + path := filepath.Join(dir, fi.Name()) + target, err := filepath.EvalSymlinks(path) + if err != nil { + return false + } + ts, err := os.Stat(target) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return false + } + if !ts.IsDir() { + return false + } + if w.shouldSkipDir(ts, dir) { + return false + } + // Check for symlink loops by statting each directory component + // and seeing if any are the same file as ts. + for { + parent := filepath.Dir(path) + if parent == path { + // Made it to the root without seeing a cycle. + // Use this symlink. + return true + } + parentInfo, err := os.Stat(parent) + if err != nil { + return false + } + if os.SameFile(ts, parentInfo) { + // Cycle. Don't traverse. + return false + } + path = parent + } + +} diff --git a/vendor/golang.org/x/tools/internal/semver/semver.go b/vendor/golang.org/x/tools/internal/semver/semver.go new file mode 100644 index 000000000..4af7118e5 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/semver/semver.go @@ -0,0 +1,388 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package semver implements comparison of semantic version strings. +// In this package, semantic version strings must begin with a leading "v", +// as in "v1.0.0". +// +// The general form of a semantic version string accepted by this package is +// +// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]] +// +// where square brackets indicate optional parts of the syntax; +// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros; +// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers +// using only alphanumeric characters and hyphens; and +// all-numeric PRERELEASE identifiers must not have leading zeros. +// +// This package follows Semantic Versioning 2.0.0 (see semver.org) +// with two exceptions. First, it requires the "v" prefix. Second, it recognizes +// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes) +// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0. +package semver + +// parsed returns the parsed form of a semantic version string. +type parsed struct { + major string + minor string + patch string + short string + prerelease string + build string + err string +} + +// IsValid reports whether v is a valid semantic version string. +func IsValid(v string) bool { + _, ok := parse(v) + return ok +} + +// Canonical returns the canonical formatting of the semantic version v. +// It fills in any missing .MINOR or .PATCH and discards build metadata. +// Two semantic versions compare equal only if their canonical formattings +// are identical strings. +// The canonical invalid semantic version is the empty string. +func Canonical(v string) string { + p, ok := parse(v) + if !ok { + return "" + } + if p.build != "" { + return v[:len(v)-len(p.build)] + } + if p.short != "" { + return v + p.short + } + return v +} + +// Major returns the major version prefix of the semantic version v. +// For example, Major("v2.1.0") == "v2". +// If v is an invalid semantic version string, Major returns the empty string. +func Major(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return v[:1+len(pv.major)] +} + +// MajorMinor returns the major.minor version prefix of the semantic version v. +// For example, MajorMinor("v2.1.0") == "v2.1". +// If v is an invalid semantic version string, MajorMinor returns the empty string. +func MajorMinor(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + i := 1 + len(pv.major) + if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor { + return v[:j] + } + return v[:i] + "." + pv.minor +} + +// Prerelease returns the prerelease suffix of the semantic version v. +// For example, Prerelease("v2.1.0-pre+meta") == "-pre". +// If v is an invalid semantic version string, Prerelease returns the empty string. +func Prerelease(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.prerelease +} + +// Build returns the build suffix of the semantic version v. +// For example, Build("v2.1.0+meta") == "+meta". +// If v is an invalid semantic version string, Build returns the empty string. +func Build(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.build +} + +// Compare returns an integer comparing two versions according to +// according to semantic version precedence. +// The result will be 0 if v == w, -1 if v < w, or +1 if v > w. +// +// An invalid semantic version string is considered less than a valid one. +// All invalid semantic version strings compare equal to each other. 
+func Compare(v, w string) int { + pv, ok1 := parse(v) + pw, ok2 := parse(w) + if !ok1 && !ok2 { + return 0 + } + if !ok1 { + return -1 + } + if !ok2 { + return +1 + } + if c := compareInt(pv.major, pw.major); c != 0 { + return c + } + if c := compareInt(pv.minor, pw.minor); c != 0 { + return c + } + if c := compareInt(pv.patch, pw.patch); c != 0 { + return c + } + return comparePrerelease(pv.prerelease, pw.prerelease) +} + +// Max canonicalizes its arguments and then returns the version string +// that compares greater. +func Max(v, w string) string { + v = Canonical(v) + w = Canonical(w) + if Compare(v, w) > 0 { + return v + } + return w +} + +func parse(v string) (p parsed, ok bool) { + if v == "" || v[0] != 'v' { + p.err = "missing v prefix" + return + } + p.major, v, ok = parseInt(v[1:]) + if !ok { + p.err = "bad major version" + return + } + if v == "" { + p.minor = "0" + p.patch = "0" + p.short = ".0.0" + return + } + if v[0] != '.' { + p.err = "bad minor prefix" + ok = false + return + } + p.minor, v, ok = parseInt(v[1:]) + if !ok { + p.err = "bad minor version" + return + } + if v == "" { + p.patch = "0" + p.short = ".0" + return + } + if v[0] != '.' { + p.err = "bad patch prefix" + ok = false + return + } + p.patch, v, ok = parseInt(v[1:]) + if !ok { + p.err = "bad patch version" + return + } + if len(v) > 0 && v[0] == '-' { + p.prerelease, v, ok = parsePrerelease(v) + if !ok { + p.err = "bad prerelease" + return + } + } + if len(v) > 0 && v[0] == '+' { + p.build, v, ok = parseBuild(v) + if !ok { + p.err = "bad build" + return + } + } + if v != "" { + p.err = "junk on end" + ok = false + return + } + ok = true + return +} + +func parseInt(v string) (t, rest string, ok bool) { + if v == "" { + return + } + if v[0] < '0' || '9' < v[0] { + return + } + i := 1 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + if v[0] == '0' && i != 1 { + return + } + return v[:i], v[i:], true +} + +func parsePrerelease(v string) (t, rest string, ok bool) { + // "A pre-release version MAY be denoted by appending a hyphen and + // a series of dot separated identifiers immediately following the patch version. + // Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-]. + // Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes." + if v == "" || v[0] != '-' { + return + } + i := 1 + start := 1 + for i < len(v) && v[i] != '+' { + if !isIdentChar(v[i]) && v[i] != '.' { + return + } + if v[i] == '.' { + if start == i || isBadNum(v[start:i]) { + return + } + start = i + 1 + } + i++ + } + if start == i || isBadNum(v[start:i]) { + return + } + return v[:i], v[i:], true +} + +func parseBuild(v string) (t, rest string, ok bool) { + if v == "" || v[0] != '+' { + return + } + i := 1 + start := 1 + for i < len(v) { + if !isIdentChar(v[i]) { + return + } + if v[i] == '.' 
{ + if start == i { + return + } + start = i + 1 + } + i++ + } + if start == i { + return + } + return v[:i], v[i:], true +} + +func isIdentChar(c byte) bool { + return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-' +} + +func isBadNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) && i > 1 && v[0] == '0' +} + +func isNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) +} + +func compareInt(x, y string) int { + if x == y { + return 0 + } + if len(x) < len(y) { + return -1 + } + if len(x) > len(y) { + return +1 + } + if x < y { + return -1 + } else { + return +1 + } +} + +func comparePrerelease(x, y string) int { + // "When major, minor, and patch are equal, a pre-release version has + // lower precedence than a normal version. + // Example: 1.0.0-alpha < 1.0.0. + // Precedence for two pre-release versions with the same major, minor, + // and patch version MUST be determined by comparing each dot separated + // identifier from left to right until a difference is found as follows: + // identifiers consisting of only digits are compared numerically and + // identifiers with letters or hyphens are compared lexically in ASCII + // sort order. Numeric identifiers always have lower precedence than + // non-numeric identifiers. A larger set of pre-release fields has a + // higher precedence than a smaller set, if all of the preceding + // identifiers are equal. + // Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta < + // 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0." + if x == y { + return 0 + } + if x == "" { + return +1 + } + if y == "" { + return -1 + } + for x != "" && y != "" { + x = x[1:] // skip - or . + y = y[1:] // skip - or . + var dx, dy string + dx, x = nextIdent(x) + dy, y = nextIdent(y) + if dx != dy { + ix := isNum(dx) + iy := isNum(dy) + if ix != iy { + if ix { + return -1 + } else { + return +1 + } + } + if ix { + if len(dx) < len(dy) { + return -1 + } + if len(dx) > len(dy) { + return +1 + } + } + if dx < dy { + return -1 + } else { + return +1 + } + } + } + if x == "" { + return -1 + } else { + return +1 + } +} + +func nextIdent(x string) (dx, rest string) { + i := 0 + for i < len(x) && x[i] != '.' { + i++ + } + return x[:i], x[i:] +} diff --git a/vendor/golang.org/x/xerrors/LICENSE b/vendor/golang.org/x/xerrors/LICENSE new file mode 100644 index 000000000..e4a47e17f --- /dev/null +++ b/vendor/golang.org/x/xerrors/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2019 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/xerrors/PATENTS b/vendor/golang.org/x/xerrors/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/golang.org/x/xerrors/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/xerrors/README b/vendor/golang.org/x/xerrors/README new file mode 100644 index 000000000..aac7867a5 --- /dev/null +++ b/vendor/golang.org/x/xerrors/README @@ -0,0 +1,2 @@ +This repository holds the transition packages for the new Go 1.13 error values. +See golang.org/design/29934-error-values. diff --git a/vendor/golang.org/x/xerrors/adaptor.go b/vendor/golang.org/x/xerrors/adaptor.go new file mode 100644 index 000000000..4317f2483 --- /dev/null +++ b/vendor/golang.org/x/xerrors/adaptor.go @@ -0,0 +1,193 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strconv" +) + +// FormatError calls the FormatError method of f with an errors.Printer +// configured according to s and verb, and writes the result to s. +func FormatError(f Formatter, s fmt.State, verb rune) { + // Assuming this function is only called from the Format method, and given + // that FormatError takes precedence over Format, it cannot be called from + // any package that supports errors.Formatter. It is therefore safe to + // disregard that State may be a specific printer implementation and use one + // of our choice instead. + + // limitations: does not support printing error as Go struct. 
+ + var ( + sep = " " // separator before next error + p = &state{State: s} + direct = true + ) + + var err error = f + + switch verb { + // Note that this switch must match the preference order + // for ordinary string printing (%#v before %+v, and so on). + + case 'v': + if s.Flag('#') { + if stringer, ok := err.(fmt.GoStringer); ok { + io.WriteString(&p.buf, stringer.GoString()) + goto exit + } + // proceed as if it were %v + } else if s.Flag('+') { + p.printDetail = true + sep = "\n - " + } + case 's': + case 'q', 'x', 'X': + // Use an intermediate buffer in the rare cases that precision, + // truncation, or one of the alternative verbs (q, x, and X) are + // specified. + direct = false + + default: + p.buf.WriteString("%!") + p.buf.WriteRune(verb) + p.buf.WriteByte('(') + switch { + case err != nil: + p.buf.WriteString(reflect.TypeOf(f).String()) + default: + p.buf.WriteString("") + } + p.buf.WriteByte(')') + io.Copy(s, &p.buf) + return + } + +loop: + for { + switch v := err.(type) { + case Formatter: + err = v.FormatError((*printer)(p)) + case fmt.Formatter: + v.Format(p, 'v') + break loop + default: + io.WriteString(&p.buf, v.Error()) + break loop + } + if err == nil { + break + } + if p.needColon || !p.printDetail { + p.buf.WriteByte(':') + p.needColon = false + } + p.buf.WriteString(sep) + p.inDetail = false + p.needNewline = false + } + +exit: + width, okW := s.Width() + prec, okP := s.Precision() + + if !direct || (okW && width > 0) || okP { + // Construct format string from State s. + format := []byte{'%'} + if s.Flag('-') { + format = append(format, '-') + } + if s.Flag('+') { + format = append(format, '+') + } + if s.Flag(' ') { + format = append(format, ' ') + } + if okW { + format = strconv.AppendInt(format, int64(width), 10) + } + if okP { + format = append(format, '.') + format = strconv.AppendInt(format, int64(prec), 10) + } + format = append(format, string(verb)...) + fmt.Fprintf(s, string(format), p.buf.String()) + } else { + io.Copy(s, &p.buf) + } +} + +var detailSep = []byte("\n ") + +// state tracks error printing state. It implements fmt.State. +type state struct { + fmt.State + buf bytes.Buffer + + printDetail bool + inDetail bool + needColon bool + needNewline bool +} + +func (s *state) Write(b []byte) (n int, err error) { + if s.printDetail { + if len(b) == 0 { + return 0, nil + } + if s.inDetail && s.needColon { + s.needNewline = true + if b[0] == '\n' { + b = b[1:] + } + } + k := 0 + for i, c := range b { + if s.needNewline { + if s.inDetail && s.needColon { + s.buf.WriteByte(':') + s.needColon = false + } + s.buf.Write(detailSep) + s.needNewline = false + } + if c == '\n' { + s.buf.Write(b[k:i]) + k = i + 1 + s.needNewline = true + } + } + s.buf.Write(b[k:]) + if !s.inDetail { + s.needColon = true + } + } else if !s.inDetail { + s.buf.Write(b) + } + return len(b), nil +} + +// printer wraps a state to implement an xerrors.Printer. +type printer state + +func (s *printer) Print(args ...interface{}) { + if !s.inDetail || s.printDetail { + fmt.Fprint((*state)(s), args...) + } +} + +func (s *printer) Printf(format string, args ...interface{}) { + if !s.inDetail || s.printDetail { + fmt.Fprintf((*state)(s), format, args...) 
+ } +} + +func (s *printer) Detail() bool { + s.inDetail = true + return s.printDetail +} diff --git a/vendor/golang.org/x/xerrors/codereview.cfg b/vendor/golang.org/x/xerrors/codereview.cfg new file mode 100644 index 000000000..3f8b14b64 --- /dev/null +++ b/vendor/golang.org/x/xerrors/codereview.cfg @@ -0,0 +1 @@ +issuerepo: golang/go diff --git a/vendor/golang.org/x/xerrors/doc.go b/vendor/golang.org/x/xerrors/doc.go new file mode 100644 index 000000000..eef99d9d5 --- /dev/null +++ b/vendor/golang.org/x/xerrors/doc.go @@ -0,0 +1,22 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xerrors implements functions to manipulate errors. +// +// This package is based on the Go 2 proposal for error values: +// https://golang.org/design/29934-error-values +// +// These functions were incorporated into the standard library's errors package +// in Go 1.13: +// - Is +// - As +// - Unwrap +// +// Also, Errorf's %w verb was incorporated into fmt.Errorf. +// +// Use this package to get equivalent behavior in all supported Go versions. +// +// No other features of this package were included in Go 1.13, and at present +// there are no plans to include any of them. +package xerrors // import "golang.org/x/xerrors" diff --git a/vendor/golang.org/x/xerrors/errors.go b/vendor/golang.org/x/xerrors/errors.go new file mode 100644 index 000000000..e88d3772d --- /dev/null +++ b/vendor/golang.org/x/xerrors/errors.go @@ -0,0 +1,33 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +import "fmt" + +// errorString is a trivial implementation of error. +type errorString struct { + s string + frame Frame +} + +// New returns an error that formats as the given text. +// +// The returned error contains a Frame set to the caller's location and +// implements Formatter to show this information when printed with details. +func New(text string) error { + return &errorString{text, Caller(1)} +} + +func (e *errorString) Error() string { + return e.s +} + +func (e *errorString) Format(s fmt.State, v rune) { FormatError(e, s, v) } + +func (e *errorString) FormatError(p Printer) (next error) { + p.Print(e.s) + e.frame.Format(p) + return nil +} diff --git a/vendor/golang.org/x/xerrors/fmt.go b/vendor/golang.org/x/xerrors/fmt.go new file mode 100644 index 000000000..829862ddf --- /dev/null +++ b/vendor/golang.org/x/xerrors/fmt.go @@ -0,0 +1,187 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" + + "golang.org/x/xerrors/internal" +) + +const percentBangString = "%!" + +// Errorf formats according to a format specifier and returns the string as a +// value that satisfies error. +// +// The returned error includes the file and line number of the caller when +// formatted with additional detail enabled. If the last argument is an error +// the returned error's Format method will return it if the format string ends +// with ": %s", ": %v", or ": %w". If the last argument is an error and the +// format string ends with ": %w", the returned error implements an Unwrap +// method returning it. 
+// +// If the format specifier includes a %w verb with an error operand in a +// position other than at the end, the returned error will still implement an +// Unwrap method returning the operand, but the error's Format method will not +// return the wrapped error. +// +// It is invalid to include more than one %w verb or to supply it with an +// operand that does not implement the error interface. The %w verb is otherwise +// a synonym for %v. +func Errorf(format string, a ...interface{}) error { + format = formatPlusW(format) + // Support a ": %[wsv]" suffix, which works well with xerrors.Formatter. + wrap := strings.HasSuffix(format, ": %w") + idx, format2, ok := parsePercentW(format) + percentWElsewhere := !wrap && idx >= 0 + if !percentWElsewhere && (wrap || strings.HasSuffix(format, ": %s") || strings.HasSuffix(format, ": %v")) { + err := errorAt(a, len(a)-1) + if err == nil { + return &noWrapError{fmt.Sprintf(format, a...), nil, Caller(1)} + } + // TODO: this is not entirely correct. The error value could be + // printed elsewhere in format if it mixes numbered with unnumbered + // substitutions. With relatively small changes to doPrintf we can + // have it optionally ignore extra arguments and pass the argument + // list in its entirety. + msg := fmt.Sprintf(format[:len(format)-len(": %s")], a[:len(a)-1]...) + frame := Frame{} + if internal.EnableTrace { + frame = Caller(1) + } + if wrap { + return &wrapError{msg, err, frame} + } + return &noWrapError{msg, err, frame} + } + // Support %w anywhere. + // TODO: don't repeat the wrapped error's message when %w occurs in the middle. + msg := fmt.Sprintf(format2, a...) + if idx < 0 { + return &noWrapError{msg, nil, Caller(1)} + } + err := errorAt(a, idx) + if !ok || err == nil { + // Too many %ws or argument of %w is not an error. Approximate the Go + // 1.13 fmt.Errorf message. + return &noWrapError{fmt.Sprintf("%sw(%s)", percentBangString, msg), nil, Caller(1)} + } + frame := Frame{} + if internal.EnableTrace { + frame = Caller(1) + } + return &wrapError{msg, err, frame} +} + +func errorAt(args []interface{}, i int) error { + if i < 0 || i >= len(args) { + return nil + } + err, ok := args[i].(error) + if !ok { + return nil + } + return err +} + +// formatPlusW is used to avoid the vet check that will barf at %w. +func formatPlusW(s string) string { + return s +} + +// Return the index of the only %w in format, or -1 if none. +// Also return a rewritten format string with %w replaced by %v, and +// false if there is more than one %w. +// TODO: handle "%[N]w". +func parsePercentW(format string) (idx int, newFormat string, ok bool) { + // Loosely copied from golang.org/x/tools/go/analysis/passes/printf/printf.go. + idx = -1 + ok = true + n := 0 + sz := 0 + var isW bool + for i := 0; i < len(format); i += sz { + if format[i] != '%' { + sz = 1 + continue + } + // "%%" is not a format directive. + if i+1 < len(format) && format[i+1] == '%' { + sz = 2 + continue + } + sz, isW = parsePrintfVerb(format[i:]) + if isW { + if idx >= 0 { + ok = false + } else { + idx = n + } + // "Replace" the last character, the 'w', with a 'v'. + p := i + sz - 1 + format = format[:p] + "v" + format[p+1:] + } + n++ + } + return idx, format, ok +} + +// Parse the printf verb starting with a % at s[0]. +// Return how many bytes it occupies and whether the verb is 'w'. +func parsePrintfVerb(s string) (int, bool) { + // Assume only that the directive is a sequence of non-letters followed by a single letter. 
+ sz := 0 + var r rune + for i := 1; i < len(s); i += sz { + r, sz = utf8.DecodeRuneInString(s[i:]) + if unicode.IsLetter(r) { + return i + sz, r == 'w' + } + } + return len(s), false +} + +type noWrapError struct { + msg string + err error + frame Frame +} + +func (e *noWrapError) Error() string { + return fmt.Sprint(e) +} + +func (e *noWrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) } + +func (e *noWrapError) FormatError(p Printer) (next error) { + p.Print(e.msg) + e.frame.Format(p) + return e.err +} + +type wrapError struct { + msg string + err error + frame Frame +} + +func (e *wrapError) Error() string { + return fmt.Sprint(e) +} + +func (e *wrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) } + +func (e *wrapError) FormatError(p Printer) (next error) { + p.Print(e.msg) + e.frame.Format(p) + return e.err +} + +func (e *wrapError) Unwrap() error { + return e.err +} diff --git a/vendor/golang.org/x/xerrors/format.go b/vendor/golang.org/x/xerrors/format.go new file mode 100644 index 000000000..1bc9c26b9 --- /dev/null +++ b/vendor/golang.org/x/xerrors/format.go @@ -0,0 +1,34 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +// A Formatter formats error messages. +type Formatter interface { + error + + // FormatError prints the receiver's first error and returns the next error in + // the error chain, if any. + FormatError(p Printer) (next error) +} + +// A Printer formats error messages. +// +// The most common implementation of Printer is the one provided by package fmt +// during Printf (as of Go 1.13). Localization packages such as golang.org/x/text/message +// typically provide their own implementations. +type Printer interface { + // Print appends args to the message output. + Print(args ...interface{}) + + // Printf writes a formatted string. + Printf(format string, args ...interface{}) + + // Detail reports whether error detail is requested. + // After the first call to Detail, all text written to the Printer + // is formatted as additional detail, or ignored when + // detail has not been requested. + // If Detail returns false, the caller can avoid printing the detail at all. + Detail() bool +} diff --git a/vendor/golang.org/x/xerrors/frame.go b/vendor/golang.org/x/xerrors/frame.go new file mode 100644 index 000000000..0de628ec5 --- /dev/null +++ b/vendor/golang.org/x/xerrors/frame.go @@ -0,0 +1,56 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +import ( + "runtime" +) + +// A Frame contains part of a call stack. +type Frame struct { + // Make room for three PCs: the one we were asked for, what it called, + // and possibly a PC for skipPleaseUseCallersFrames. See: + // https://go.googlesource.com/go/+/032678e0fb/src/runtime/extern.go#169 + frames [3]uintptr +} + +// Caller returns a Frame that describes a frame on the caller's stack. +// The argument skip is the number of frames to skip over. +// Caller(0) returns the frame for the caller of Caller. +func Caller(skip int) Frame { + var s Frame + runtime.Callers(skip+1, s.frames[:]) + return s +} + +// location reports the file, line, and function of a frame. +// +// The returned function may be "" even if file and line are not. 
+func (f Frame) location() (function, file string, line int) { + frames := runtime.CallersFrames(f.frames[:]) + if _, ok := frames.Next(); !ok { + return "", "", 0 + } + fr, ok := frames.Next() + if !ok { + return "", "", 0 + } + return fr.Function, fr.File, fr.Line +} + +// Format prints the stack as error detail. +// It should be called from an error's Format implementation +// after printing any other error detail. +func (f Frame) Format(p Printer) { + if p.Detail() { + function, file, line := f.location() + if function != "" { + p.Printf("%s\n ", function) + } + if file != "" { + p.Printf("%s:%d\n", file, line) + } + } +} diff --git a/vendor/golang.org/x/xerrors/go.mod b/vendor/golang.org/x/xerrors/go.mod new file mode 100644 index 000000000..870d4f612 --- /dev/null +++ b/vendor/golang.org/x/xerrors/go.mod @@ -0,0 +1,3 @@ +module golang.org/x/xerrors + +go 1.11 diff --git a/vendor/golang.org/x/xerrors/internal/internal.go b/vendor/golang.org/x/xerrors/internal/internal.go new file mode 100644 index 000000000..89f4eca5d --- /dev/null +++ b/vendor/golang.org/x/xerrors/internal/internal.go @@ -0,0 +1,8 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +// EnableTrace indicates whether stack information should be recorded in errors. +var EnableTrace = true diff --git a/vendor/golang.org/x/xerrors/wrap.go b/vendor/golang.org/x/xerrors/wrap.go new file mode 100644 index 000000000..9a3b51037 --- /dev/null +++ b/vendor/golang.org/x/xerrors/wrap.go @@ -0,0 +1,106 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +import ( + "reflect" +) + +// A Wrapper provides context around another error. +type Wrapper interface { + // Unwrap returns the next error in the error chain. + // If there is no next error, Unwrap returns nil. + Unwrap() error +} + +// Opaque returns an error with the same error formatting as err +// but that does not match err and cannot be unwrapped. +func Opaque(err error) error { + return noWrapper{err} +} + +type noWrapper struct { + error +} + +func (e noWrapper) FormatError(p Printer) (next error) { + if f, ok := e.error.(Formatter); ok { + return f.FormatError(p) + } + p.Print(e.error) + return nil +} + +// Unwrap returns the result of calling the Unwrap method on err, if err implements +// Unwrap. Otherwise, Unwrap returns nil. +func Unwrap(err error) error { + u, ok := err.(Wrapper) + if !ok { + return nil + } + return u.Unwrap() +} + +// Is reports whether any error in err's chain matches target. +// +// An error is considered to match a target if it is equal to that target or if +// it implements a method Is(error) bool such that Is(target) returns true. +func Is(err, target error) bool { + if target == nil { + return err == target + } + + isComparable := reflect.TypeOf(target).Comparable() + for { + if isComparable && err == target { + return true + } + if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) { + return true + } + // TODO: consider supporing target.Is(err). This would allow + // user-definable predicates, but also may allow for coping with sloppy + // APIs, thereby making it easier to get away with them. 
+ if err = Unwrap(err); err == nil { + return false + } + } +} + +// As finds the first error in err's chain that matches the type to which target +// points, and if so, sets the target to its value and returns true. An error +// matches a type if it is assignable to the target type, or if it has a method +// As(interface{}) bool such that As(target) returns true. As will panic if target +// is not a non-nil pointer to a type which implements error or is of interface type. +// +// The As method should set the target to its value and return true if err +// matches the type to which target points. +func As(err error, target interface{}) bool { + if target == nil { + panic("errors: target cannot be nil") + } + val := reflect.ValueOf(target) + typ := val.Type() + if typ.Kind() != reflect.Ptr || val.IsNil() { + panic("errors: target must be a non-nil pointer") + } + if e := typ.Elem(); e.Kind() != reflect.Interface && !e.Implements(errorType) { + panic("errors: *target must be interface or implement error") + } + targetType := typ.Elem() + for err != nil { + if reflect.TypeOf(err).AssignableTo(targetType) { + val.Elem().Set(reflect.ValueOf(err)) + return true + } + if x, ok := err.(interface{ As(interface{}) bool }); ok && x.As(target) { + return true + } + err = Unwrap(err) + } + return false +} + +var errorType = reflect.TypeOf((*error)(nil)).Elem() diff --git a/vendor/github.com/appscode/jsonpatch/LICENSE b/vendor/gomodules.xyz/jsonpatch/v2/LICENSE similarity index 100% rename from vendor/github.com/appscode/jsonpatch/LICENSE rename to vendor/gomodules.xyz/jsonpatch/v2/LICENSE diff --git a/vendor/gomodules.xyz/jsonpatch/v2/go.mod b/vendor/gomodules.xyz/jsonpatch/v2/go.mod new file mode 100644 index 000000000..b5eaf830e --- /dev/null +++ b/vendor/gomodules.xyz/jsonpatch/v2/go.mod @@ -0,0 +1,9 @@ +module gomodules.xyz/jsonpatch/v2 + +go 1.12 + +require ( + github.com/evanphx/json-patch v4.5.0+incompatible + github.com/pkg/errors v0.8.1 // indirect + github.com/stretchr/testify v1.3.0 +) diff --git a/vendor/gomodules.xyz/jsonpatch/v2/go.sum b/vendor/gomodules.xyz/jsonpatch/v2/go.sum new file mode 100644 index 000000000..d8f9ffe1c --- /dev/null +++ b/vendor/gomodules.xyz/jsonpatch/v2/go.sum @@ -0,0 +1,11 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= +github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/appscode/jsonpatch/jsonpatch.go b/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go similarity index 97% rename from vendor/github.com/appscode/jsonpatch/jsonpatch.go rename to vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go index 3e698949e..e7cb7d6da 100644 --- 
a/vendor/github.com/appscode/jsonpatch/jsonpatch.go +++ b/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go @@ -58,8 +58,8 @@ func NewPatch(operation, path string, value interface{}) Operation { // // An error will be returned if any of the two documents are invalid. func CreatePatch(a, b []byte) ([]Operation, error) { - aI := map[string]interface{}{} - bI := map[string]interface{}{} + var aI interface{} + var bI interface{} err := json.Unmarshal(a, &aI) if err != nil { return nil, errBadJSONDoc @@ -68,7 +68,7 @@ func CreatePatch(a, b []byte) ([]Operation, error) { if err != nil { return nil, errBadJSONDoc } - return diff(aI, bI, "", []Operation{}) + return handleValues(aI, bI, "", []Operation{}) } // Returns true if the values matches (must be json types) @@ -326,7 +326,7 @@ func backtrace(s, t []interface{}, p string, i int, j int, matrix [][]int) []Ope return append([]Operation{op}, backtrace(s, t, p, i-1, j-1, matrix)...) } - p2, _ := handleValues(s[j-1], t[j-1], makePath(p, i-1), []Operation{}) + p2, _ := handleValues(s[i-1], t[j-1], makePath(p, i-1), []Operation{}) return append(p2, backtrace(s, t, p, i-1, j-1, matrix)...) } if i > 0 && j > 0 && matrix[i-1][j-1] == matrix[i][j] { diff --git a/vendor/google.golang.org/appengine/go.mod b/vendor/google.golang.org/appengine/go.mod index 451592798..635c34f5a 100644 --- a/vendor/google.golang.org/appengine/go.mod +++ b/vendor/google.golang.org/appengine/go.mod @@ -1,10 +1,9 @@ module google.golang.org/appengine +go 1.11 + require ( github.com/golang/protobuf v1.3.1 - golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 // indirect golang.org/x/net v0.0.0-20190603091049-60506f45cf65 - golang.org/x/sys v0.0.0-20190606165138-5da285871e9c // indirect golang.org/x/text v0.3.2 - golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b // indirect ) diff --git a/vendor/google.golang.org/appengine/go.sum b/vendor/google.golang.org/appengine/go.sum index cb3232556..ce22f6856 100644 --- a/vendor/google.golang.org/appengine/go.sum +++ b/vendor/google.golang.org/appengine/go.sum @@ -1,22 +1,11 @@ -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225 h1:kNX+jCowfMYzvlSvJu5pQWEmyWFrBXJ3PBy10xKMXK8= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65 h1:+rhAzEzT3f4JtomfC371qB+0Ola2caSKcY69NUBZrRQ= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go index 3b94cf0c6..fe429720e 100644 --- a/vendor/google.golang.org/appengine/internal/net.go +++ b/vendor/google.golang.org/appengine/internal/net.go @@ -32,7 +32,7 @@ func limitDial(network, addr string) (net.Conn, error) { // Dial with a timeout in case the API host is MIA. // The connection should normally be very fast. - conn, err := net.DialTimeout(network, addr, 500*time.Millisecond) + conn, err := net.DialTimeout(network, addr, 10*time.Second) if err != nil { limitRelease() return nil, err diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml index 9f556934d..055480b9e 100644 --- a/vendor/gopkg.in/yaml.v2/.travis.yml +++ b/vendor/gopkg.in/yaml.v2/.travis.yml @@ -1,12 +1,16 @@ language: go go: - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - 1.8 - - 1.9 - - tip + - "1.4.x" + - "1.5.x" + - "1.6.x" + - "1.7.x" + - "1.8.x" + - "1.9.x" + - "1.10.x" + - "1.11.x" + - "1.12.x" + - "1.13.x" + - "tip" go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go index e4e56e28e..129bc2a97 100644 --- a/vendor/gopkg.in/yaml.v2/decode.go +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -229,6 +229,10 @@ type decoder struct { mapType reflect.Type terrors []string strict bool + + decodeCount int + aliasCount int + aliasDepth int } var ( @@ -314,7 +318,43 @@ func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unm return out, false, false } +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). 
+ return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } switch n.kind { case documentNode: return d.document(n, out) @@ -353,7 +393,9 @@ func (d *decoder) alias(n *node, out reflect.Value) (good bool) { failf("anchor '%s' value contains itself", n.value) } d.aliases[n] = true + d.aliasDepth++ good = d.unmarshal(n.alias, out) + d.aliasDepth-- delete(d.aliases, n) return good } @@ -746,8 +788,7 @@ func (d *decoder) merge(n *node, out reflect.Value) { case mappingNode: d.unmarshal(n, out) case aliasNode: - an, ok := d.doc.anchors[n.value] - if ok && an.kind != mappingNode { + if n.alias != nil && n.alias.kind != mappingNode { failWantMap() } d.unmarshal(n, out) @@ -756,8 +797,7 @@ func (d *decoder) merge(n *node, out reflect.Value) { for i := len(n.children) - 1; i >= 0; i-- { ni := n.children[i] if ni.kind == aliasNode { - an, ok := d.doc.anchors[ni.value] - if ok && an.kind != mappingNode { + if ni.alias != nil && ni.alias.kind != mappingNode { failWantMap() } } else if ni.kind != mappingNode { diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go index 6c151db6f..4120e0c91 100644 --- a/vendor/gopkg.in/yaml.v2/resolve.go +++ b/vendor/gopkg.in/yaml.v2/resolve.go @@ -81,7 +81,7 @@ func resolvableTag(tag string) bool { return false } -var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`) +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) func resolve(tag string, in string) (rtag string, out interface{}) { if !resolvableTag(tag) { diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go index 077fd1dd2..b33bdbaec 100644 --- a/vendor/gopkg.in/yaml.v2/scannerc.go +++ b/vendor/gopkg.in/yaml.v2/scannerc.go @@ -634,13 +634,14 @@ func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { need_more_tokens = true } else { // Check if any potential simple key may occupy the head position. - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - for i := range parser.simple_keys { + for i := len(parser.simple_keys) - 1; i >= 0; i-- { simple_key := &parser.simple_keys[i] - if simple_key.possible && simple_key.token_number == parser.tokens_parsed { + if simple_key.token_number < parser.tokens_parsed { + break + } + if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { + return false + } else if valid && simple_key.token_number == parser.tokens_parsed { need_more_tokens = true break } @@ -678,11 +679,6 @@ func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { return false } - // Remove obsolete potential simple keys. - if !yaml_parser_stale_simple_keys(parser) { - return false - } - // Check the indentation level against the current column. if !yaml_parser_unroll_indent(parser, parser.mark.column) { return false @@ -837,29 +833,30 @@ func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { "found character that cannot start any token") } -// Check the list of potential simple keys and remove the positions that -// cannot contain simple keys anymore. -func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { - // Check for a potential simple key for each flow level. 
- for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - - // The specification requires that a simple key - // - // - is limited to a single line, - // - is shorter than 1024 characters. - if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { - - // Check if the potential simple key to be removed is required. - if simple_key.required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", simple_key.mark, - "could not find expected ':'") - } - simple_key.possible = false +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") } + simple_key.possible = false + return false, true } - return true + return true, true } // Check if a simple key may start at the current position and add it if @@ -879,8 +876,8 @@ func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { possible: true, required: required, token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, } - simple_key.mark = parser.mark if !yaml_parser_remove_simple_key(parser) { return false @@ -906,13 +903,26 @@ func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { return true } +// max_flow_level limits the flow_level +const max_flow_level = 10000 + // Increase the flow level and resize the simple key list if needed. func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { // Reset the simple key on the next level. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ + possible: false, + required: false, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + }) // Increase the flow level. parser.flow_level++ + if parser.flow_level > max_flow_level { + return yaml_parser_set_scanner_error(parser, + "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_flow_level)) + } return true } @@ -925,6 +935,9 @@ func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { return true } +// max_indents limits the indents stack size +const max_indents = 10000 + // Push the current indentation level to the stack and set the new level // the current column is greater than the indentation level. In this case, // append or insert the specified token into the token queue. @@ -939,6 +952,11 @@ func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml // indentation level. 
parser.indents = append(parser.indents, parser.indent) parser.indent = column + if len(parser.indents) > max_indents { + return yaml_parser_set_scanner_error(parser, + "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_indents)) + } // Create a token and insert it into the queue. token := yaml_token_t{ @@ -1270,7 +1288,11 @@ func yaml_parser_fetch_value(parser *yaml_parser_t) bool { simple_key := &parser.simple_keys[len(parser.simple_keys)-1] // Have we found a simple key? - if simple_key.possible { + if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { + return false + + } else if valid { + // Create the KEY token and insert it into the queue. token := yaml_token_t{ typ: yaml_KEY_TOKEN, diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go index de85aa4cd..89650e293 100644 --- a/vendor/gopkg.in/yaml.v2/yaml.go +++ b/vendor/gopkg.in/yaml.v2/yaml.go @@ -89,7 +89,7 @@ func UnmarshalStrict(in []byte, out interface{}) (err error) { return unmarshal(in, out, true) } -// A Decorder reads and decodes YAML values from an input stream. +// A Decoder reads and decodes YAML values from an input stream. type Decoder struct { strict bool parser *parser diff --git a/vendor/honnef.co/go/tools/LICENSE b/vendor/honnef.co/go/tools/LICENSE new file mode 100644 index 000000000..dfd031454 --- /dev/null +++ b/vendor/honnef.co/go/tools/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2016 Dominik Honnef + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/honnef.co/go/tools/LICENSE-THIRD-PARTY b/vendor/honnef.co/go/tools/LICENSE-THIRD-PARTY new file mode 100644 index 000000000..7c241b71a --- /dev/null +++ b/vendor/honnef.co/go/tools/LICENSE-THIRD-PARTY @@ -0,0 +1,226 @@ +Staticcheck and its related tools make use of third party projects, +either by reusing their code, or by statically linking them into +resulting binaries. These projects are: + +* The Go Programming Language - https://golang.org/ + + Copyright (c) 2009 The Go Authors. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +* github.com/BurntSushi/toml - https://github.com/BurntSushi/toml + + The MIT License (MIT) + + Copyright (c) 2013 TOML authors + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + + +* github.com/google/renameio - https://github.com/google/renameio + + Copyright 2018 Google Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + +* github.com/kisielk/gotool – https://github.com/kisielk/gotool + + Copyright (c) 2013 Kamil Kisiel + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + All the files in this distribution are covered under either the MIT + license (see the file LICENSE) except some files mentioned below. + + match.go, match_test.go: + + Copyright (c) 2009 The Go Authors. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +* github.com/rogpeppe/go-internal - https://github.com/rogpeppe/go-internal + + Copyright (c) 2018 The Go Authors. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. 
nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +* golang.org/x/mod/module - https://github.com/golang/mod + + Copyright (c) 2009 The Go Authors. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +* golang.org/x/tools/go/analysis - https://github.com/golang/tools + + Copyright (c) 2009 The Go Authors. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/honnef.co/go/tools/arg/arg.go b/vendor/honnef.co/go/tools/arg/arg.go new file mode 100644 index 000000000..1e7f30db4 --- /dev/null +++ b/vendor/honnef.co/go/tools/arg/arg.go @@ -0,0 +1,48 @@ +package arg + +var args = map[string]int{ + "(*encoding/json.Decoder).Decode.v": 0, + "(*encoding/json.Encoder).Encode.v": 0, + "(*encoding/xml.Decoder).Decode.v": 0, + "(*encoding/xml.Encoder).Encode.v": 0, + "(*sync.Pool).Put.x": 0, + "(*text/template.Template).Parse.text": 0, + "(io.Seeker).Seek.offset": 0, + "(time.Time).Sub.u": 0, + "append.elems": 1, + "append.slice": 0, + "bytes.Equal.a": 0, + "bytes.Equal.b": 1, + "encoding/binary.Write.data": 2, + "errors.New.text": 0, + "fmt.Fprintf.format": 1, + "fmt.Printf.format": 0, + "fmt.Sprintf.a[0]": 1, + "fmt.Sprintf.format": 0, + "json.Marshal.v": 0, + "json.Unmarshal.v": 1, + "len.v": 0, + "make.size[0]": 1, + "make.size[1]": 2, + "make.t": 0, + "net/url.Parse.rawurl": 0, + "os.OpenFile.flag": 1, + "os/exec.Command.name": 0, + "os/signal.Notify.c": 0, + "regexp.Compile.expr": 0, + "runtime.SetFinalizer.finalizer": 1, + "runtime.SetFinalizer.obj": 0, + "sort.Sort.data": 0, + "time.Parse.layout": 0, + "time.Sleep.d": 0, + "xml.Marshal.v": 0, + "xml.Unmarshal.v": 1, +} + +func Arg(name string) int { + n, ok := args[name] + if !ok { + panic("unknown argument " + name) + } + return n +} diff --git a/vendor/honnef.co/go/tools/cmd/staticcheck/README.md b/vendor/honnef.co/go/tools/cmd/staticcheck/README.md new file mode 100644 index 000000000..4d14577fd --- /dev/null +++ b/vendor/honnef.co/go/tools/cmd/staticcheck/README.md @@ -0,0 +1,15 @@ +# staticcheck + +_staticcheck_ offers extensive analysis of Go code, covering a myriad +of categories. It will detect bugs, suggest code simplifications, +point out dead code, and more. + +## Installation + +See [the main README](https://github.com/dominikh/go-tools#installation) for installation instructions. + +## Documentation + +Detailed documentation can be found on +[staticcheck.io](https://staticcheck.io/docs/). + diff --git a/vendor/honnef.co/go/tools/cmd/staticcheck/staticcheck.go b/vendor/honnef.co/go/tools/cmd/staticcheck/staticcheck.go new file mode 100644 index 000000000..4f504dc39 --- /dev/null +++ b/vendor/honnef.co/go/tools/cmd/staticcheck/staticcheck.go @@ -0,0 +1,44 @@ +// staticcheck analyses Go code and makes it better. 
+package main // import "honnef.co/go/tools/cmd/staticcheck" + +import ( + "log" + "os" + + "golang.org/x/tools/go/analysis" + "honnef.co/go/tools/lint" + "honnef.co/go/tools/lint/lintutil" + "honnef.co/go/tools/simple" + "honnef.co/go/tools/staticcheck" + "honnef.co/go/tools/stylecheck" + "honnef.co/go/tools/unused" +) + +func main() { + fs := lintutil.FlagSet("staticcheck") + wholeProgram := fs.Bool("unused.whole-program", false, "Run unused in whole program mode") + debug := fs.String("debug.unused-graph", "", "Write unused's object graph to `file`") + fs.Parse(os.Args[1:]) + + var cs []*analysis.Analyzer + for _, v := range simple.Analyzers { + cs = append(cs, v) + } + for _, v := range staticcheck.Analyzers { + cs = append(cs, v) + } + for _, v := range stylecheck.Analyzers { + cs = append(cs, v) + } + + u := unused.NewChecker(*wholeProgram) + if *debug != "" { + f, err := os.OpenFile(*debug, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + log.Fatal(err) + } + u.Debug = f + } + cums := []lint.CumulativeChecker{u} + lintutil.ProcessFlagSet(cs, cums, fs) +} diff --git a/vendor/honnef.co/go/tools/config/config.go b/vendor/honnef.co/go/tools/config/config.go new file mode 100644 index 000000000..c22093a6d --- /dev/null +++ b/vendor/honnef.co/go/tools/config/config.go @@ -0,0 +1,224 @@ +package config + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "reflect" + "strings" + + "github.com/BurntSushi/toml" + "golang.org/x/tools/go/analysis" +) + +var Analyzer = &analysis.Analyzer{ + Name: "config", + Doc: "loads configuration for the current package tree", + Run: func(pass *analysis.Pass) (interface{}, error) { + if len(pass.Files) == 0 { + cfg := DefaultConfig + return &cfg, nil + } + cache, err := os.UserCacheDir() + if err != nil { + cache = "" + } + var path string + for _, f := range pass.Files { + p := pass.Fset.PositionFor(f.Pos(), true).Filename + // FIXME(dh): using strings.HasPrefix isn't technically + // correct, but it should be good enough for now. + if cache != "" && strings.HasPrefix(p, cache) { + // File in the build cache of the standard Go build system + continue + } + path = p + break + } + + if path == "" { + // The package only consists of generated files. + cfg := DefaultConfig + return &cfg, nil + } + + dir := filepath.Dir(path) + cfg, err := Load(dir) + if err != nil { + return nil, fmt.Errorf("error loading staticcheck.conf: %s", err) + } + return &cfg, nil + }, + RunDespiteErrors: true, + ResultType: reflect.TypeOf((*Config)(nil)), +} + +func For(pass *analysis.Pass) *Config { + return pass.ResultOf[Analyzer].(*Config) +} + +func mergeLists(a, b []string) []string { + out := make([]string, 0, len(a)+len(b)) + for _, el := range b { + if el == "inherit" { + out = append(out, a...) 
+ } else { + out = append(out, el) + } + } + + return out +} + +func normalizeList(list []string) []string { + if len(list) > 1 { + nlist := make([]string, 0, len(list)) + nlist = append(nlist, list[0]) + for i, el := range list[1:] { + if el != list[i] { + nlist = append(nlist, el) + } + } + list = nlist + } + + for _, el := range list { + if el == "inherit" { + // This should never happen, because the default config + // should not use "inherit" + panic(`unresolved "inherit"`) + } + } + + return list +} + +func (cfg Config) Merge(ocfg Config) Config { + if ocfg.Checks != nil { + cfg.Checks = mergeLists(cfg.Checks, ocfg.Checks) + } + if ocfg.Initialisms != nil { + cfg.Initialisms = mergeLists(cfg.Initialisms, ocfg.Initialisms) + } + if ocfg.DotImportWhitelist != nil { + cfg.DotImportWhitelist = mergeLists(cfg.DotImportWhitelist, ocfg.DotImportWhitelist) + } + if ocfg.HTTPStatusCodeWhitelist != nil { + cfg.HTTPStatusCodeWhitelist = mergeLists(cfg.HTTPStatusCodeWhitelist, ocfg.HTTPStatusCodeWhitelist) + } + return cfg +} + +type Config struct { + // TODO(dh): this implementation makes it impossible for external + // clients to add their own checkers with configuration. At the + // moment, we don't really care about that; we don't encourage + // that people use this package. In the future, we may. The + // obvious solution would be using map[string]interface{}, but + // that's obviously subpar. + + Checks []string `toml:"checks"` + Initialisms []string `toml:"initialisms"` + DotImportWhitelist []string `toml:"dot_import_whitelist"` + HTTPStatusCodeWhitelist []string `toml:"http_status_code_whitelist"` +} + +func (c Config) String() string { + buf := &bytes.Buffer{} + + fmt.Fprintf(buf, "Checks: %#v\n", c.Checks) + fmt.Fprintf(buf, "Initialisms: %#v\n", c.Initialisms) + fmt.Fprintf(buf, "DotImportWhitelist: %#v\n", c.DotImportWhitelist) + fmt.Fprintf(buf, "HTTPStatusCodeWhitelist: %#v", c.HTTPStatusCodeWhitelist) + + return buf.String() +} + +var DefaultConfig = Config{ + Checks: []string{"all", "-ST1000", "-ST1003", "-ST1016"}, + Initialisms: []string{ + "ACL", "API", "ASCII", "CPU", "CSS", "DNS", + "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", + "IP", "JSON", "QPS", "RAM", "RPC", "SLA", + "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL", + "UDP", "UI", "GID", "UID", "UUID", "URI", + "URL", "UTF8", "VM", "XML", "XMPP", "XSRF", + "XSS", "SIP", "RTP", + }, + DotImportWhitelist: []string{}, + HTTPStatusCodeWhitelist: []string{"200", "400", "404", "500"}, +} + +const configName = "staticcheck.conf" + +func parseConfigs(dir string) ([]Config, error) { + var out []Config + + // TODO(dh): consider stopping at the GOPATH/module boundary + for dir != "" { + f, err := os.Open(filepath.Join(dir, configName)) + if os.IsNotExist(err) { + ndir := filepath.Dir(dir) + if ndir == dir { + break + } + dir = ndir + continue + } + if err != nil { + return nil, err + } + var cfg Config + _, err = toml.DecodeReader(f, &cfg) + f.Close() + if err != nil { + return nil, err + } + out = append(out, cfg) + ndir := filepath.Dir(dir) + if ndir == dir { + break + } + dir = ndir + } + out = append(out, DefaultConfig) + if len(out) < 2 { + return out, nil + } + for i := 0; i < len(out)/2; i++ { + out[i], out[len(out)-1-i] = out[len(out)-1-i], out[i] + } + return out, nil +} + +func mergeConfigs(confs []Config) Config { + if len(confs) == 0 { + // This shouldn't happen because we always have at least a + // default config. 
+ panic("trying to merge zero configs") + } + if len(confs) == 1 { + return confs[0] + } + conf := confs[0] + for _, oconf := range confs[1:] { + conf = conf.Merge(oconf) + } + return conf +} + +func Load(dir string) (Config, error) { + confs, err := parseConfigs(dir) + if err != nil { + return Config{}, err + } + conf := mergeConfigs(confs) + + conf.Checks = normalizeList(conf.Checks) + conf.Initialisms = normalizeList(conf.Initialisms) + conf.DotImportWhitelist = normalizeList(conf.DotImportWhitelist) + conf.HTTPStatusCodeWhitelist = normalizeList(conf.HTTPStatusCodeWhitelist) + + return conf, nil +} diff --git a/vendor/honnef.co/go/tools/config/example.conf b/vendor/honnef.co/go/tools/config/example.conf new file mode 100644 index 000000000..a715a24d4 --- /dev/null +++ b/vendor/honnef.co/go/tools/config/example.conf @@ -0,0 +1,10 @@ +checks = ["all", "-ST1003", "-ST1014"] +initialisms = ["ACL", "API", "ASCII", "CPU", "CSS", "DNS", + "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", + "IP", "JSON", "QPS", "RAM", "RPC", "SLA", + "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL", + "UDP", "UI", "GID", "UID", "UUID", "URI", + "URL", "UTF8", "VM", "XML", "XMPP", "XSRF", + "XSS", "SIP", "RTP"] +dot_import_whitelist = [] +http_status_code_whitelist = ["200", "400", "404", "500"] diff --git a/vendor/honnef.co/go/tools/deprecated/stdlib.go b/vendor/honnef.co/go/tools/deprecated/stdlib.go new file mode 100644 index 000000000..5d8ce186b --- /dev/null +++ b/vendor/honnef.co/go/tools/deprecated/stdlib.go @@ -0,0 +1,112 @@ +package deprecated + +type Deprecation struct { + DeprecatedSince int + AlternativeAvailableSince int +} + +var Stdlib = map[string]Deprecation{ + "image/jpeg.Reader": {4, 0}, + // FIXME(dh): AllowBinary isn't being detected as deprecated + // because the comment has a newline right after "Deprecated:" + "go/build.AllowBinary": {7, 7}, + "(archive/zip.FileHeader).CompressedSize": {1, 1}, + "(archive/zip.FileHeader).UncompressedSize": {1, 1}, + "(archive/zip.FileHeader).ModifiedTime": {10, 10}, + "(archive/zip.FileHeader).ModifiedDate": {10, 10}, + "(*archive/zip.FileHeader).ModTime": {10, 10}, + "(*archive/zip.FileHeader).SetModTime": {10, 10}, + "(go/doc.Package).Bugs": {1, 1}, + "os.SEEK_SET": {7, 7}, + "os.SEEK_CUR": {7, 7}, + "os.SEEK_END": {7, 7}, + "(net.Dialer).Cancel": {7, 7}, + "runtime.CPUProfile": {9, 0}, + "compress/flate.ReadError": {6, 6}, + "compress/flate.WriteError": {6, 6}, + "path/filepath.HasPrefix": {0, 0}, + "(net/http.Transport).Dial": {7, 7}, + "(*net/http.Transport).CancelRequest": {6, 5}, + "net/http.ErrWriteAfterFlush": {7, 0}, + "net/http.ErrHeaderTooLong": {8, 0}, + "net/http.ErrShortBody": {8, 0}, + "net/http.ErrMissingContentLength": {8, 0}, + "net/http/httputil.ErrPersistEOF": {0, 0}, + "net/http/httputil.ErrClosed": {0, 0}, + "net/http/httputil.ErrPipeline": {0, 0}, + "net/http/httputil.ServerConn": {0, 0}, + "net/http/httputil.NewServerConn": {0, 0}, + "net/http/httputil.ClientConn": {0, 0}, + "net/http/httputil.NewClientConn": {0, 0}, + "net/http/httputil.NewProxyClientConn": {0, 0}, + "(net/http.Request).Cancel": {7, 7}, + "(text/template/parse.PipeNode).Line": {1, 1}, + "(text/template/parse.ActionNode).Line": {1, 1}, + "(text/template/parse.BranchNode).Line": {1, 1}, + "(text/template/parse.TemplateNode).Line": {1, 1}, + "database/sql/driver.ColumnConverter": {9, 9}, + "database/sql/driver.Execer": {8, 8}, + "database/sql/driver.Queryer": {8, 8}, + "(database/sql/driver.Conn).Begin": {8, 8}, + "(database/sql/driver.Stmt).Exec": {8, 8}, + 
"(database/sql/driver.Stmt).Query": {8, 8}, + "syscall.StringByteSlice": {1, 1}, + "syscall.StringBytePtr": {1, 1}, + "syscall.StringSlicePtr": {1, 1}, + "syscall.StringToUTF16": {1, 1}, + "syscall.StringToUTF16Ptr": {1, 1}, + "(*regexp.Regexp).Copy": {12, 12}, + "(archive/tar.Header).Xattrs": {10, 10}, + "archive/tar.TypeRegA": {11, 1}, + "go/types.NewInterface": {11, 11}, + "(*go/types.Interface).Embedded": {11, 11}, + "go/importer.For": {12, 12}, + "encoding/json.InvalidUTF8Error": {2, 2}, + "encoding/json.UnmarshalFieldError": {2, 2}, + "encoding/csv.ErrTrailingComma": {2, 2}, + "(encoding/csv.Reader).TrailingComma": {2, 2}, + "(net.Dialer).DualStack": {12, 12}, + "net/http.ErrUnexpectedTrailer": {12, 12}, + "net/http.CloseNotifier": {11, 7}, + "net/http.ProtocolError": {8, 8}, + "(crypto/x509.CertificateRequest).Attributes": {5, 3}, + // This function has no alternative, but also no purpose. + "(*crypto/rc4.Cipher).Reset": {12, 0}, + "(net/http/httptest.ResponseRecorder).HeaderMap": {11, 7}, + + // All of these have been deprecated in favour of external libraries + "syscall.AttachLsf": {7, 0}, + "syscall.DetachLsf": {7, 0}, + "syscall.LsfSocket": {7, 0}, + "syscall.SetLsfPromisc": {7, 0}, + "syscall.LsfJump": {7, 0}, + "syscall.LsfStmt": {7, 0}, + "syscall.BpfStmt": {7, 0}, + "syscall.BpfJump": {7, 0}, + "syscall.BpfBuflen": {7, 0}, + "syscall.SetBpfBuflen": {7, 0}, + "syscall.BpfDatalink": {7, 0}, + "syscall.SetBpfDatalink": {7, 0}, + "syscall.SetBpfPromisc": {7, 0}, + "syscall.FlushBpf": {7, 0}, + "syscall.BpfInterface": {7, 0}, + "syscall.SetBpfInterface": {7, 0}, + "syscall.BpfTimeout": {7, 0}, + "syscall.SetBpfTimeout": {7, 0}, + "syscall.BpfStats": {7, 0}, + "syscall.SetBpfImmediate": {7, 0}, + "syscall.SetBpf": {7, 0}, + "syscall.CheckBpfVersion": {7, 0}, + "syscall.BpfHeadercmpl": {7, 0}, + "syscall.SetBpfHeadercmpl": {7, 0}, + "syscall.RouteRIB": {8, 0}, + "syscall.RoutingMessage": {8, 0}, + "syscall.RouteMessage": {8, 0}, + "syscall.InterfaceMessage": {8, 0}, + "syscall.InterfaceAddrMessage": {8, 0}, + "syscall.ParseRoutingMessage": {8, 0}, + "syscall.ParseRoutingSockaddr": {8, 0}, + "InterfaceAnnounceMessage": {7, 0}, + "InterfaceMulticastAddrMessage": {7, 0}, + "syscall.FormatMessage": {5, 0}, +} diff --git a/vendor/honnef.co/go/tools/facts/deprecated.go b/vendor/honnef.co/go/tools/facts/deprecated.go new file mode 100644 index 000000000..8587b0e0e --- /dev/null +++ b/vendor/honnef.co/go/tools/facts/deprecated.go @@ -0,0 +1,144 @@ +package facts + +import ( + "go/ast" + "go/token" + "go/types" + "reflect" + "strings" + + "golang.org/x/tools/go/analysis" +) + +type IsDeprecated struct{ Msg string } + +func (*IsDeprecated) AFact() {} +func (d *IsDeprecated) String() string { return "Deprecated: " + d.Msg } + +type DeprecatedResult struct { + Objects map[types.Object]*IsDeprecated + Packages map[*types.Package]*IsDeprecated +} + +var Deprecated = &analysis.Analyzer{ + Name: "fact_deprecated", + Doc: "Mark deprecated objects", + Run: deprecated, + FactTypes: []analysis.Fact{(*IsDeprecated)(nil)}, + ResultType: reflect.TypeOf(DeprecatedResult{}), +} + +func deprecated(pass *analysis.Pass) (interface{}, error) { + var names []*ast.Ident + + extractDeprecatedMessage := func(docs []*ast.CommentGroup) string { + for _, doc := range docs { + if doc == nil { + continue + } + parts := strings.Split(doc.Text(), "\n\n") + last := parts[len(parts)-1] + if !strings.HasPrefix(last, "Deprecated: ") { + continue + } + alt := last[len("Deprecated: "):] + alt = strings.Replace(alt, "\n", " ", 
-1) + return alt + } + return "" + } + doDocs := func(names []*ast.Ident, docs []*ast.CommentGroup) { + alt := extractDeprecatedMessage(docs) + if alt == "" { + return + } + + for _, name := range names { + obj := pass.TypesInfo.ObjectOf(name) + pass.ExportObjectFact(obj, &IsDeprecated{alt}) + } + } + + var docs []*ast.CommentGroup + for _, f := range pass.Files { + docs = append(docs, f.Doc) + } + if alt := extractDeprecatedMessage(docs); alt != "" { + // Don't mark package syscall as deprecated, even though + // it is. A lot of people still use it for simple + // constants like SIGKILL, and I am not comfortable + // telling them to use x/sys for that. + if pass.Pkg.Path() != "syscall" { + pass.ExportPackageFact(&IsDeprecated{alt}) + } + } + + docs = docs[:0] + for _, f := range pass.Files { + fn := func(node ast.Node) bool { + if node == nil { + return true + } + var ret bool + switch node := node.(type) { + case *ast.GenDecl: + switch node.Tok { + case token.TYPE, token.CONST, token.VAR: + docs = append(docs, node.Doc) + return true + default: + return false + } + case *ast.FuncDecl: + docs = append(docs, node.Doc) + names = []*ast.Ident{node.Name} + ret = false + case *ast.TypeSpec: + docs = append(docs, node.Doc) + names = []*ast.Ident{node.Name} + ret = true + case *ast.ValueSpec: + docs = append(docs, node.Doc) + names = node.Names + ret = false + case *ast.File: + return true + case *ast.StructType: + for _, field := range node.Fields.List { + doDocs(field.Names, []*ast.CommentGroup{field.Doc}) + } + return false + case *ast.InterfaceType: + for _, field := range node.Methods.List { + doDocs(field.Names, []*ast.CommentGroup{field.Doc}) + } + return false + default: + return false + } + if len(names) == 0 || len(docs) == 0 { + return ret + } + doDocs(names, docs) + + docs = docs[:0] + names = nil + return ret + } + ast.Inspect(f, fn) + } + + out := DeprecatedResult{ + Objects: map[types.Object]*IsDeprecated{}, + Packages: map[*types.Package]*IsDeprecated{}, + } + + for _, fact := range pass.AllObjectFacts() { + out.Objects[fact.Object] = fact.Fact.(*IsDeprecated) + } + for _, fact := range pass.AllPackageFacts() { + out.Packages[fact.Package] = fact.Fact.(*IsDeprecated) + } + + return out, nil +} diff --git a/vendor/honnef.co/go/tools/facts/generated.go b/vendor/honnef.co/go/tools/facts/generated.go new file mode 100644 index 000000000..1ed9563a3 --- /dev/null +++ b/vendor/honnef.co/go/tools/facts/generated.go @@ -0,0 +1,86 @@ +package facts + +import ( + "bufio" + "bytes" + "io" + "os" + "reflect" + "strings" + + "golang.org/x/tools/go/analysis" +) + +type Generator int + +// A list of known generators we can detect +const ( + Unknown Generator = iota + Goyacc + Cgo + Stringer +) + +var ( + // used by cgo before Go 1.11 + oldCgo = []byte("// Created by cgo - DO NOT EDIT") + prefix = []byte("// Code generated ") + suffix = []byte(" DO NOT EDIT.") + nl = []byte("\n") + crnl = []byte("\r\n") +) + +func isGenerated(path string) (Generator, bool) { + f, err := os.Open(path) + if err != nil { + return 0, false + } + defer f.Close() + br := bufio.NewReader(f) + for { + s, err := br.ReadBytes('\n') + if err != nil && err != io.EOF { + return 0, false + } + s = bytes.TrimSuffix(s, crnl) + s = bytes.TrimSuffix(s, nl) + if bytes.HasPrefix(s, prefix) && bytes.HasSuffix(s, suffix) { + text := string(s[len(prefix) : len(s)-len(suffix)]) + switch text { + case "by goyacc.": + return Goyacc, true + case "by cmd/cgo;": + return Cgo, true + } + if strings.HasPrefix(text, `by "stringer `) { + return 
Stringer, true + } + return Unknown, true + } + if bytes.Equal(s, oldCgo) { + return Cgo, true + } + if err == io.EOF { + break + } + } + return 0, false +} + +var Generated = &analysis.Analyzer{ + Name: "isgenerated", + Doc: "annotate file names that have been code generated", + Run: func(pass *analysis.Pass) (interface{}, error) { + m := map[string]Generator{} + for _, f := range pass.Files { + path := pass.Fset.PositionFor(f.Pos(), false).Filename + g, ok := isGenerated(path) + if ok { + m[path] = g + } + } + return m, nil + }, + RunDespiteErrors: true, + ResultType: reflect.TypeOf(map[string]Generator{}), +} diff --git a/vendor/honnef.co/go/tools/facts/purity.go b/vendor/honnef.co/go/tools/facts/purity.go new file mode 100644 index 000000000..861ca4110 --- /dev/null +++ b/vendor/honnef.co/go/tools/facts/purity.go @@ -0,0 +1,175 @@ +package facts + +import ( + "go/token" + "go/types" + "reflect" + + "golang.org/x/tools/go/analysis" + "honnef.co/go/tools/functions" + "honnef.co/go/tools/internal/passes/buildssa" + "honnef.co/go/tools/ssa" +) + +type IsPure struct{} + +func (*IsPure) AFact() {} +func (d *IsPure) String() string { return "is pure" } + +type PurityResult map[*types.Func]*IsPure + +var Purity = &analysis.Analyzer{ + Name: "fact_purity", + Doc: "Mark pure functions", + Run: purity, + Requires: []*analysis.Analyzer{buildssa.Analyzer}, + FactTypes: []analysis.Fact{(*IsPure)(nil)}, + ResultType: reflect.TypeOf(PurityResult{}), +} + +var pureStdlib = map[string]struct{}{ + "errors.New": {}, + "fmt.Errorf": {}, + "fmt.Sprintf": {}, + "fmt.Sprint": {}, + "sort.Reverse": {}, + "strings.Map": {}, + "strings.Repeat": {}, + "strings.Replace": {}, + "strings.Title": {}, + "strings.ToLower": {}, + "strings.ToLowerSpecial": {}, + "strings.ToTitle": {}, + "strings.ToTitleSpecial": {}, + "strings.ToUpper": {}, + "strings.ToUpperSpecial": {}, + "strings.Trim": {}, + "strings.TrimFunc": {}, + "strings.TrimLeft": {}, + "strings.TrimLeftFunc": {}, + "strings.TrimPrefix": {}, + "strings.TrimRight": {}, + "strings.TrimRightFunc": {}, + "strings.TrimSpace": {}, + "strings.TrimSuffix": {}, + "(*net/http.Request).WithContext": {}, +} + +func purity(pass *analysis.Pass) (interface{}, error) { + seen := map[*ssa.Function]struct{}{} + ssapkg := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).Pkg + var check func(ssafn *ssa.Function) (ret bool) + check = func(ssafn *ssa.Function) (ret bool) { + if ssafn.Object() == nil { + // TODO(dh): support closures + return false + } + if pass.ImportObjectFact(ssafn.Object(), new(IsPure)) { + return true + } + if ssafn.Pkg != ssapkg { + // Function is in another package but wasn't marked as + // pure, ergo it isn't pure + return false + } + // Break recursion + if _, ok := seen[ssafn]; ok { + return false + } + + seen[ssafn] = struct{}{} + defer func() { + if ret { + pass.ExportObjectFact(ssafn.Object(), &IsPure{}) + } + }() + + if functions.IsStub(ssafn) { + return false + } + + if _, ok := pureStdlib[ssafn.Object().(*types.Func).FullName()]; ok { + return true + } + + if ssafn.Signature.Results().Len() == 0 { + // A function with no return values is empty or is doing some + // work we cannot see (for example because of build tags); + // don't consider it pure. 
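// Illustration only (not from the vendored sources): the kind of functions the
// purity fact above would and would not mark as pure. Names are invented.
package example

// Likely pure: only basic-typed parameters, arithmetic, and a call to another
// pure function in the same package; no stores, channel operations, or panics.
func square(n int) int { return n * n }

func sumOfSquares(a, b int) int { return square(a) + square(b) }

var counter int

// Not pure: it loads and stores package-level state.
func bump(n int) int {
	counter += n
	return counter
}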
+ return false + } + + for _, param := range ssafn.Params { + if _, ok := param.Type().Underlying().(*types.Basic); !ok { + return false + } + } + + if ssafn.Blocks == nil { + return false + } + checkCall := func(common *ssa.CallCommon) bool { + if common.IsInvoke() { + return false + } + builtin, ok := common.Value.(*ssa.Builtin) + if !ok { + if common.StaticCallee() != ssafn { + if common.StaticCallee() == nil { + return false + } + if !check(common.StaticCallee()) { + return false + } + } + } else { + switch builtin.Name() { + case "len", "cap", "make", "new": + default: + return false + } + } + return true + } + for _, b := range ssafn.Blocks { + for _, ins := range b.Instrs { + switch ins := ins.(type) { + case *ssa.Call: + if !checkCall(ins.Common()) { + return false + } + case *ssa.Defer: + if !checkCall(&ins.Call) { + return false + } + case *ssa.Select: + return false + case *ssa.Send: + return false + case *ssa.Go: + return false + case *ssa.Panic: + return false + case *ssa.Store: + return false + case *ssa.FieldAddr: + return false + case *ssa.UnOp: + if ins.Op == token.MUL || ins.Op == token.AND { + return false + } + } + } + } + return true + } + for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + check(ssafn) + } + + out := PurityResult{} + for _, fact := range pass.AllObjectFacts() { + out[fact.Object.(*types.Func)] = fact.Fact.(*IsPure) + } + return out, nil +} diff --git a/vendor/honnef.co/go/tools/facts/token.go b/vendor/honnef.co/go/tools/facts/token.go new file mode 100644 index 000000000..26e76ff73 --- /dev/null +++ b/vendor/honnef.co/go/tools/facts/token.go @@ -0,0 +1,24 @@ +package facts + +import ( + "go/ast" + "go/token" + "reflect" + + "golang.org/x/tools/go/analysis" +) + +var TokenFile = &analysis.Analyzer{ + Name: "tokenfileanalyzer", + Doc: "creates a mapping of *token.File to *ast.File", + Run: func(pass *analysis.Pass) (interface{}, error) { + m := map[*token.File]*ast.File{} + for _, af := range pass.Files { + tf := pass.Fset.File(af.Pos()) + m[tf] = af + } + return m, nil + }, + RunDespiteErrors: true, + ResultType: reflect.TypeOf(map[*token.File]*ast.File{}), +} diff --git a/vendor/honnef.co/go/tools/functions/loops.go b/vendor/honnef.co/go/tools/functions/loops.go new file mode 100644 index 000000000..15877a2f9 --- /dev/null +++ b/vendor/honnef.co/go/tools/functions/loops.go @@ -0,0 +1,54 @@ +package functions + +import "honnef.co/go/tools/ssa" + +type Loop struct{ ssa.BlockSet } + +func FindLoops(fn *ssa.Function) []Loop { + if fn.Blocks == nil { + return nil + } + tree := fn.DomPreorder() + var sets []Loop + for _, h := range tree { + for _, n := range h.Preds { + if !h.Dominates(n) { + continue + } + // n is a back-edge to h + // h is the loop header + if n == h { + set := Loop{} + set.Add(n) + sets = append(sets, set) + continue + } + set := Loop{} + set.Add(h) + set.Add(n) + for _, b := range allPredsBut(n, h, nil) { + set.Add(b) + } + sets = append(sets, set) + } + } + return sets +} + +func allPredsBut(b, but *ssa.BasicBlock, list []*ssa.BasicBlock) []*ssa.BasicBlock { +outer: + for _, pred := range b.Preds { + if pred == but { + continue + } + for _, p := range list { + // TODO improve big-o complexity of this function + if pred == p { + continue outer + } + } + list = append(list, pred) + list = allPredsBut(pred, but, list) + } + return list +} diff --git a/vendor/honnef.co/go/tools/functions/pure.go b/vendor/honnef.co/go/tools/functions/pure.go new file mode 100644 index 000000000..8bc558771 --- /dev/null +++ 
b/vendor/honnef.co/go/tools/functions/pure.go @@ -0,0 +1,46 @@ +package functions + +import ( + "honnef.co/go/tools/ssa" +) + +func filterDebug(instr []ssa.Instruction) []ssa.Instruction { + var out []ssa.Instruction + for _, ins := range instr { + if _, ok := ins.(*ssa.DebugRef); !ok { + out = append(out, ins) + } + } + return out +} + +// IsStub reports whether a function is a stub. A function is +// considered a stub if it has no instructions or exactly one +// instruction, which must be either returning only constant values or +// a panic. +func IsStub(fn *ssa.Function) bool { + if len(fn.Blocks) == 0 { + return true + } + if len(fn.Blocks) > 1 { + return false + } + instrs := filterDebug(fn.Blocks[0].Instrs) + if len(instrs) != 1 { + return false + } + + switch instrs[0].(type) { + case *ssa.Return: + // Since this is the only instruction, the return value must + // be a constant. We consider all constants as stubs, not just + // the zero value. This does not, unfortunately, cover zero + // initialised structs, as these cause additional + // instructions. + return true + case *ssa.Panic: + return true + default: + return false + } +} diff --git a/vendor/honnef.co/go/tools/functions/terminates.go b/vendor/honnef.co/go/tools/functions/terminates.go new file mode 100644 index 000000000..3e9c3a23f --- /dev/null +++ b/vendor/honnef.co/go/tools/functions/terminates.go @@ -0,0 +1,24 @@ +package functions + +import "honnef.co/go/tools/ssa" + +// Terminates reports whether fn is supposed to return, that is if it +// has at least one theoretic path that returns from the function. +// Explicit panics do not count as terminating. +func Terminates(fn *ssa.Function) bool { + if fn.Blocks == nil { + // assuming that a function terminates is the conservative + // choice + return true + } + + for _, block := range fn.Blocks { + if len(block.Instrs) == 0 { + continue + } + if _, ok := block.Instrs[len(block.Instrs)-1].(*ssa.Return); ok { + return true + } + } + return false +} diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/callee.go b/vendor/honnef.co/go/tools/go/types/typeutil/callee.go new file mode 100644 index 000000000..38f596daf --- /dev/null +++ b/vendor/honnef.co/go/tools/go/types/typeutil/callee.go @@ -0,0 +1,46 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/go/ast/astutil" +) + +// Callee returns the named target of a function call, if any: +// a function, method, builtin, or variable. +func Callee(info *types.Info, call *ast.CallExpr) types.Object { + var obj types.Object + switch fun := astutil.Unparen(call.Fun).(type) { + case *ast.Ident: + obj = info.Uses[fun] // type, var, builtin, or declared func + case *ast.SelectorExpr: + if sel, ok := info.Selections[fun]; ok { + obj = sel.Obj() // method or field + } else { + obj = info.Uses[fun.Sel] // qualified identifier? + } + } + if _, ok := obj.(*types.TypeName); ok { + return nil // T(x) is a conversion, not a call + } + return obj +} + +// StaticCallee returns the target (function or method) of a static +// function call, if any. It returns nil for calls to builtins. 
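// Hedged usage sketch (illustration only, not from the vendored sources):
// resolving the object a call expression refers to with Callee. The source
// snippet and package path are made up; a standard Go installation is assumed
// for importing "strings".
package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"

	"honnef.co/go/tools/go/types/typeutil"
)

const src = `package p

import "strings"

func f() string { return strings.TrimSpace(" hi ") }`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	info := &types.Info{
		Uses:       map[*ast.Ident]types.Object{},
		Selections: map[*ast.SelectorExpr]*types.Selection{},
	}
	conf := types.Config{Importer: importer.Default()}
	if _, err := conf.Check("p", fset, []*ast.File{file}, info); err != nil {
		panic(err)
	}
	ast.Inspect(file, func(n ast.Node) bool {
		if call, ok := n.(*ast.CallExpr); ok {
			if obj := typeutil.Callee(info, call); obj != nil {
				fmt.Println(obj.Name()) // TrimSpace
			}
		}
		return true
	})
}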
+func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func { + if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) { + return f + } + return nil +} + +func interfaceMethod(f *types.Func) bool { + recv := f.Type().(*types.Signature).Recv() + return recv != nil && types.IsInterface(recv.Type()) +} diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/identical.go b/vendor/honnef.co/go/tools/go/types/typeutil/identical.go new file mode 100644 index 000000000..c0ca441c3 --- /dev/null +++ b/vendor/honnef.co/go/tools/go/types/typeutil/identical.go @@ -0,0 +1,75 @@ +package typeutil + +import ( + "go/types" +) + +// Identical reports whether x and y are identical types. +// Unlike types.Identical, receivers of Signature types are not ignored. +// Unlike types.Identical, interfaces are compared via pointer equality (except for the empty interface, which gets deduplicated). +// Unlike types.Identical, structs are compared via pointer equality. +func Identical(x, y types.Type) (ret bool) { + if !types.Identical(x, y) { + return false + } + + switch x := x.(type) { + case *types.Struct: + y, ok := y.(*types.Struct) + if !ok { + // should be impossible + return true + } + return x == y + case *types.Interface: + // The issue with interfaces, typeutil.Map and types.Identical + // + // types.Identical, when comparing two interfaces, only looks at the set + // of all methods, not differentiating between implicit (embedded) and + // explicit methods. + // + // When we see the following two types, in source order + // + // type I1 interface { foo() } + // type I2 interface { I1 } + // + // then we will first correctly process I1 and its underlying type. When + // we get to I2, we will see that its underlying type is identical to + // that of I1 and not process it again. This, however, means that we will + // not record the fact that I2 embeds I1. If only I2 is reachable via the + // graph root, then I1 will not be considered used. + // + // We choose to be lazy and compare interfaces by their + // pointers. This will obviously miss identical interfaces, + // but this only has a runtime cost, it doesn't affect + // correctness. + y, ok := y.(*types.Interface) + if !ok { + // should be impossible + return true + } + if x.NumEmbeddeds() == 0 && + y.NumEmbeddeds() == 0 && + x.NumMethods() == 0 && + y.NumMethods() == 0 { + // all truly empty interfaces are the same + return true + } + return x == y + case *types.Signature: + y, ok := y.(*types.Signature) + if !ok { + // should be impossible + return true + } + if x.Recv() == y.Recv() { + return true + } + if x.Recv() == nil || y.Recv() == nil { + return false + } + return Identical(x.Recv().Type(), y.Recv().Type()) + default: + return true + } +} diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/imports.go b/vendor/honnef.co/go/tools/go/types/typeutil/imports.go new file mode 100644 index 000000000..9c441dba9 --- /dev/null +++ b/vendor/honnef.co/go/tools/go/types/typeutil/imports.go @@ -0,0 +1,31 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +import "go/types" + +// Dependencies returns all dependencies of the specified packages. +// +// Dependent packages appear in topological order: if package P imports +// package Q, Q appears earlier than P in the result. +// The algorithm follows import statements in the order they +// appear in the source code, so the result is a total order. 
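// Hedged usage sketch (illustration only, not from the vendored sources):
// Dependencies lists imported packages before their importers. The packages
// here are constructed by hand purely for illustration.
package main

import (
	"fmt"
	"go/types"

	"honnef.co/go/tools/go/types/typeutil"
)

func main() {
	q := types.NewPackage("example.com/q", "q")
	p := types.NewPackage("example.com/p", "p")
	p.SetImports([]*types.Package{q}) // p imports q

	for _, pkg := range typeutil.Dependencies(p) {
		fmt.Println(pkg.Path())
	}
	// Prints example.com/q before example.com/p.
}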
+// +func Dependencies(pkgs ...*types.Package) []*types.Package { + var result []*types.Package + seen := make(map[*types.Package]bool) + var visit func(pkgs []*types.Package) + visit = func(pkgs []*types.Package) { + for _, p := range pkgs { + if !seen[p] { + seen[p] = true + visit(p.Imports()) + result = append(result, p) + } + } + } + visit(pkgs) + return result +} diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/map.go b/vendor/honnef.co/go/tools/go/types/typeutil/map.go new file mode 100644 index 000000000..f929353cc --- /dev/null +++ b/vendor/honnef.co/go/tools/go/types/typeutil/map.go @@ -0,0 +1,319 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeutil defines various utilities for types, such as Map, +// a mapping from types.Type to interface{} values. +package typeutil + +import ( + "bytes" + "fmt" + "go/types" + "reflect" +) + +// Map is a hash-table-based mapping from types (types.Type) to +// arbitrary interface{} values. The concrete types that implement +// the Type interface are pointers. Since they are not canonicalized, +// == cannot be used to check for equivalence, and thus we cannot +// simply use a Go map. +// +// Just as with map[K]V, a nil *Map is a valid empty map. +// +// Not thread-safe. +// +// This fork handles Signatures correctly, respecting method +// receivers. Furthermore, it doesn't deduplicate interfaces or +// structs. Interfaces aren't deduplicated as not to conflate implicit +// and explicit methods. Structs aren't deduplicated because we track +// fields of each type separately. +// +type Map struct { + hasher Hasher // shared by many Maps + table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused + length int // number of map entries +} + +// entry is an entry (key/value association) in a hash bucket. +type entry struct { + key types.Type + value interface{} +} + +// SetHasher sets the hasher used by Map. +// +// All Hashers are functionally equivalent but contain internal state +// used to cache the results of hashing previously seen types. +// +// A single Hasher created by MakeHasher() may be shared among many +// Maps. This is recommended if the instances have many keys in +// common, as it will amortize the cost of hash computation. +// +// A Hasher may grow without bound as new types are seen. Even when a +// type is deleted from the map, the Hasher never shrinks, since other +// types in the map may reference the deleted type indirectly. +// +// Hashers are not thread-safe, and read-only operations such as +// Map.Lookup require updates to the hasher, so a full Mutex lock (not a +// read-lock) is require around all Map operations if a shared +// hasher is accessed from multiple threads. +// +// If SetHasher is not called, the Map will create a private hasher at +// the first call to Insert. +// +func (m *Map) SetHasher(hasher Hasher) { + m.hasher = hasher +} + +// Delete removes the entry with the given key, if any. +// It returns true if the entry was found. +// +func (m *Map) Delete(key types.Type) bool { + if m != nil && m.table != nil { + hash := m.hasher.Hash(key) + bucket := m.table[hash] + for i, e := range bucket { + if e.key != nil && Identical(key, e.key) { + // We can't compact the bucket as it + // would disturb iterators. + bucket[i] = entry{} + m.length-- + return true + } + } + } + return false +} + +// At returns the map entry for the given key. 
+// The result is nil if the entry is not present. +// +func (m *Map) At(key types.Type) interface{} { + if m != nil && m.table != nil { + for _, e := range m.table[m.hasher.Hash(key)] { + if e.key != nil && Identical(key, e.key) { + return e.value + } + } + } + return nil +} + +// Set sets the map entry for key to val, +// and returns the previous entry, if any. +func (m *Map) Set(key types.Type, value interface{}) (prev interface{}) { + if m.table != nil { + hash := m.hasher.Hash(key) + bucket := m.table[hash] + var hole *entry + for i, e := range bucket { + if e.key == nil { + hole = &bucket[i] + } else if Identical(key, e.key) { + prev = e.value + bucket[i].value = value + return + } + } + + if hole != nil { + *hole = entry{key, value} // overwrite deleted entry + } else { + m.table[hash] = append(bucket, entry{key, value}) + } + } else { + if m.hasher.memo == nil { + m.hasher = MakeHasher() + } + hash := m.hasher.Hash(key) + m.table = map[uint32][]entry{hash: {entry{key, value}}} + } + + m.length++ + return +} + +// Len returns the number of map entries. +func (m *Map) Len() int { + if m != nil { + return m.length + } + return 0 +} + +// Iterate calls function f on each entry in the map in unspecified order. +// +// If f should mutate the map, Iterate provides the same guarantees as +// Go maps: if f deletes a map entry that Iterate has not yet reached, +// f will not be invoked for it, but if f inserts a map entry that +// Iterate has not yet reached, whether or not f will be invoked for +// it is unspecified. +// +func (m *Map) Iterate(f func(key types.Type, value interface{})) { + if m != nil { + for _, bucket := range m.table { + for _, e := range bucket { + if e.key != nil { + f(e.key, e.value) + } + } + } + } +} + +// Keys returns a new slice containing the set of map keys. +// The order is unspecified. +func (m *Map) Keys() []types.Type { + keys := make([]types.Type, 0, m.Len()) + m.Iterate(func(key types.Type, _ interface{}) { + keys = append(keys, key) + }) + return keys +} + +func (m *Map) toString(values bool) string { + if m == nil { + return "{}" + } + var buf bytes.Buffer + fmt.Fprint(&buf, "{") + sep := "" + m.Iterate(func(key types.Type, value interface{}) { + fmt.Fprint(&buf, sep) + sep = ", " + fmt.Fprint(&buf, key) + if values { + fmt.Fprintf(&buf, ": %q", value) + } + }) + fmt.Fprint(&buf, "}") + return buf.String() +} + +// String returns a string representation of the map's entries. +// Values are printed using fmt.Sprintf("%v", v). +// Order is unspecified. +// +func (m *Map) String() string { + return m.toString(true) +} + +// KeysString returns a string representation of the map's key set. +// Order is unspecified. +// +func (m *Map) KeysString() string { + return m.toString(false) +} + +//////////////////////////////////////////////////////////////////////// +// Hasher + +// A Hasher maps each type to its hash value. +// For efficiency, a hasher uses memoization; thus its memory +// footprint grows monotonically over time. +// Hashers are not thread-safe. +// Hashers have reference semantics. +// Call MakeHasher to create a Hasher. +type Hasher struct { + memo map[types.Type]uint32 +} + +// MakeHasher returns a new Hasher instance. +func MakeHasher() Hasher { + return Hasher{make(map[types.Type]uint32)} +} + +// Hash computes a hash value for the given type t such that +// Identical(t, t') => Hash(t) == Hash(t'). 
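// Hedged usage sketch (illustration only, not from the vendored sources):
// the Map above keys entries by type identity rather than pointer equality,
// so two distinct but identical types share a single entry.
package main

import (
	"fmt"
	"go/types"

	"honnef.co/go/tools/go/types/typeutil"
)

func main() {
	var m typeutil.Map

	a := types.NewSlice(types.Typ[types.Int])
	b := types.NewSlice(types.Typ[types.Int]) // distinct pointer, identical type

	m.Set(a, "slice of int")
	fmt.Println(m.At(b)) // slice of int
	fmt.Println(m.Len()) // 1
}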
+func (h Hasher) Hash(t types.Type) uint32 { + hash, ok := h.memo[t] + if !ok { + hash = h.hashFor(t) + h.memo[t] = hash + } + return hash +} + +// hashString computes the Fowler–Noll–Vo hash of s. +func hashString(s string) uint32 { + var h uint32 + for i := 0; i < len(s); i++ { + h ^= uint32(s[i]) + h *= 16777619 + } + return h +} + +// hashFor computes the hash of t. +func (h Hasher) hashFor(t types.Type) uint32 { + // See Identical for rationale. + switch t := t.(type) { + case *types.Basic: + return uint32(t.Kind()) + + case *types.Array: + return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem()) + + case *types.Slice: + return 9049 + 2*h.Hash(t.Elem()) + + case *types.Struct: + var hash uint32 = 9059 + for i, n := 0, t.NumFields(); i < n; i++ { + f := t.Field(i) + if f.Anonymous() { + hash += 8861 + } + hash += hashString(t.Tag(i)) + hash += hashString(f.Name()) // (ignore f.Pkg) + hash += h.Hash(f.Type()) + } + return hash + + case *types.Pointer: + return 9067 + 2*h.Hash(t.Elem()) + + case *types.Signature: + var hash uint32 = 9091 + if t.Variadic() { + hash *= 8863 + } + return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) + + case *types.Interface: + var hash uint32 = 9103 + for i, n := 0, t.NumMethods(); i < n; i++ { + // See go/types.identicalMethods for rationale. + // Method order is not significant. + // Ignore m.Pkg(). + m := t.Method(i) + hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type()) + } + return hash + + case *types.Map: + return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem()) + + case *types.Chan: + return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem()) + + case *types.Named: + // Not safe with a copying GC; objects may move. + return uint32(reflect.ValueOf(t.Obj()).Pointer()) + + case *types.Tuple: + return h.hashTuple(t) + } + panic(t) +} + +func (h Hasher) hashTuple(tuple *types.Tuple) uint32 { + // See go/types.identicalTypes for rationale. + n := tuple.Len() + var hash uint32 = 9137 + 2*uint32(n) + for i := 0; i < n; i++ { + hash += 3 * h.Hash(tuple.At(i).Type()) + } + return hash +} diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/methodsetcache.go b/vendor/honnef.co/go/tools/go/types/typeutil/methodsetcache.go new file mode 100644 index 000000000..32084610f --- /dev/null +++ b/vendor/honnef.co/go/tools/go/types/typeutil/methodsetcache.go @@ -0,0 +1,72 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements a cache of method sets. + +package typeutil + +import ( + "go/types" + "sync" +) + +// A MethodSetCache records the method set of each type T for which +// MethodSet(T) is called so that repeat queries are fast. +// The zero value is a ready-to-use cache instance. +type MethodSetCache struct { + mu sync.Mutex + named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N + others map[types.Type]*types.MethodSet // all other types +} + +// MethodSet returns the method set of type T. It is thread-safe. +// +// If cache is nil, this function is equivalent to types.NewMethodSet(T). +// Utility functions can thus expose an optional *MethodSetCache +// parameter to clients that care about performance. 
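// Hedged usage sketch (illustration only, not from the vendored sources):
// a zero-value MethodSetCache memoizes method-set computation, so repeated
// queries for the same type return the cached *types.MethodSet.
package main

import (
	"fmt"
	"go/types"

	"honnef.co/go/tools/go/types/typeutil"
)

func main() {
	var cache typeutil.MethodSetCache

	T := types.Typ[types.String]
	first := cache.MethodSet(T)
	second := cache.MethodSet(T)

	fmt.Println(first.Len())     // 0: string has no methods
	fmt.Println(first == second) // true: the second lookup is served from the cache
}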
+// +func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet { + if cache == nil { + return types.NewMethodSet(T) + } + cache.mu.Lock() + defer cache.mu.Unlock() + + switch T := T.(type) { + case *types.Named: + return cache.lookupNamed(T).value + + case *types.Pointer: + if N, ok := T.Elem().(*types.Named); ok { + return cache.lookupNamed(N).pointer + } + } + + // all other types + // (The map uses pointer equivalence, not type identity.) + mset := cache.others[T] + if mset == nil { + mset = types.NewMethodSet(T) + if cache.others == nil { + cache.others = make(map[types.Type]*types.MethodSet) + } + cache.others[T] = mset + } + return mset +} + +func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } { + if cache.named == nil { + cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet }) + } + // Avoid recomputing mset(*T) for each distinct Pointer + // instance whose underlying type is a named type. + msets, ok := cache.named[named] + if !ok { + msets.value = types.NewMethodSet(named) + msets.pointer = types.NewMethodSet(types.NewPointer(named)) + cache.named[named] = msets + } + return msets +} diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/ui.go b/vendor/honnef.co/go/tools/go/types/typeutil/ui.go new file mode 100644 index 000000000..9849c24ce --- /dev/null +++ b/vendor/honnef.co/go/tools/go/types/typeutil/ui.go @@ -0,0 +1,52 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +// This file defines utilities for user interfaces that display types. + +import "go/types" + +// IntuitiveMethodSet returns the intuitive method set of a type T, +// which is the set of methods you can call on an addressable value of +// that type. +// +// The result always contains MethodSet(T), and is exactly MethodSet(T) +// for interface types and for pointer-to-concrete types. +// For all other concrete types T, the result additionally +// contains each method belonging to *T if there is no identically +// named method on T itself. +// +// This corresponds to user intuition about method sets; +// this function is intended only for user interfaces. +// +// The order of the result is as for types.MethodSet(T). +// +func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection { + isPointerToConcrete := func(T types.Type) bool { + ptr, ok := T.(*types.Pointer) + return ok && !types.IsInterface(ptr.Elem()) + } + + var result []*types.Selection + mset := msets.MethodSet(T) + if types.IsInterface(T) || isPointerToConcrete(T) { + for i, n := 0, mset.Len(); i < n; i++ { + result = append(result, mset.At(i)) + } + } else { + // T is some other concrete type. + // Report methods of T and *T, preferring those of T. + pmset := msets.MethodSet(types.NewPointer(T)) + for i, n := 0, pmset.Len(); i < n; i++ { + meth := pmset.At(i) + if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil { + meth = m + } + result = append(result, meth) + } + + } + return result +} diff --git a/vendor/honnef.co/go/tools/internal/cache/cache.go b/vendor/honnef.co/go/tools/internal/cache/cache.go new file mode 100644 index 000000000..2b33ca106 --- /dev/null +++ b/vendor/honnef.co/go/tools/internal/cache/cache.go @@ -0,0 +1,474 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package cache implements a build artifact cache. +// +// This package is a slightly modified fork of Go's +// cmd/go/internal/cache package. +package cache + +import ( + "bytes" + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "honnef.co/go/tools/internal/renameio" +) + +// An ActionID is a cache action key, the hash of a complete description of a +// repeatable computation (command line, environment variables, +// input file contents, executable contents). +type ActionID [HashSize]byte + +// An OutputID is a cache output key, the hash of an output of a computation. +type OutputID [HashSize]byte + +// A Cache is a package cache, backed by a file system directory tree. +type Cache struct { + dir string + now func() time.Time +} + +// Open opens and returns the cache in the given directory. +// +// It is safe for multiple processes on a single machine to use the +// same cache directory in a local file system simultaneously. +// They will coordinate using operating system file locks and may +// duplicate effort but will not corrupt the cache. +// +// However, it is NOT safe for multiple processes on different machines +// to share a cache directory (for example, if the directory were stored +// in a network file system). File locking is notoriously unreliable in +// network file systems and may not suffice to protect the cache. +// +func Open(dir string) (*Cache, error) { + info, err := os.Stat(dir) + if err != nil { + return nil, err + } + if !info.IsDir() { + return nil, &os.PathError{Op: "open", Path: dir, Err: fmt.Errorf("not a directory")} + } + for i := 0; i < 256; i++ { + name := filepath.Join(dir, fmt.Sprintf("%02x", i)) + if err := os.MkdirAll(name, 0777); err != nil { + return nil, err + } + } + c := &Cache{ + dir: dir, + now: time.Now, + } + return c, nil +} + +// fileName returns the name of the file corresponding to the given id. +func (c *Cache) fileName(id [HashSize]byte, key string) string { + return filepath.Join(c.dir, fmt.Sprintf("%02x", id[0]), fmt.Sprintf("%x", id)+"-"+key) +} + +var errMissing = errors.New("cache entry not found") + +const ( + // action entry file is "v1 \n" + hexSize = HashSize * 2 + entrySize = 2 + 1 + hexSize + 1 + hexSize + 1 + 20 + 1 + 20 + 1 +) + +// verify controls whether to run the cache in verify mode. +// In verify mode, the cache always returns errMissing from Get +// but then double-checks in Put that the data being written +// exactly matches any existing entry. This provides an easy +// way to detect program behavior that would have been different +// had the cache entry been returned from Get. +// +// verify is enabled by setting the environment variable +// GODEBUG=gocacheverify=1. +var verify = false + +// DebugTest is set when GODEBUG=gocachetest=1 is in the environment. +var DebugTest = false + +func init() { initEnv() } + +func initEnv() { + verify = false + debugHash = false + debug := strings.Split(os.Getenv("GODEBUG"), ",") + for _, f := range debug { + if f == "gocacheverify=1" { + verify = true + } + if f == "gocachehash=1" { + debugHash = true + } + if f == "gocachetest=1" { + DebugTest = true + } + } +} + +// Get looks up the action ID in the cache, +// returning the corresponding output ID and file size, if any. +// Note that finding an output ID does not guarantee that the +// saved file for that output ID is still available. 
+func (c *Cache) Get(id ActionID) (Entry, error) { + if verify { + return Entry{}, errMissing + } + return c.get(id) +} + +type Entry struct { + OutputID OutputID + Size int64 + Time time.Time +} + +// get is Get but does not respect verify mode, so that Put can use it. +func (c *Cache) get(id ActionID) (Entry, error) { + missing := func() (Entry, error) { + return Entry{}, errMissing + } + f, err := os.Open(c.fileName(id, "a")) + if err != nil { + return missing() + } + defer f.Close() + entry := make([]byte, entrySize+1) // +1 to detect whether f is too long + if n, err := io.ReadFull(f, entry); n != entrySize || err != io.ErrUnexpectedEOF { + return missing() + } + if entry[0] != 'v' || entry[1] != '1' || entry[2] != ' ' || entry[3+hexSize] != ' ' || entry[3+hexSize+1+hexSize] != ' ' || entry[3+hexSize+1+hexSize+1+20] != ' ' || entry[entrySize-1] != '\n' { + return missing() + } + eid, entry := entry[3:3+hexSize], entry[3+hexSize:] + eout, entry := entry[1:1+hexSize], entry[1+hexSize:] + esize, entry := entry[1:1+20], entry[1+20:] + //lint:ignore SA4006 See https://github.com/dominikh/go-tools/issues/465 + etime, entry := entry[1:1+20], entry[1+20:] + var buf [HashSize]byte + if _, err := hex.Decode(buf[:], eid); err != nil || buf != id { + return missing() + } + if _, err := hex.Decode(buf[:], eout); err != nil { + return missing() + } + i := 0 + for i < len(esize) && esize[i] == ' ' { + i++ + } + size, err := strconv.ParseInt(string(esize[i:]), 10, 64) + if err != nil || size < 0 { + return missing() + } + i = 0 + for i < len(etime) && etime[i] == ' ' { + i++ + } + tm, err := strconv.ParseInt(string(etime[i:]), 10, 64) + if err != nil || size < 0 { + return missing() + } + + c.used(c.fileName(id, "a")) + + return Entry{buf, size, time.Unix(0, tm)}, nil +} + +// GetFile looks up the action ID in the cache and returns +// the name of the corresponding data file. +func (c *Cache) GetFile(id ActionID) (file string, entry Entry, err error) { + entry, err = c.Get(id) + if err != nil { + return "", Entry{}, err + } + file = c.OutputFile(entry.OutputID) + info, err := os.Stat(file) + if err != nil || info.Size() != entry.Size { + return "", Entry{}, errMissing + } + return file, entry, nil +} + +// GetBytes looks up the action ID in the cache and returns +// the corresponding output bytes. +// GetBytes should only be used for data that can be expected to fit in memory. +func (c *Cache) GetBytes(id ActionID) ([]byte, Entry, error) { + entry, err := c.Get(id) + if err != nil { + return nil, entry, err + } + data, _ := ioutil.ReadFile(c.OutputFile(entry.OutputID)) + if sha256.Sum256(data) != entry.OutputID { + return nil, entry, errMissing + } + return data, entry, nil +} + +// OutputFile returns the name of the cache file storing output with the given OutputID. +func (c *Cache) OutputFile(out OutputID) string { + file := c.fileName(out, "d") + c.used(file) + return file +} + +// Time constants for cache expiration. +// +// We set the mtime on a cache file on each use, but at most one per mtimeInterval (1 hour), +// to avoid causing many unnecessary inode updates. The mtimes therefore +// roughly reflect "time of last use" but may in fact be older by at most an hour. +// +// We scan the cache for entries to delete at most once per trimInterval (1 day). +// +// When we do scan the cache, we delete entries that have not been used for +// at least trimLimit (5 days). 
Statistics gathered from a month of usage by +// Go developers found that essentially all reuse of cached entries happened +// within 5 days of the previous reuse. See golang.org/issue/22990. +const ( + mtimeInterval = 1 * time.Hour + trimInterval = 24 * time.Hour + trimLimit = 5 * 24 * time.Hour +) + +// used makes a best-effort attempt to update mtime on file, +// so that mtime reflects cache access time. +// +// Because the reflection only needs to be approximate, +// and to reduce the amount of disk activity caused by using +// cache entries, used only updates the mtime if the current +// mtime is more than an hour old. This heuristic eliminates +// nearly all of the mtime updates that would otherwise happen, +// while still keeping the mtimes useful for cache trimming. +func (c *Cache) used(file string) { + info, err := os.Stat(file) + if err == nil && c.now().Sub(info.ModTime()) < mtimeInterval { + return + } + os.Chtimes(file, c.now(), c.now()) +} + +// Trim removes old cache entries that are likely not to be reused. +func (c *Cache) Trim() { + now := c.now() + + // We maintain in dir/trim.txt the time of the last completed cache trim. + // If the cache has been trimmed recently enough, do nothing. + // This is the common case. + data, _ := ioutil.ReadFile(filepath.Join(c.dir, "trim.txt")) + t, err := strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) + if err == nil && now.Sub(time.Unix(t, 0)) < trimInterval { + return + } + + // Trim each of the 256 subdirectories. + // We subtract an additional mtimeInterval + // to account for the imprecision of our "last used" mtimes. + cutoff := now.Add(-trimLimit - mtimeInterval) + for i := 0; i < 256; i++ { + subdir := filepath.Join(c.dir, fmt.Sprintf("%02x", i)) + c.trimSubdir(subdir, cutoff) + } + + // Ignore errors from here: if we don't write the complete timestamp, the + // cache will appear older than it is, and we'll trim it again next time. + renameio.WriteFile(filepath.Join(c.dir, "trim.txt"), []byte(fmt.Sprintf("%d", now.Unix()))) +} + +// trimSubdir trims a single cache subdirectory. +func (c *Cache) trimSubdir(subdir string, cutoff time.Time) { + // Read all directory entries from subdir before removing + // any files, in case removing files invalidates the file offset + // in the directory scan. Also, ignore error from f.Readdirnames, + // because we don't care about reporting the error and we still + // want to process any entries found before the error. + f, err := os.Open(subdir) + if err != nil { + return + } + names, _ := f.Readdirnames(-1) + f.Close() + + for _, name := range names { + // Remove only cache entries (xxxx-a and xxxx-d). + if !strings.HasSuffix(name, "-a") && !strings.HasSuffix(name, "-d") { + continue + } + entry := filepath.Join(subdir, name) + info, err := os.Stat(entry) + if err == nil && info.ModTime().Before(cutoff) { + os.Remove(entry) + } + } +} + +// putIndexEntry adds an entry to the cache recording that executing the action +// with the given id produces an output with the given output id (hash) and size. +func (c *Cache) putIndexEntry(id ActionID, out OutputID, size int64, allowVerify bool) error { + // Note: We expect that for one reason or another it may happen + // that repeating an action produces a different output hash + // (for example, if the output contains a time stamp or temp dir name). + // While not ideal, this is also not a correctness problem, so we + // don't make a big deal about it. 
In particular, we leave the action + // cache entries writable specifically so that they can be overwritten. + // + // Setting GODEBUG=gocacheverify=1 does make a big deal: + // in verify mode we are double-checking that the cache entries + // are entirely reproducible. As just noted, this may be unrealistic + // in some cases but the check is also useful for shaking out real bugs. + entry := []byte(fmt.Sprintf("v1 %x %x %20d %20d\n", id, out, size, time.Now().UnixNano())) + if verify && allowVerify { + old, err := c.get(id) + if err == nil && (old.OutputID != out || old.Size != size) { + // panic to show stack trace, so we can see what code is generating this cache entry. + msg := fmt.Sprintf("go: internal cache error: cache verify failed: id=%x changed:<<<\n%s\n>>>\nold: %x %d\nnew: %x %d", id, reverseHash(id), out, size, old.OutputID, old.Size) + panic(msg) + } + } + file := c.fileName(id, "a") + if err := ioutil.WriteFile(file, entry, 0666); err != nil { + // TODO(bcmills): This Remove potentially races with another go command writing to file. + // Can we eliminate it? + os.Remove(file) + return err + } + os.Chtimes(file, c.now(), c.now()) // mainly for tests + + return nil +} + +// Put stores the given output in the cache as the output for the action ID. +// It may read file twice. The content of file must not change between the two passes. +func (c *Cache) Put(id ActionID, file io.ReadSeeker) (OutputID, int64, error) { + return c.put(id, file, true) +} + +// PutNoVerify is like Put but disables the verify check +// when GODEBUG=goverifycache=1 is set. +// It is meant for data that is OK to cache but that we expect to vary slightly from run to run, +// like test output containing times and the like. +func (c *Cache) PutNoVerify(id ActionID, file io.ReadSeeker) (OutputID, int64, error) { + return c.put(id, file, false) +} + +func (c *Cache) put(id ActionID, file io.ReadSeeker, allowVerify bool) (OutputID, int64, error) { + // Compute output ID. + h := sha256.New() + if _, err := file.Seek(0, 0); err != nil { + return OutputID{}, 0, err + } + size, err := io.Copy(h, file) + if err != nil { + return OutputID{}, 0, err + } + var out OutputID + h.Sum(out[:0]) + + // Copy to cached output file (if not already present). + if err := c.copyFile(file, out, size); err != nil { + return out, size, err + } + + // Add to cache index. + return out, size, c.putIndexEntry(id, out, size, allowVerify) +} + +// PutBytes stores the given bytes in the cache as the output for the action ID. +func (c *Cache) PutBytes(id ActionID, data []byte) error { + _, _, err := c.Put(id, bytes.NewReader(data)) + return err +} + +// copyFile copies file into the cache, expecting it to have the given +// output ID and size, if that file is not present already. +func (c *Cache) copyFile(file io.ReadSeeker, out OutputID, size int64) error { + name := c.fileName(out, "d") + info, err := os.Stat(name) + if err == nil && info.Size() == size { + // Check hash. + if f, err := os.Open(name); err == nil { + h := sha256.New() + io.Copy(h, f) + f.Close() + var out2 OutputID + h.Sum(out2[:0]) + if out == out2 { + return nil + } + } + // Hash did not match. Fall through and rewrite file. + } + + // Copy file to cache directory. + mode := os.O_RDWR | os.O_CREATE + if err == nil && info.Size() > size { // shouldn't happen but fix in case + mode |= os.O_TRUNC + } + f, err := os.OpenFile(name, mode, 0666) + if err != nil { + return err + } + defer f.Close() + if size == 0 { + // File now exists with correct size. 
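// Hedged usage sketch (illustration only, not from the vendored sources):
// storing and retrieving an entry keyed by an action hash. Note that this
// package lives under internal/, so the sketch only compiles from within the
// honnef.co/go/tools module; paths and keys are made up.
package main

import (
	"crypto/sha256"
	"fmt"
	"io/ioutil"
	"log"

	"honnef.co/go/tools/internal/cache"
)

func main() {
	dir, err := ioutil.TempDir("", "artifact-cache")
	if err != nil {
		log.Fatal(err)
	}

	c, err := cache.Open(dir)
	if err != nil {
		log.Fatal(err)
	}

	// The action ID is a hash of everything that determines the output.
	id := cache.ActionID(sha256.Sum256([]byte("analyze example.com/p with -checks=all")))

	if err := c.PutBytes(id, []byte("cached analysis output")); err != nil {
		log.Fatal(err)
	}

	data, _, err := c.GetBytes(id)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(data)) // cached analysis output
}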
+ // Only one possible zero-length file, so contents are OK too. + // Early return here makes sure there's a "last byte" for code below. + return nil + } + + // From here on, if any of the I/O writing the file fails, + // we make a best-effort attempt to truncate the file f + // before returning, to avoid leaving bad bytes in the file. + + // Copy file to f, but also into h to double-check hash. + if _, err := file.Seek(0, 0); err != nil { + f.Truncate(0) + return err + } + h := sha256.New() + w := io.MultiWriter(f, h) + if _, err := io.CopyN(w, file, size-1); err != nil { + f.Truncate(0) + return err + } + // Check last byte before writing it; writing it will make the size match + // what other processes expect to find and might cause them to start + // using the file. + buf := make([]byte, 1) + if _, err := file.Read(buf); err != nil { + f.Truncate(0) + return err + } + h.Write(buf) + sum := h.Sum(nil) + if !bytes.Equal(sum, out[:]) { + f.Truncate(0) + return fmt.Errorf("file content changed underfoot") + } + + // Commit cache file entry. + if _, err := f.Write(buf); err != nil { + f.Truncate(0) + return err + } + if err := f.Close(); err != nil { + // Data might not have been written, + // but file may look like it is the right size. + // To be extra careful, remove cached file. + os.Remove(name) + return err + } + os.Chtimes(name, c.now(), c.now()) // mainly for tests + + return nil +} diff --git a/vendor/honnef.co/go/tools/internal/cache/default.go b/vendor/honnef.co/go/tools/internal/cache/default.go new file mode 100644 index 000000000..3034f76a5 --- /dev/null +++ b/vendor/honnef.co/go/tools/internal/cache/default.go @@ -0,0 +1,85 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "sync" +) + +// Default returns the default cache to use. +func Default() (*Cache, error) { + defaultOnce.Do(initDefaultCache) + return defaultCache, defaultDirErr +} + +var ( + defaultOnce sync.Once + defaultCache *Cache +) + +// cacheREADME is a message stored in a README in the cache directory. +// Because the cache lives outside the normal Go trees, we leave the +// README as a courtesy to explain where it came from. +const cacheREADME = `This directory holds cached build artifacts from staticcheck. +` + +// initDefaultCache does the work of finding the default cache +// the first time Default is called. +func initDefaultCache() { + dir := DefaultDir() + if err := os.MkdirAll(dir, 0777); err != nil { + log.Fatalf("failed to initialize build cache at %s: %s\n", dir, err) + } + if _, err := os.Stat(filepath.Join(dir, "README")); err != nil { + // Best effort. + ioutil.WriteFile(filepath.Join(dir, "README"), []byte(cacheREADME), 0666) + } + + c, err := Open(dir) + if err != nil { + log.Fatalf("failed to initialize build cache at %s: %s\n", dir, err) + } + defaultCache = c +} + +var ( + defaultDirOnce sync.Once + defaultDir string + defaultDirErr error +) + +// DefaultDir returns the effective STATICCHECK_CACHE setting. +func DefaultDir() string { + // Save the result of the first call to DefaultDir for later use in + // initDefaultCache. cmd/go/main.go explicitly sets GOCACHE so that + // subprocesses will inherit it, but that means initDefaultCache can't + // otherwise distinguish between an explicit "off" and a UserCacheDir error. 
+ + defaultDirOnce.Do(func() { + defaultDir = os.Getenv("STATICCHECK_CACHE") + if filepath.IsAbs(defaultDir) { + return + } + if defaultDir != "" { + defaultDirErr = fmt.Errorf("STATICCHECK_CACHE is not an absolute path") + return + } + + // Compute default location. + dir, err := os.UserCacheDir() + if err != nil { + defaultDirErr = fmt.Errorf("STATICCHECK_CACHE is not defined and %v", err) + return + } + defaultDir = filepath.Join(dir, "staticcheck") + }) + + return defaultDir +} diff --git a/vendor/honnef.co/go/tools/internal/cache/hash.go b/vendor/honnef.co/go/tools/internal/cache/hash.go new file mode 100644 index 000000000..a53543ec5 --- /dev/null +++ b/vendor/honnef.co/go/tools/internal/cache/hash.go @@ -0,0 +1,176 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "bytes" + "crypto/sha256" + "fmt" + "hash" + "io" + "os" + "sync" +) + +var debugHash = false // set when GODEBUG=gocachehash=1 + +// HashSize is the number of bytes in a hash. +const HashSize = 32 + +// A Hash provides access to the canonical hash function used to index the cache. +// The current implementation uses salted SHA256, but clients must not assume this. +type Hash struct { + h hash.Hash + name string // for debugging + buf *bytes.Buffer // for verify +} + +// hashSalt is a salt string added to the beginning of every hash +// created by NewHash. Using the Staticcheck version makes sure that different +// versions of the command do not address the same cache +// entries, so that a bug in one version does not affect the execution +// of other versions. This salt will result in additional ActionID files +// in the cache, but not additional copies of the large output files, +// which are still addressed by unsalted SHA256. +var hashSalt []byte + +func SetSalt(b []byte) { + hashSalt = b +} + +// Subkey returns an action ID corresponding to mixing a parent +// action ID with a string description of the subkey. +func Subkey(parent ActionID, desc string) ActionID { + h := sha256.New() + h.Write([]byte("subkey:")) + h.Write(parent[:]) + h.Write([]byte(desc)) + var out ActionID + h.Sum(out[:0]) + if debugHash { + fmt.Fprintf(os.Stderr, "HASH subkey %x %q = %x\n", parent, desc, out) + } + if verify { + hashDebug.Lock() + hashDebug.m[out] = fmt.Sprintf("subkey %x %q", parent, desc) + hashDebug.Unlock() + } + return out +} + +// NewHash returns a new Hash. +// The caller is expected to Write data to it and then call Sum. +func NewHash(name string) *Hash { + h := &Hash{h: sha256.New(), name: name} + if debugHash { + fmt.Fprintf(os.Stderr, "HASH[%s]\n", h.name) + } + h.Write(hashSalt) + if verify { + h.buf = new(bytes.Buffer) + } + return h +} + +// Write writes data to the running hash. +func (h *Hash) Write(b []byte) (int, error) { + if debugHash { + fmt.Fprintf(os.Stderr, "HASH[%s]: %q\n", h.name, b) + } + if h.buf != nil { + h.buf.Write(b) + } + return h.h.Write(b) +} + +// Sum returns the hash of the data written previously. 
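// Hedged usage sketch (illustration only, not from the vendored sources):
// deriving action IDs from a salted hash and a subkey, mirroring how the
// cache above is keyed. The salt value and key strings are made up, and the
// internal/ package only compiles from within the honnef.co/go/tools module.
package main

import (
	"fmt"

	"honnef.co/go/tools/internal/cache"
)

func main() {
	cache.SetSalt([]byte("staticcheck 2019.2")) // version-specific salt

	h := cache.NewHash("analyze example.com/p")
	h.Write([]byte("inputs that affect the result: file contents, flags, ..."))
	id := cache.ActionID(h.Sum())

	// A subkey keeps related outputs (e.g. per-analyzer facts) distinct.
	factsID := cache.Subkey(id, "facts:SA1000")
	fmt.Printf("%x\n%x\n", id, factsID)
}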
+func (h *Hash) Sum() [HashSize]byte { + var out [HashSize]byte + h.h.Sum(out[:0]) + if debugHash { + fmt.Fprintf(os.Stderr, "HASH[%s]: %x\n", h.name, out) + } + if h.buf != nil { + hashDebug.Lock() + if hashDebug.m == nil { + hashDebug.m = make(map[[HashSize]byte]string) + } + hashDebug.m[out] = h.buf.String() + hashDebug.Unlock() + } + return out +} + +// In GODEBUG=gocacheverify=1 mode, +// hashDebug holds the input to every computed hash ID, +// so that we can work backward from the ID involved in a +// cache entry mismatch to a description of what should be there. +var hashDebug struct { + sync.Mutex + m map[[HashSize]byte]string +} + +// reverseHash returns the input used to compute the hash id. +func reverseHash(id [HashSize]byte) string { + hashDebug.Lock() + s := hashDebug.m[id] + hashDebug.Unlock() + return s +} + +var hashFileCache struct { + sync.Mutex + m map[string][HashSize]byte +} + +// FileHash returns the hash of the named file. +// It caches repeated lookups for a given file, +// and the cache entry for a file can be initialized +// using SetFileHash. +// The hash used by FileHash is not the same as +// the hash used by NewHash. +func FileHash(file string) ([HashSize]byte, error) { + hashFileCache.Lock() + out, ok := hashFileCache.m[file] + hashFileCache.Unlock() + + if ok { + return out, nil + } + + h := sha256.New() + f, err := os.Open(file) + if err != nil { + if debugHash { + fmt.Fprintf(os.Stderr, "HASH %s: %v\n", file, err) + } + return [HashSize]byte{}, err + } + _, err = io.Copy(h, f) + f.Close() + if err != nil { + if debugHash { + fmt.Fprintf(os.Stderr, "HASH %s: %v\n", file, err) + } + return [HashSize]byte{}, err + } + h.Sum(out[:0]) + if debugHash { + fmt.Fprintf(os.Stderr, "HASH %s: %x\n", file, out) + } + + SetFileHash(file, out) + return out, nil +} + +// SetFileHash sets the hash returned by FileHash for file. +func SetFileHash(file string, sum [HashSize]byte) { + hashFileCache.Lock() + if hashFileCache.m == nil { + hashFileCache.m = make(map[string][HashSize]byte) + } + hashFileCache.m[file] = sum + hashFileCache.Unlock() +} diff --git a/vendor/honnef.co/go/tools/internal/passes/buildssa/buildssa.go b/vendor/honnef.co/go/tools/internal/passes/buildssa/buildssa.go new file mode 100644 index 000000000..fde918d12 --- /dev/null +++ b/vendor/honnef.co/go/tools/internal/passes/buildssa/buildssa.go @@ -0,0 +1,116 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package buildssa defines an Analyzer that constructs the SSA +// representation of an error-free package and returns the set of all +// functions within it. It does not report any diagnostics itself but +// may be used as an input to other analyzers. +// +// THIS INTERFACE IS EXPERIMENTAL AND MAY BE SUBJECT TO INCOMPATIBLE CHANGE. +package buildssa + +import ( + "go/ast" + "go/types" + "reflect" + + "golang.org/x/tools/go/analysis" + "honnef.co/go/tools/ssa" +) + +var Analyzer = &analysis.Analyzer{ + Name: "buildssa", + Doc: "build SSA-form IR for later passes", + Run: run, + ResultType: reflect.TypeOf(new(SSA)), +} + +// SSA provides SSA-form intermediate representation for all the +// non-blank source functions in the current package. +type SSA struct { + Pkg *ssa.Package + SrcFuncs []*ssa.Function +} + +func run(pass *analysis.Pass) (interface{}, error) { + // Plundered from ssautil.BuildPackage. 
+ + // We must create a new Program for each Package because the + // analysis API provides no place to hang a Program shared by + // all Packages. Consequently, SSA Packages and Functions do not + // have a canonical representation across an analysis session of + // multiple packages. This is unlikely to be a problem in + // practice because the analysis API essentially forces all + // packages to be analysed independently, so any given call to + // Analysis.Run on a package will see only SSA objects belonging + // to a single Program. + + mode := ssa.GlobalDebug + + prog := ssa.NewProgram(pass.Fset, mode) + + // Create SSA packages for all imports. + // Order is not significant. + created := make(map[*types.Package]bool) + var createAll func(pkgs []*types.Package) + createAll = func(pkgs []*types.Package) { + for _, p := range pkgs { + if !created[p] { + created[p] = true + prog.CreatePackage(p, nil, nil, true) + createAll(p.Imports()) + } + } + } + createAll(pass.Pkg.Imports()) + + // Create and build the primary package. + ssapkg := prog.CreatePackage(pass.Pkg, pass.Files, pass.TypesInfo, false) + ssapkg.Build() + + // Compute list of source functions, including literals, + // in source order. + var funcs []*ssa.Function + var addAnons func(f *ssa.Function) + addAnons = func(f *ssa.Function) { + funcs = append(funcs, f) + for _, anon := range f.AnonFuncs { + addAnons(anon) + } + } + addAnons(ssapkg.Members["init"].(*ssa.Function)) + for _, f := range pass.Files { + for _, decl := range f.Decls { + if fdecl, ok := decl.(*ast.FuncDecl); ok { + + // SSA will not build a Function + // for a FuncDecl named blank. + // That's arguably too strict but + // relaxing it would break uniqueness of + // names of package members. + if fdecl.Name.Name == "_" { + continue + } + + // (init functions have distinct Func + // objects named "init" and distinct + // ssa.Functions named "init#1", ...) + + fn := pass.TypesInfo.Defs[fdecl.Name].(*types.Func) + if fn == nil { + panic(fn) + } + + f := ssapkg.Prog.FuncValue(fn) + if f == nil { + panic(fn) + } + + addAnons(f) + } + } + } + + return &SSA{Pkg: ssapkg, SrcFuncs: funcs}, nil +} diff --git a/vendor/honnef.co/go/tools/internal/renameio/renameio.go b/vendor/honnef.co/go/tools/internal/renameio/renameio.go new file mode 100644 index 000000000..3f3f1708f --- /dev/null +++ b/vendor/honnef.co/go/tools/internal/renameio/renameio.go @@ -0,0 +1,83 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package renameio writes files atomically by renaming temporary files. +package renameio + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "time" +) + +const patternSuffix = "*.tmp" + +// Pattern returns a glob pattern that matches the unrenamed temporary files +// created when writing to filename. +func Pattern(filename string) string { + return filepath.Join(filepath.Dir(filename), filepath.Base(filename)+patternSuffix) +} + +// WriteFile is like ioutil.WriteFile, but first writes data to an arbitrary +// file in the same directory as filename, then renames it atomically to the +// final name. +// +// That ensures that the final location, if it exists, is always a complete file. +func WriteFile(filename string, data []byte) (err error) { + return WriteToFile(filename, bytes.NewReader(data)) +} + +// WriteToFile is a variant of WriteFile that accepts the data as an io.Reader +// instead of a slice. 
+func WriteToFile(filename string, data io.Reader) (err error) { + f, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)+patternSuffix) + if err != nil { + return err + } + defer func() { + // Only call os.Remove on f.Name() if we failed to rename it: otherwise, + // some other process may have created a new file with the same name after + // that. + if err != nil { + f.Close() + os.Remove(f.Name()) + } + }() + + if _, err := io.Copy(f, data); err != nil { + return err + } + // Sync the file before renaming it: otherwise, after a crash the reader may + // observe a 0-length file instead of the actual contents. + // See https://golang.org/issue/22397#issuecomment-380831736. + if err := f.Sync(); err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + + var start time.Time + for { + err := os.Rename(f.Name(), filename) + if err == nil || runtime.GOOS != "windows" || !strings.HasSuffix(err.Error(), "Access is denied.") { + return err + } + + // Windows seems to occasionally trigger spurious "Access is denied" errors + // here (see golang.org/issue/31247). We're not sure why. It's probably + // worth a little extra latency to avoid propagating the spurious errors. + if start.IsZero() { + start = time.Now() + } else if time.Since(start) >= 500*time.Millisecond { + return err + } + time.Sleep(5 * time.Millisecond) + } +} diff --git a/vendor/honnef.co/go/tools/internal/sharedcheck/lint.go b/vendor/honnef.co/go/tools/internal/sharedcheck/lint.go new file mode 100644 index 000000000..affee6607 --- /dev/null +++ b/vendor/honnef.co/go/tools/internal/sharedcheck/lint.go @@ -0,0 +1,70 @@ +package sharedcheck + +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/go/analysis" + "honnef.co/go/tools/internal/passes/buildssa" + . "honnef.co/go/tools/lint/lintdsl" + "honnef.co/go/tools/ssa" +) + +func CheckRangeStringRunes(pass *analysis.Pass) (interface{}, error) { + for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + fn := func(node ast.Node) bool { + rng, ok := node.(*ast.RangeStmt) + if !ok || !IsBlank(rng.Key) { + return true + } + + v, _ := ssafn.ValueForExpr(rng.X) + + // Check that we're converting from string to []rune + val, _ := v.(*ssa.Convert) + if val == nil { + return true + } + Tsrc, ok := val.X.Type().(*types.Basic) + if !ok || Tsrc.Kind() != types.String { + return true + } + Tdst, ok := val.Type().(*types.Slice) + if !ok { + return true + } + TdstElem, ok := Tdst.Elem().(*types.Basic) + if !ok || TdstElem.Kind() != types.Int32 { + return true + } + + // Check that the result of the conversion is only used to + // range over + refs := val.Referrers() + if refs == nil { + return true + } + + // Expect two refs: one for obtaining the length of the slice, + // one for accessing the elements + if len(FilterDebug(*refs)) != 2 { + // TODO(dh): right now, we check that only one place + // refers to our slice. This will miss cases such as + // ranging over the slice twice. Ideally, we'd ensure that + // the slice is only used for ranging over (without + // accessing the key), but that is harder to do because in + // SSA form, ranging over a slice looks like an ordinary + // loop with index increments and slice accesses. We'd + // have to look at the associated AST node to check that + // it's a range statement. 
+ return true + } + + pass.Reportf(rng.Pos(), "should range over string, not []rune(string)") + + return true + } + Inspect(ssafn.Syntax(), fn) + } + return nil, nil +} diff --git a/vendor/honnef.co/go/tools/lint/LICENSE b/vendor/honnef.co/go/tools/lint/LICENSE new file mode 100644 index 000000000..796130a12 --- /dev/null +++ b/vendor/honnef.co/go/tools/lint/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. +Copyright (c) 2016 Dominik Honnef. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
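Illustrative note (not part of the vendored sources): CheckRangeStringRunes above reports a range loop over []rune(string) when the loop index is discarded, because ranging over the string itself already yields runes without allocating a slice. A minimal, self-contained sketch of the flagged pattern and the preferred form, using hypothetical names:

package main

import "fmt"

func main() {
	s := "héllo"

	// Pattern the check reports: the []rune conversion allocates a new
	// slice that is only used for iteration, and the index is ignored.
	// staticcheck would say "should range over string, not []rune(string)".
	for _, r := range []rune(s) {
		fmt.Printf("%c", r)
	}
	fmt.Println()

	// Preferred form: ranging over the string decodes UTF-8 on the fly
	// and yields the same runes without the extra allocation.
	for _, r := range s {
		fmt.Printf("%c", r)
	}
	fmt.Println()
}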
diff --git a/vendor/honnef.co/go/tools/lint/lint.go b/vendor/honnef.co/go/tools/lint/lint.go new file mode 100644 index 000000000..de5a8f128 --- /dev/null +++ b/vendor/honnef.co/go/tools/lint/lint.go @@ -0,0 +1,491 @@ +// Package lint provides the foundation for tools like staticcheck +package lint // import "honnef.co/go/tools/lint" + +import ( + "bytes" + "fmt" + "go/scanner" + "go/token" + "go/types" + "path/filepath" + "sort" + "strings" + "sync" + "sync/atomic" + "unicode" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/packages" + "honnef.co/go/tools/config" +) + +type Documentation struct { + Title string + Text string + Since string + NonDefault bool + Options []string +} + +func (doc *Documentation) String() string { + b := &strings.Builder{} + fmt.Fprintf(b, "%s\n\n", doc.Title) + if doc.Text != "" { + fmt.Fprintf(b, "%s\n\n", doc.Text) + } + fmt.Fprint(b, "Available since\n ") + if doc.Since == "" { + fmt.Fprint(b, "unreleased") + } else { + fmt.Fprintf(b, "%s", doc.Since) + } + if doc.NonDefault { + fmt.Fprint(b, ", non-default") + } + fmt.Fprint(b, "\n") + if len(doc.Options) > 0 { + fmt.Fprintf(b, "\nOptions\n") + for _, opt := range doc.Options { + fmt.Fprintf(b, " %s", opt) + } + fmt.Fprint(b, "\n") + } + return b.String() +} + +type Ignore interface { + Match(p Problem) bool +} + +type LineIgnore struct { + File string + Line int + Checks []string + Matched bool + Pos token.Pos +} + +func (li *LineIgnore) Match(p Problem) bool { + pos := p.Pos + if pos.Filename != li.File || pos.Line != li.Line { + return false + } + for _, c := range li.Checks { + if m, _ := filepath.Match(c, p.Check); m { + li.Matched = true + return true + } + } + return false +} + +func (li *LineIgnore) String() string { + matched := "not matched" + if li.Matched { + matched = "matched" + } + return fmt.Sprintf("%s:%d %s (%s)", li.File, li.Line, strings.Join(li.Checks, ", "), matched) +} + +type FileIgnore struct { + File string + Checks []string +} + +func (fi *FileIgnore) Match(p Problem) bool { + if p.Pos.Filename != fi.File { + return false + } + for _, c := range fi.Checks { + if m, _ := filepath.Match(c, p.Check); m { + return true + } + } + return false +} + +type Severity uint8 + +const ( + Error Severity = iota + Warning + Ignored +) + +// Problem represents a problem in some source code. +type Problem struct { + Pos token.Position + End token.Position + Message string + Check string + Severity Severity +} + +func (p *Problem) String() string { + return fmt.Sprintf("%s (%s)", p.Message, p.Check) +} + +// A Linter lints Go source code. +type Linter struct { + Checkers []*analysis.Analyzer + CumulativeCheckers []CumulativeChecker + GoVersion int + Config config.Config + Stats Stats +} + +type CumulativeChecker interface { + Analyzer() *analysis.Analyzer + Result() []types.Object + ProblemObject(*token.FileSet, types.Object) Problem +} + +func (l *Linter) Lint(cfg *packages.Config, patterns []string) ([]Problem, error) { + var allAnalyzers []*analysis.Analyzer + allAnalyzers = append(allAnalyzers, l.Checkers...) + for _, cum := range l.CumulativeCheckers { + allAnalyzers = append(allAnalyzers, cum.Analyzer()) + } + + // The -checks command line flag overrules all configuration + // files, which means that for `-checks="foo"`, no check other + // than foo can ever be reported to the user. Make use of this + // fact to cull the list of analyses we need to run. 
+ + // replace "inherit" with "all", as we don't want to base the + // list of all checks on the default configuration, which + // disables certain checks. + checks := make([]string, len(l.Config.Checks)) + copy(checks, l.Config.Checks) + for i, c := range checks { + if c == "inherit" { + checks[i] = "all" + } + } + + allowed := FilterChecks(allAnalyzers, checks) + var allowedAnalyzers []*analysis.Analyzer + for _, c := range l.Checkers { + if allowed[c.Name] { + allowedAnalyzers = append(allowedAnalyzers, c) + } + } + hasCumulative := false + for _, cum := range l.CumulativeCheckers { + a := cum.Analyzer() + if allowed[a.Name] { + hasCumulative = true + allowedAnalyzers = append(allowedAnalyzers, a) + } + } + + r, err := NewRunner(&l.Stats) + if err != nil { + return nil, err + } + r.goVersion = l.GoVersion + + pkgs, err := r.Run(cfg, patterns, allowedAnalyzers, hasCumulative) + if err != nil { + return nil, err + } + + tpkgToPkg := map[*types.Package]*Package{} + for _, pkg := range pkgs { + tpkgToPkg[pkg.Types] = pkg + + for _, e := range pkg.errs { + switch e := e.(type) { + case types.Error: + p := Problem{ + Pos: e.Fset.PositionFor(e.Pos, false), + Message: e.Msg, + Severity: Error, + Check: "compile", + } + pkg.problems = append(pkg.problems, p) + case packages.Error: + msg := e.Msg + if len(msg) != 0 && msg[0] == '\n' { + // TODO(dh): See https://github.com/golang/go/issues/32363 + msg = msg[1:] + } + + var pos token.Position + if e.Pos == "" { + // Under certain conditions (malformed package + // declarations, multiple packages in the same + // directory), go list emits an error on stderr + // instead of JSON. Those errors do not have + // associated position information in + // go/packages.Error, even though the output on + // stderr may contain it. + if p, n, err := parsePos(msg); err == nil { + if abs, err := filepath.Abs(p.Filename); err == nil { + p.Filename = abs + } + pos = p + msg = msg[n+2:] + } + } else { + var err error + pos, _, err = parsePos(e.Pos) + if err != nil { + panic(fmt.Sprintf("internal error: %s", e)) + } + } + p := Problem{ + Pos: pos, + Message: msg, + Severity: Error, + Check: "compile", + } + pkg.problems = append(pkg.problems, p) + case scanner.ErrorList: + for _, e := range e { + p := Problem{ + Pos: e.Pos, + Message: e.Msg, + Severity: Error, + Check: "compile", + } + pkg.problems = append(pkg.problems, p) + } + case error: + p := Problem{ + Pos: token.Position{}, + Message: e.Error(), + Severity: Error, + Check: "compile", + } + pkg.problems = append(pkg.problems, p) + } + } + } + + atomic.StoreUint32(&r.stats.State, StateCumulative) + var problems []Problem + for _, cum := range l.CumulativeCheckers { + for _, res := range cum.Result() { + pkg := tpkgToPkg[res.Pkg()] + allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks) + if allowedChecks[cum.Analyzer().Name] { + pos := DisplayPosition(pkg.Fset, res.Pos()) + // FIXME(dh): why are we ignoring generated files + // here? 
Surely this is specific to 'unused', not all + // cumulative checkers + if _, ok := pkg.gen[pos.Filename]; ok { + continue + } + p := cum.ProblemObject(pkg.Fset, res) + problems = append(problems, p) + } + } + } + + for _, pkg := range pkgs { + for _, ig := range pkg.ignores { + for i := range pkg.problems { + p := &pkg.problems[i] + if ig.Match(*p) { + p.Severity = Ignored + } + } + for i := range problems { + p := &problems[i] + if ig.Match(*p) { + p.Severity = Ignored + } + } + } + + if pkg.cfg == nil { + // The package failed to load, otherwise we would have a + // valid config. Pass through all errors. + problems = append(problems, pkg.problems...) + } else { + for _, p := range pkg.problems { + allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks) + allowedChecks["compile"] = true + if allowedChecks[p.Check] { + problems = append(problems, p) + } + } + } + + for _, ig := range pkg.ignores { + ig, ok := ig.(*LineIgnore) + if !ok { + continue + } + if ig.Matched { + continue + } + + couldveMatched := false + allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks) + for _, c := range ig.Checks { + if !allowedChecks[c] { + continue + } + couldveMatched = true + break + } + + if !couldveMatched { + // The ignored checks were disabled for the containing package. + // Don't flag the ignore for not having matched. + continue + } + p := Problem{ + Pos: DisplayPosition(pkg.Fset, ig.Pos), + Message: "this linter directive didn't match anything; should it be removed?", + Check: "", + } + problems = append(problems, p) + } + } + + if len(problems) == 0 { + return nil, nil + } + + sort.Slice(problems, func(i, j int) bool { + pi := problems[i].Pos + pj := problems[j].Pos + + if pi.Filename != pj.Filename { + return pi.Filename < pj.Filename + } + if pi.Line != pj.Line { + return pi.Line < pj.Line + } + if pi.Column != pj.Column { + return pi.Column < pj.Column + } + + return problems[i].Message < problems[j].Message + }) + + var out []Problem + out = append(out, problems[0]) + for i, p := range problems[1:] { + // We may encounter duplicate problems because one file + // can be part of many packages. 
+ if problems[i] != p { + out = append(out, p) + } + } + return out, nil +} + +func FilterChecks(allChecks []*analysis.Analyzer, checks []string) map[string]bool { + // OPT(dh): this entire computation could be cached per package + allowedChecks := map[string]bool{} + + for _, check := range checks { + b := true + if len(check) > 1 && check[0] == '-' { + b = false + check = check[1:] + } + if check == "*" || check == "all" { + // Match all + for _, c := range allChecks { + allowedChecks[c.Name] = b + } + } else if strings.HasSuffix(check, "*") { + // Glob + prefix := check[:len(check)-1] + isCat := strings.IndexFunc(prefix, func(r rune) bool { return unicode.IsNumber(r) }) == -1 + + for _, c := range allChecks { + idx := strings.IndexFunc(c.Name, func(r rune) bool { return unicode.IsNumber(r) }) + if isCat { + // Glob is S*, which should match S1000 but not SA1000 + cat := c.Name[:idx] + if prefix == cat { + allowedChecks[c.Name] = b + } + } else { + // Glob is S1* + if strings.HasPrefix(c.Name, prefix) { + allowedChecks[c.Name] = b + } + } + } + } else { + // Literal check name + allowedChecks[check] = b + } + } + return allowedChecks +} + +type Positioner interface { + Pos() token.Pos +} + +func DisplayPosition(fset *token.FileSet, p token.Pos) token.Position { + if p == token.NoPos { + return token.Position{} + } + + // Only use the adjusted position if it points to another Go file. + // This means we'll point to the original file for cgo files, but + // we won't point to a YACC grammar file. + pos := fset.PositionFor(p, false) + adjPos := fset.PositionFor(p, true) + + if filepath.Ext(adjPos.Filename) == ".go" { + return adjPos + } + return pos +} + +var bufferPool = &sync.Pool{ + New: func() interface{} { + buf := bytes.NewBuffer(nil) + buf.Grow(64) + return buf + }, +} + +func FuncName(f *types.Func) string { + buf := bufferPool.Get().(*bytes.Buffer) + buf.Reset() + if f.Type() != nil { + sig := f.Type().(*types.Signature) + if recv := sig.Recv(); recv != nil { + buf.WriteByte('(') + if _, ok := recv.Type().(*types.Interface); ok { + // gcimporter creates abstract methods of + // named interfaces using the interface type + // (not the named type) as the receiver. + // Don't print it in full. + buf.WriteString("interface") + } else { + types.WriteType(buf, recv.Type(), nil) + } + buf.WriteByte(')') + buf.WriteByte('.') + } else if f.Pkg() != nil { + writePackage(buf, f.Pkg()) + } + } + buf.WriteString(f.Name()) + s := buf.String() + bufferPool.Put(buf) + return s +} + +func writePackage(buf *bytes.Buffer, pkg *types.Package) { + if pkg == nil { + return + } + s := pkg.Path() + if s != "" { + buf.WriteString(s) + buf.WriteByte('.') + } +} diff --git a/vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go b/vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go new file mode 100644 index 000000000..3b939e95f --- /dev/null +++ b/vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go @@ -0,0 +1,400 @@ +// Package lintdsl provides helpers for implementing static analysis +// checks. Dot-importing this package is encouraged. 
+package lintdsl + +import ( + "bytes" + "flag" + "fmt" + "go/ast" + "go/constant" + "go/printer" + "go/token" + "go/types" + "strings" + + "golang.org/x/tools/go/analysis" + "honnef.co/go/tools/facts" + "honnef.co/go/tools/lint" + "honnef.co/go/tools/ssa" +) + +type packager interface { + Package() *ssa.Package +} + +func CallName(call *ssa.CallCommon) string { + if call.IsInvoke() { + return "" + } + switch v := call.Value.(type) { + case *ssa.Function: + fn, ok := v.Object().(*types.Func) + if !ok { + return "" + } + return lint.FuncName(fn) + case *ssa.Builtin: + return v.Name() + } + return "" +} + +func IsCallTo(call *ssa.CallCommon, name string) bool { return CallName(call) == name } +func IsType(T types.Type, name string) bool { return types.TypeString(T, nil) == name } + +func FilterDebug(instr []ssa.Instruction) []ssa.Instruction { + var out []ssa.Instruction + for _, ins := range instr { + if _, ok := ins.(*ssa.DebugRef); !ok { + out = append(out, ins) + } + } + return out +} + +func IsExample(fn *ssa.Function) bool { + if !strings.HasPrefix(fn.Name(), "Example") { + return false + } + f := fn.Prog.Fset.File(fn.Pos()) + if f == nil { + return false + } + return strings.HasSuffix(f.Name(), "_test.go") +} + +func IsPointerLike(T types.Type) bool { + switch T := T.Underlying().(type) { + case *types.Interface, *types.Chan, *types.Map, *types.Signature, *types.Pointer: + return true + case *types.Basic: + return T.Kind() == types.UnsafePointer + } + return false +} + +func IsIdent(expr ast.Expr, ident string) bool { + id, ok := expr.(*ast.Ident) + return ok && id.Name == ident +} + +// isBlank returns whether id is the blank identifier "_". +// If id == nil, the answer is false. +func IsBlank(id ast.Expr) bool { + ident, _ := id.(*ast.Ident) + return ident != nil && ident.Name == "_" +} + +func IsIntLiteral(expr ast.Expr, literal string) bool { + lit, ok := expr.(*ast.BasicLit) + return ok && lit.Kind == token.INT && lit.Value == literal +} + +// Deprecated: use IsIntLiteral instead +func IsZero(expr ast.Expr) bool { + return IsIntLiteral(expr, "0") +} + +func IsOfType(pass *analysis.Pass, expr ast.Expr, name string) bool { + return IsType(pass.TypesInfo.TypeOf(expr), name) +} + +func IsInTest(pass *analysis.Pass, node lint.Positioner) bool { + // FIXME(dh): this doesn't work for global variables with + // initializers + f := pass.Fset.File(node.Pos()) + return f != nil && strings.HasSuffix(f.Name(), "_test.go") +} + +func IsInMain(pass *analysis.Pass, node lint.Positioner) bool { + if node, ok := node.(packager); ok { + return node.Package().Pkg.Name() == "main" + } + return pass.Pkg.Name() == "main" +} + +func SelectorName(pass *analysis.Pass, expr *ast.SelectorExpr) string { + info := pass.TypesInfo + sel := info.Selections[expr] + if sel == nil { + if x, ok := expr.X.(*ast.Ident); ok { + pkg, ok := info.ObjectOf(x).(*types.PkgName) + if !ok { + // This shouldn't happen + return fmt.Sprintf("%s.%s", x.Name, expr.Sel.Name) + } + return fmt.Sprintf("%s.%s", pkg.Imported().Path(), expr.Sel.Name) + } + panic(fmt.Sprintf("unsupported selector: %v", expr)) + } + return fmt.Sprintf("(%s).%s", sel.Recv(), sel.Obj().Name()) +} + +func IsNil(pass *analysis.Pass, expr ast.Expr) bool { + return pass.TypesInfo.Types[expr].IsNil() +} + +func BoolConst(pass *analysis.Pass, expr ast.Expr) bool { + val := pass.TypesInfo.ObjectOf(expr.(*ast.Ident)).(*types.Const).Val() + return constant.BoolVal(val) +} + +func IsBoolConst(pass *analysis.Pass, expr ast.Expr) bool { + // We explicitly don't support 
typed bools because more often than + // not, custom bool types are used as binary enums and the + // explicit comparison is desired. + + ident, ok := expr.(*ast.Ident) + if !ok { + return false + } + obj := pass.TypesInfo.ObjectOf(ident) + c, ok := obj.(*types.Const) + if !ok { + return false + } + basic, ok := c.Type().(*types.Basic) + if !ok { + return false + } + if basic.Kind() != types.UntypedBool && basic.Kind() != types.Bool { + return false + } + return true +} + +func ExprToInt(pass *analysis.Pass, expr ast.Expr) (int64, bool) { + tv := pass.TypesInfo.Types[expr] + if tv.Value == nil { + return 0, false + } + if tv.Value.Kind() != constant.Int { + return 0, false + } + return constant.Int64Val(tv.Value) +} + +func ExprToString(pass *analysis.Pass, expr ast.Expr) (string, bool) { + val := pass.TypesInfo.Types[expr].Value + if val == nil { + return "", false + } + if val.Kind() != constant.String { + return "", false + } + return constant.StringVal(val), true +} + +// Dereference returns a pointer's element type; otherwise it returns +// T. +func Dereference(T types.Type) types.Type { + if p, ok := T.Underlying().(*types.Pointer); ok { + return p.Elem() + } + return T +} + +// DereferenceR returns a pointer's element type; otherwise it returns +// T. If the element type is itself a pointer, DereferenceR will be +// applied recursively. +func DereferenceR(T types.Type) types.Type { + if p, ok := T.Underlying().(*types.Pointer); ok { + return DereferenceR(p.Elem()) + } + return T +} + +func IsGoVersion(pass *analysis.Pass, minor int) bool { + version := pass.Analyzer.Flags.Lookup("go").Value.(flag.Getter).Get().(int) + return version >= minor +} + +func CallNameAST(pass *analysis.Pass, call *ast.CallExpr) string { + switch fun := call.Fun.(type) { + case *ast.SelectorExpr: + fn, ok := pass.TypesInfo.ObjectOf(fun.Sel).(*types.Func) + if !ok { + return "" + } + return lint.FuncName(fn) + case *ast.Ident: + obj := pass.TypesInfo.ObjectOf(fun) + switch obj := obj.(type) { + case *types.Func: + return lint.FuncName(obj) + case *types.Builtin: + return obj.Name() + default: + return "" + } + default: + return "" + } +} + +func IsCallToAST(pass *analysis.Pass, node ast.Node, name string) bool { + call, ok := node.(*ast.CallExpr) + if !ok { + return false + } + return CallNameAST(pass, call) == name +} + +func IsCallToAnyAST(pass *analysis.Pass, node ast.Node, names ...string) bool { + for _, name := range names { + if IsCallToAST(pass, node, name) { + return true + } + } + return false +} + +func Render(pass *analysis.Pass, x interface{}) string { + var buf bytes.Buffer + if err := printer.Fprint(&buf, pass.Fset, x); err != nil { + panic(err) + } + return buf.String() +} + +func RenderArgs(pass *analysis.Pass, args []ast.Expr) string { + var ss []string + for _, arg := range args { + ss = append(ss, Render(pass, arg)) + } + return strings.Join(ss, ", ") +} + +func Preamble(f *ast.File) string { + cutoff := f.Package + if f.Doc != nil { + cutoff = f.Doc.Pos() + } + var out []string + for _, cmt := range f.Comments { + if cmt.Pos() >= cutoff { + break + } + out = append(out, cmt.Text()) + } + return strings.Join(out, "\n") +} + +func Inspect(node ast.Node, fn func(node ast.Node) bool) { + if node == nil { + return + } + ast.Inspect(node, fn) +} + +func GroupSpecs(fset *token.FileSet, specs []ast.Spec) [][]ast.Spec { + if len(specs) == 0 { + return nil + } + groups := make([][]ast.Spec, 1) + groups[0] = append(groups[0], specs[0]) + + for _, spec := range specs[1:] { + g := 
groups[len(groups)-1] + if fset.PositionFor(spec.Pos(), false).Line-1 != + fset.PositionFor(g[len(g)-1].End(), false).Line { + + groups = append(groups, nil) + } + + groups[len(groups)-1] = append(groups[len(groups)-1], spec) + } + + return groups +} + +func IsObject(obj types.Object, name string) bool { + var path string + if pkg := obj.Pkg(); pkg != nil { + path = pkg.Path() + "." + } + return path+obj.Name() == name +} + +type Field struct { + Var *types.Var + Tag string + Path []int +} + +// FlattenFields recursively flattens T and embedded structs, +// returning a list of fields. If multiple fields with the same name +// exist, all will be returned. +func FlattenFields(T *types.Struct) []Field { + return flattenFields(T, nil, nil) +} + +func flattenFields(T *types.Struct, path []int, seen map[types.Type]bool) []Field { + if seen == nil { + seen = map[types.Type]bool{} + } + if seen[T] { + return nil + } + seen[T] = true + var out []Field + for i := 0; i < T.NumFields(); i++ { + field := T.Field(i) + tag := T.Tag(i) + np := append(path[:len(path):len(path)], i) + if field.Anonymous() { + if s, ok := Dereference(field.Type()).Underlying().(*types.Struct); ok { + out = append(out, flattenFields(s, np, seen)...) + } + } else { + out = append(out, Field{field, tag, np}) + } + } + return out +} + +func File(pass *analysis.Pass, node lint.Positioner) *ast.File { + pass.Fset.PositionFor(node.Pos(), true) + m := pass.ResultOf[facts.TokenFile].(map[*token.File]*ast.File) + return m[pass.Fset.File(node.Pos())] +} + +// IsGenerated reports whether pos is in a generated file, It ignores +// //line directives. +func IsGenerated(pass *analysis.Pass, pos token.Pos) bool { + _, ok := Generator(pass, pos) + return ok +} + +// Generator returns the generator that generated the file containing +// pos. It ignores //line directives. +func Generator(pass *analysis.Pass, pos token.Pos) (facts.Generator, bool) { + file := pass.Fset.PositionFor(pos, false).Filename + m := pass.ResultOf[facts.Generated].(map[string]facts.Generator) + g, ok := m[file] + return g, ok +} + +func ReportfFG(pass *analysis.Pass, pos token.Pos, f string, args ...interface{}) { + file := lint.DisplayPosition(pass.Fset, pos).Filename + m := pass.ResultOf[facts.Generated].(map[string]facts.Generator) + if _, ok := m[file]; ok { + return + } + pass.Reportf(pos, f, args...) +} + +func ReportNodef(pass *analysis.Pass, node ast.Node, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + pass.Report(analysis.Diagnostic{Pos: node.Pos(), End: node.End(), Message: msg}) +} + +func ReportNodefFG(pass *analysis.Pass, node ast.Node, format string, args ...interface{}) { + file := lint.DisplayPosition(pass.Fset, node.Pos()).Filename + m := pass.ResultOf[facts.Generated].(map[string]facts.Generator) + if _, ok := m[file]; ok { + return + } + ReportNodef(pass, node, format, args...) +} diff --git a/vendor/honnef.co/go/tools/lint/lintutil/format/format.go b/vendor/honnef.co/go/tools/lint/lintutil/format/format.go new file mode 100644 index 000000000..9385431f8 --- /dev/null +++ b/vendor/honnef.co/go/tools/lint/lintutil/format/format.go @@ -0,0 +1,135 @@ +// Package format provides formatters for linter problems. 
+package format + +import ( + "encoding/json" + "fmt" + "go/token" + "io" + "os" + "path/filepath" + "text/tabwriter" + + "honnef.co/go/tools/lint" +) + +func shortPath(path string) string { + cwd, err := os.Getwd() + if err != nil { + return path + } + if rel, err := filepath.Rel(cwd, path); err == nil && len(rel) < len(path) { + return rel + } + return path +} + +func relativePositionString(pos token.Position) string { + s := shortPath(pos.Filename) + if pos.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", pos.Line, pos.Column) + } + if s == "" { + s = "-" + } + return s +} + +type Statter interface { + Stats(total, errors, warnings int) +} + +type Formatter interface { + Format(p lint.Problem) +} + +type Text struct { + W io.Writer +} + +func (o Text) Format(p lint.Problem) { + fmt.Fprintf(o.W, "%v: %s\n", relativePositionString(p.Pos), p.String()) +} + +type JSON struct { + W io.Writer +} + +func severity(s lint.Severity) string { + switch s { + case lint.Error: + return "error" + case lint.Warning: + return "warning" + case lint.Ignored: + return "ignored" + } + return "" +} + +func (o JSON) Format(p lint.Problem) { + type location struct { + File string `json:"file"` + Line int `json:"line"` + Column int `json:"column"` + } + jp := struct { + Code string `json:"code"` + Severity string `json:"severity,omitempty"` + Location location `json:"location"` + End location `json:"end"` + Message string `json:"message"` + }{ + Code: p.Check, + Severity: severity(p.Severity), + Location: location{ + File: p.Pos.Filename, + Line: p.Pos.Line, + Column: p.Pos.Column, + }, + End: location{ + File: p.End.Filename, + Line: p.End.Line, + Column: p.End.Column, + }, + Message: p.Message, + } + _ = json.NewEncoder(o.W).Encode(jp) +} + +type Stylish struct { + W io.Writer + + prevFile string + tw *tabwriter.Writer +} + +func (o *Stylish) Format(p lint.Problem) { + pos := p.Pos + if pos.Filename == "" { + pos.Filename = "-" + } + + if pos.Filename != o.prevFile { + if o.prevFile != "" { + o.tw.Flush() + fmt.Fprintln(o.W) + } + fmt.Fprintln(o.W, pos.Filename) + o.prevFile = pos.Filename + o.tw = tabwriter.NewWriter(o.W, 0, 4, 2, ' ', 0) + } + fmt.Fprintf(o.tw, " (%d, %d)\t%s\t%s\n", pos.Line, pos.Column, p.Check, p.Message) +} + +func (o *Stylish) Stats(total, errors, warnings int) { + if o.tw != nil { + o.tw.Flush() + fmt.Fprintln(o.W) + } + fmt.Fprintf(o.W, " ✖ %d problems (%d errors, %d warnings)\n", + total, errors, warnings) +} diff --git a/vendor/honnef.co/go/tools/lint/lintutil/stats.go b/vendor/honnef.co/go/tools/lint/lintutil/stats.go new file mode 100644 index 000000000..ba8caf0af --- /dev/null +++ b/vendor/honnef.co/go/tools/lint/lintutil/stats.go @@ -0,0 +1,7 @@ +// +build !aix,!android,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package lintutil + +import "os" + +var infoSignals = []os.Signal{} diff --git a/vendor/honnef.co/go/tools/lint/lintutil/stats_bsd.go b/vendor/honnef.co/go/tools/lint/lintutil/stats_bsd.go new file mode 100644 index 000000000..3a62ede03 --- /dev/null +++ b/vendor/honnef.co/go/tools/lint/lintutil/stats_bsd.go @@ -0,0 +1,10 @@ +// +build darwin dragonfly freebsd netbsd openbsd + +package lintutil + +import ( + "os" + "syscall" +) + +var infoSignals = []os.Signal{syscall.SIGINFO} diff --git a/vendor/honnef.co/go/tools/lint/lintutil/stats_posix.go b/vendor/honnef.co/go/tools/lint/lintutil/stats_posix.go new file mode 100644 index 000000000..53f21c666 --- /dev/null +++ b/vendor/honnef.co/go/tools/lint/lintutil/stats_posix.go @@ -0,0 
+1,10 @@ +// +build aix android linux solaris + +package lintutil + +import ( + "os" + "syscall" +) + +var infoSignals = []os.Signal{syscall.SIGUSR1} diff --git a/vendor/honnef.co/go/tools/lint/lintutil/util.go b/vendor/honnef.co/go/tools/lint/lintutil/util.go new file mode 100644 index 000000000..fe0279f92 --- /dev/null +++ b/vendor/honnef.co/go/tools/lint/lintutil/util.go @@ -0,0 +1,392 @@ +// Copyright (c) 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +// Package lintutil provides helpers for writing linter command lines. +package lintutil // import "honnef.co/go/tools/lint/lintutil" + +import ( + "crypto/sha256" + "errors" + "flag" + "fmt" + "go/build" + "go/token" + "io" + "log" + "os" + "os/signal" + "regexp" + "runtime" + "runtime/pprof" + "strconv" + "strings" + "sync/atomic" + + "honnef.co/go/tools/config" + "honnef.co/go/tools/internal/cache" + "honnef.co/go/tools/lint" + "honnef.co/go/tools/lint/lintutil/format" + "honnef.co/go/tools/version" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/buildutil" + "golang.org/x/tools/go/packages" +) + +func NewVersionFlag() flag.Getter { + tags := build.Default.ReleaseTags + v := tags[len(tags)-1][2:] + version := new(VersionFlag) + if err := version.Set(v); err != nil { + panic(fmt.Sprintf("internal error: %s", err)) + } + return version +} + +type VersionFlag int + +func (v *VersionFlag) String() string { + return fmt.Sprintf("1.%d", *v) + +} + +func (v *VersionFlag) Set(s string) error { + if len(s) < 3 { + return errors.New("invalid Go version") + } + if s[0] != '1' { + return errors.New("invalid Go version") + } + if s[1] != '.' { + return errors.New("invalid Go version") + } + i, err := strconv.Atoi(s[2:]) + *v = VersionFlag(i) + return err +} + +func (v *VersionFlag) Get() interface{} { + return int(*v) +} + +func usage(name string, flags *flag.FlagSet) func() { + return func() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", name) + fmt.Fprintf(os.Stderr, "\t%s [flags] # runs on package in current directory\n", name) + fmt.Fprintf(os.Stderr, "\t%s [flags] packages\n", name) + fmt.Fprintf(os.Stderr, "\t%s [flags] directory\n", name) + fmt.Fprintf(os.Stderr, "\t%s [flags] files... 
# must be a single package\n", name) + fmt.Fprintf(os.Stderr, "Flags:\n") + flags.PrintDefaults() + } +} + +type list []string + +func (list *list) String() string { + return `"` + strings.Join(*list, ",") + `"` +} + +func (list *list) Set(s string) error { + if s == "" { + *list = nil + return nil + } + + *list = strings.Split(s, ",") + return nil +} + +func FlagSet(name string) *flag.FlagSet { + flags := flag.NewFlagSet("", flag.ExitOnError) + flags.Usage = usage(name, flags) + flags.String("tags", "", "List of `build tags`") + flags.Bool("tests", true, "Include tests") + flags.Bool("version", false, "Print version and exit") + flags.Bool("show-ignored", false, "Don't filter ignored problems") + flags.String("f", "text", "Output `format` (valid choices are 'stylish', 'text' and 'json')") + flags.String("explain", "", "Print description of `check`") + + flags.String("debug.cpuprofile", "", "Write CPU profile to `file`") + flags.String("debug.memprofile", "", "Write memory profile to `file`") + flags.Bool("debug.version", false, "Print detailed version information about this program") + flags.Bool("debug.no-compile-errors", false, "Don't print compile errors") + + checks := list{"inherit"} + fail := list{"all"} + flags.Var(&checks, "checks", "Comma-separated list of `checks` to enable.") + flags.Var(&fail, "fail", "Comma-separated list of `checks` that can cause a non-zero exit status.") + + tags := build.Default.ReleaseTags + v := tags[len(tags)-1][2:] + version := new(VersionFlag) + if err := version.Set(v); err != nil { + panic(fmt.Sprintf("internal error: %s", err)) + } + + flags.Var(version, "go", "Target Go `version` in the format '1.x'") + return flags +} + +func findCheck(cs []*analysis.Analyzer, check string) (*analysis.Analyzer, bool) { + for _, c := range cs { + if c.Name == check { + return c, true + } + } + return nil, false +} + +func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs *flag.FlagSet) { + tags := fs.Lookup("tags").Value.(flag.Getter).Get().(string) + tests := fs.Lookup("tests").Value.(flag.Getter).Get().(bool) + goVersion := fs.Lookup("go").Value.(flag.Getter).Get().(int) + formatter := fs.Lookup("f").Value.(flag.Getter).Get().(string) + printVersion := fs.Lookup("version").Value.(flag.Getter).Get().(bool) + showIgnored := fs.Lookup("show-ignored").Value.(flag.Getter).Get().(bool) + explain := fs.Lookup("explain").Value.(flag.Getter).Get().(string) + + cpuProfile := fs.Lookup("debug.cpuprofile").Value.(flag.Getter).Get().(string) + memProfile := fs.Lookup("debug.memprofile").Value.(flag.Getter).Get().(string) + debugVersion := fs.Lookup("debug.version").Value.(flag.Getter).Get().(bool) + debugNoCompile := fs.Lookup("debug.no-compile-errors").Value.(flag.Getter).Get().(bool) + + cfg := config.Config{} + cfg.Checks = *fs.Lookup("checks").Value.(*list) + + exit := func(code int) { + if cpuProfile != "" { + pprof.StopCPUProfile() + } + if memProfile != "" { + f, err := os.Create(memProfile) + if err != nil { + panic(err) + } + runtime.GC() + pprof.WriteHeapProfile(f) + } + os.Exit(code) + } + if cpuProfile != "" { + f, err := os.Create(cpuProfile) + if err != nil { + log.Fatal(err) + } + pprof.StartCPUProfile(f) + } + + if debugVersion { + version.Verbose() + exit(0) + } + + if printVersion { + version.Print() + exit(0) + } + + // Validate that the tags argument is well-formed. go/packages + // doesn't detect malformed build flags and returns unhelpful + // errors. 
+ tf := buildutil.TagsFlag{} + if err := tf.Set(tags); err != nil { + fmt.Fprintln(os.Stderr, fmt.Errorf("invalid value %q for flag -tags: %s", tags, err)) + exit(1) + } + + if explain != "" { + var haystack []*analysis.Analyzer + haystack = append(haystack, cs...) + for _, cum := range cums { + haystack = append(haystack, cum.Analyzer()) + } + check, ok := findCheck(haystack, explain) + if !ok { + fmt.Fprintln(os.Stderr, "Couldn't find check", explain) + exit(1) + } + if check.Doc == "" { + fmt.Fprintln(os.Stderr, explain, "has no documentation") + exit(1) + } + fmt.Println(check.Doc) + exit(0) + } + + ps, err := Lint(cs, cums, fs.Args(), &Options{ + Tags: tags, + LintTests: tests, + GoVersion: goVersion, + Config: cfg, + }) + if err != nil { + fmt.Fprintln(os.Stderr, err) + exit(1) + } + + var f format.Formatter + switch formatter { + case "text": + f = format.Text{W: os.Stdout} + case "stylish": + f = &format.Stylish{W: os.Stdout} + case "json": + f = format.JSON{W: os.Stdout} + default: + fmt.Fprintf(os.Stderr, "unsupported output format %q\n", formatter) + exit(2) + } + + var ( + total int + errors int + warnings int + ) + + fail := *fs.Lookup("fail").Value.(*list) + analyzers := make([]*analysis.Analyzer, len(cs), len(cs)+len(cums)) + copy(analyzers, cs) + for _, cum := range cums { + analyzers = append(analyzers, cum.Analyzer()) + } + shouldExit := lint.FilterChecks(analyzers, fail) + shouldExit["compile"] = true + + total = len(ps) + for _, p := range ps { + if p.Check == "compile" && debugNoCompile { + continue + } + if p.Severity == lint.Ignored && !showIgnored { + continue + } + if shouldExit[p.Check] { + errors++ + } else { + p.Severity = lint.Warning + warnings++ + } + f.Format(p) + } + if f, ok := f.(format.Statter); ok { + f.Stats(total, errors, warnings) + } + if errors > 0 { + exit(1) + } + exit(0) +} + +type Options struct { + Config config.Config + + Tags string + LintTests bool + GoVersion int +} + +func computeSalt() ([]byte, error) { + if version.Version != "devel" { + return []byte(version.Version), nil + } + p, err := os.Executable() + if err != nil { + return nil, err + } + f, err := os.Open(p) + if err != nil { + return nil, err + } + defer f.Close() + h := sha256.New() + if _, err := io.Copy(h, f); err != nil { + return nil, err + } + return h.Sum(nil), nil +} + +func Lint(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, paths []string, opt *Options) ([]lint.Problem, error) { + salt, err := computeSalt() + if err != nil { + return nil, fmt.Errorf("could not compute salt for cache: %s", err) + } + cache.SetSalt(salt) + + if opt == nil { + opt = &Options{} + } + + l := &lint.Linter{ + Checkers: cs, + CumulativeCheckers: cums, + GoVersion: opt.GoVersion, + Config: opt.Config, + } + cfg := &packages.Config{} + if opt.LintTests { + cfg.Tests = true + } + if opt.Tags != "" { + cfg.BuildFlags = append(cfg.BuildFlags, "-tags", opt.Tags) + } + + printStats := func() { + // Individual stats are read atomically, but overall there + // is no synchronisation. For printing rough progress + // information, this doesn't matter. 
+ switch atomic.LoadUint32(&l.Stats.State) { + case lint.StateInitializing: + fmt.Fprintln(os.Stderr, "Status: initializing") + case lint.StateGraph: + fmt.Fprintln(os.Stderr, "Status: loading package graph") + case lint.StateProcessing: + fmt.Fprintf(os.Stderr, "Packages: %d/%d initial, %d/%d total; Workers: %d/%d; Problems: %d\n", + atomic.LoadUint32(&l.Stats.ProcessedInitialPackages), + atomic.LoadUint32(&l.Stats.InitialPackages), + atomic.LoadUint32(&l.Stats.ProcessedPackages), + atomic.LoadUint32(&l.Stats.TotalPackages), + atomic.LoadUint32(&l.Stats.ActiveWorkers), + atomic.LoadUint32(&l.Stats.TotalWorkers), + atomic.LoadUint32(&l.Stats.Problems), + ) + case lint.StateCumulative: + fmt.Fprintln(os.Stderr, "Status: processing cumulative checkers") + } + } + if len(infoSignals) > 0 { + ch := make(chan os.Signal, 1) + signal.Notify(ch, infoSignals...) + defer signal.Stop(ch) + go func() { + for range ch { + printStats() + } + }() + } + + return l.Lint(cfg, paths) +} + +var posRe = regexp.MustCompile(`^(.+?):(\d+)(?::(\d+)?)?$`) + +func parsePos(pos string) token.Position { + if pos == "-" || pos == "" { + return token.Position{} + } + parts := posRe.FindStringSubmatch(pos) + if parts == nil { + panic(fmt.Sprintf("internal error: malformed position %q", pos)) + } + file := parts[1] + line, _ := strconv.Atoi(parts[2]) + col, _ := strconv.Atoi(parts[3]) + return token.Position{ + Filename: file, + Line: line, + Column: col, + } +} diff --git a/vendor/honnef.co/go/tools/lint/runner.go b/vendor/honnef.co/go/tools/lint/runner.go new file mode 100644 index 000000000..3b22a63fa --- /dev/null +++ b/vendor/honnef.co/go/tools/lint/runner.go @@ -0,0 +1,970 @@ +package lint + +/* +Parallelism + +Runner implements parallel processing of packages by spawning one +goroutine per package in the dependency graph, without any semaphores. +Each goroutine initially waits on the completion of all of its +dependencies, thus establishing correct order of processing. Once all +dependencies finish processing, the goroutine will load the package +from export data or source – this loading is guarded by a semaphore, +sized according to the number of CPU cores. This way, we only have as +many packages occupying memory and CPU resources as there are actual +cores to process them. + +This combination of unbounded goroutines but bounded package loading +means that if we have many parallel, independent subgraphs, they will +all execute in parallel, while not wasting resources for long linear +chains or trying to process more subgraphs in parallel than the system +can handle. + +*/ + +import ( + "bytes" + "encoding/gob" + "encoding/hex" + "fmt" + "go/ast" + "go/token" + "go/types" + "reflect" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/packages" + "golang.org/x/tools/go/types/objectpath" + "honnef.co/go/tools/config" + "honnef.co/go/tools/facts" + "honnef.co/go/tools/internal/cache" + "honnef.co/go/tools/loader" +) + +// If enabled, abuse of the go/analysis API will lead to panics +const sanityCheck = true + +// OPT(dh): for a dependency tree A->B->C->D, if we have cached data +// for B, there should be no need to load C and D individually. Go's +// export data for B contains all the data we need on types, and our +// fact cache could store the union of B, C and D in B. +// +// This may change unused's behavior, however, as it may observe fewer +// interfaces from transitive dependencies. 
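Illustrative note (not part of the vendored sources): the Parallelism comment above describes spawning one goroutine per package, each waiting on its dependencies, with the expensive load step bounded by a CPU-sized semaphore. A minimal sketch of that shape, assuming simplified stand-in types (a node instead of Package, a print instead of real package loading):

package main

import (
	"fmt"
	"runtime"
	"sync"
)

type node struct {
	name string
	deps []*node
	done chan struct{}
}

func main() {
	// Tiny dependency chain a -> b -> c.
	c := &node{name: "c", done: make(chan struct{})}
	b := &node{name: "b", deps: []*node{c}, done: make(chan struct{})}
	a := &node{name: "a", deps: []*node{b}, done: make(chan struct{})}
	all := []*node{a, b, c}

	// Unbounded goroutines, but only GOMAXPROCS "loads" in flight at once.
	loadSem := make(chan struct{}, runtime.GOMAXPROCS(-1))

	var wg sync.WaitGroup
	wg.Add(len(all))
	for _, n := range all {
		n := n
		go func() {
			defer wg.Done()
			for _, d := range n.deps {
				<-d.done // establish processing order: dependencies first
			}
			loadSem <- struct{}{} // acquire a load slot
			fmt.Println("processing", n.name)
			<-loadSem // release the slot
			close(n.done)
		}()
	}
	wg.Wait()
}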
+ +type Package struct { + dependents uint64 + + *packages.Package + Imports []*Package + initial bool + fromSource bool + hash string + done chan struct{} + + resultsMu sync.Mutex + // results maps analyzer IDs to analyzer results + results []*result + + cfg *config.Config + gen map[string]facts.Generator + problems []Problem + ignores []Ignore + errs []error + + // these slices are indexed by analysis + facts []map[types.Object][]analysis.Fact + pkgFacts [][]analysis.Fact + + canClearTypes bool +} + +func (pkg *Package) decUse() { + atomic.AddUint64(&pkg.dependents, ^uint64(0)) + if atomic.LoadUint64(&pkg.dependents) == 0 { + // nobody depends on this package anymore + if pkg.canClearTypes { + pkg.Types = nil + } + pkg.facts = nil + pkg.pkgFacts = nil + + for _, imp := range pkg.Imports { + imp.decUse() + } + } +} + +type result struct { + v interface{} + err error + ready chan struct{} +} + +type Runner struct { + ld loader.Loader + cache *cache.Cache + + analyzerIDs analyzerIDs + + // limits parallelism of loading packages + loadSem chan struct{} + + goVersion int + stats *Stats +} + +type analyzerIDs struct { + m map[*analysis.Analyzer]int +} + +func (ids analyzerIDs) get(a *analysis.Analyzer) int { + id, ok := ids.m[a] + if !ok { + panic(fmt.Sprintf("no analyzer ID for %s", a.Name)) + } + return id +} + +type Fact struct { + Path string + Fact analysis.Fact +} + +type analysisAction struct { + analyzer *analysis.Analyzer + analyzerID int + pkg *Package + newPackageFacts []analysis.Fact + problems []Problem + + pkgFacts map[*types.Package][]analysis.Fact +} + +func (ac *analysisAction) String() string { + return fmt.Sprintf("%s @ %s", ac.analyzer, ac.pkg) +} + +func (ac *analysisAction) allObjectFacts() []analysis.ObjectFact { + out := make([]analysis.ObjectFact, 0, len(ac.pkg.facts[ac.analyzerID])) + for obj, facts := range ac.pkg.facts[ac.analyzerID] { + for _, fact := range facts { + out = append(out, analysis.ObjectFact{ + Object: obj, + Fact: fact, + }) + } + } + return out +} + +func (ac *analysisAction) allPackageFacts() []analysis.PackageFact { + out := make([]analysis.PackageFact, 0, len(ac.pkgFacts)) + for pkg, facts := range ac.pkgFacts { + for _, fact := range facts { + out = append(out, analysis.PackageFact{ + Package: pkg, + Fact: fact, + }) + } + } + return out +} + +func (ac *analysisAction) importObjectFact(obj types.Object, fact analysis.Fact) bool { + if sanityCheck && len(ac.analyzer.FactTypes) == 0 { + panic("analysis doesn't export any facts") + } + for _, f := range ac.pkg.facts[ac.analyzerID][obj] { + if reflect.TypeOf(f) == reflect.TypeOf(fact) { + reflect.ValueOf(fact).Elem().Set(reflect.ValueOf(f).Elem()) + return true + } + } + return false +} + +func (ac *analysisAction) importPackageFact(pkg *types.Package, fact analysis.Fact) bool { + if sanityCheck && len(ac.analyzer.FactTypes) == 0 { + panic("analysis doesn't export any facts") + } + for _, f := range ac.pkgFacts[pkg] { + if reflect.TypeOf(f) == reflect.TypeOf(fact) { + reflect.ValueOf(fact).Elem().Set(reflect.ValueOf(f).Elem()) + return true + } + } + return false +} + +func (ac *analysisAction) exportObjectFact(obj types.Object, fact analysis.Fact) { + if sanityCheck && len(ac.analyzer.FactTypes) == 0 { + panic("analysis doesn't export any facts") + } + ac.pkg.facts[ac.analyzerID][obj] = append(ac.pkg.facts[ac.analyzerID][obj], fact) +} + +func (ac *analysisAction) exportPackageFact(fact analysis.Fact) { + if sanityCheck && len(ac.analyzer.FactTypes) == 0 { + panic("analysis doesn't export any 
facts") + } + ac.pkgFacts[ac.pkg.Types] = append(ac.pkgFacts[ac.pkg.Types], fact) + ac.newPackageFacts = append(ac.newPackageFacts, fact) +} + +func (ac *analysisAction) report(pass *analysis.Pass, d analysis.Diagnostic) { + p := Problem{ + Pos: DisplayPosition(pass.Fset, d.Pos), + End: DisplayPosition(pass.Fset, d.End), + Message: d.Message, + Check: pass.Analyzer.Name, + } + ac.problems = append(ac.problems, p) +} + +func (r *Runner) runAnalysis(ac *analysisAction) (ret interface{}, err error) { + ac.pkg.resultsMu.Lock() + res := ac.pkg.results[r.analyzerIDs.get(ac.analyzer)] + if res != nil { + ac.pkg.resultsMu.Unlock() + <-res.ready + return res.v, res.err + } else { + res = &result{ + ready: make(chan struct{}), + } + ac.pkg.results[r.analyzerIDs.get(ac.analyzer)] = res + ac.pkg.resultsMu.Unlock() + + defer func() { + res.v = ret + res.err = err + close(res.ready) + }() + + pass := new(analysis.Pass) + *pass = analysis.Pass{ + Analyzer: ac.analyzer, + Fset: ac.pkg.Fset, + Files: ac.pkg.Syntax, + // type information may be nil or may be populated. if it is + // nil, it will get populated later. + Pkg: ac.pkg.Types, + TypesInfo: ac.pkg.TypesInfo, + TypesSizes: ac.pkg.TypesSizes, + ResultOf: map[*analysis.Analyzer]interface{}{}, + ImportObjectFact: ac.importObjectFact, + ImportPackageFact: ac.importPackageFact, + ExportObjectFact: ac.exportObjectFact, + ExportPackageFact: ac.exportPackageFact, + Report: func(d analysis.Diagnostic) { + ac.report(pass, d) + }, + AllObjectFacts: ac.allObjectFacts, + AllPackageFacts: ac.allPackageFacts, + } + + if !ac.pkg.initial { + // Don't report problems in dependencies + pass.Report = func(analysis.Diagnostic) {} + } + return r.runAnalysisUser(pass, ac) + } +} + +func (r *Runner) loadCachedFacts(a *analysis.Analyzer, pkg *Package) ([]Fact, bool) { + if len(a.FactTypes) == 0 { + return nil, true + } + + var facts []Fact + // Look in the cache for facts + aID, err := passActionID(pkg, a) + if err != nil { + return nil, false + } + aID = cache.Subkey(aID, "facts") + b, _, err := r.cache.GetBytes(aID) + if err != nil { + // No cached facts, analyse this package like a user-provided one, but ignore diagnostics + return nil, false + } + + if err := gob.NewDecoder(bytes.NewReader(b)).Decode(&facts); err != nil { + // Cached facts are broken, analyse this package like a user-provided one, but ignore diagnostics + return nil, false + } + return facts, true +} + +type dependencyError struct { + dep string + err error +} + +func (err dependencyError) nested() dependencyError { + if o, ok := err.err.(dependencyError); ok { + return o.nested() + } + return err +} + +func (err dependencyError) Error() string { + if o, ok := err.err.(dependencyError); ok { + return o.Error() + } + return fmt.Sprintf("error running dependency %s: %s", err.dep, err.err) +} + +func (r *Runner) makeAnalysisAction(a *analysis.Analyzer, pkg *Package) *analysisAction { + aid := r.analyzerIDs.get(a) + ac := &analysisAction{ + analyzer: a, + analyzerID: aid, + pkg: pkg, + } + + if len(a.FactTypes) == 0 { + return ac + } + + // Merge all package facts of dependencies + ac.pkgFacts = map[*types.Package][]analysis.Fact{} + seen := map[*Package]struct{}{} + var dfs func(*Package) + dfs = func(pkg *Package) { + if _, ok := seen[pkg]; ok { + return + } + seen[pkg] = struct{}{} + s := pkg.pkgFacts[aid] + ac.pkgFacts[pkg.Types] = s[0:len(s):len(s)] + for _, imp := range pkg.Imports { + dfs(imp) + } + } + dfs(pkg) + + return ac +} + +// analyzes that we always want to run, even if they're not being run 
+// explicitly or as dependencies. these are necessary for the inner +// workings of the runner. +var injectedAnalyses = []*analysis.Analyzer{facts.Generated, config.Analyzer} + +func (r *Runner) runAnalysisUser(pass *analysis.Pass, ac *analysisAction) (interface{}, error) { + if !ac.pkg.fromSource { + panic(fmt.Sprintf("internal error: %s was not loaded from source", ac.pkg)) + } + + // User-provided package, analyse it + // First analyze it with dependencies + for _, req := range ac.analyzer.Requires { + acReq := r.makeAnalysisAction(req, ac.pkg) + ret, err := r.runAnalysis(acReq) + if err != nil { + // We couldn't run a dependency, no point in going on + return nil, dependencyError{req.Name, err} + } + + pass.ResultOf[req] = ret + } + + // Then with this analyzer + ret, err := ac.analyzer.Run(pass) + if err != nil { + return nil, err + } + + if len(ac.analyzer.FactTypes) > 0 { + // Merge new facts into the package and persist them. + var facts []Fact + for _, fact := range ac.newPackageFacts { + id := r.analyzerIDs.get(ac.analyzer) + ac.pkg.pkgFacts[id] = append(ac.pkg.pkgFacts[id], fact) + facts = append(facts, Fact{"", fact}) + } + for obj, afacts := range ac.pkg.facts[ac.analyzerID] { + if obj.Pkg() != ac.pkg.Package.Types { + continue + } + path, err := objectpath.For(obj) + if err != nil { + continue + } + for _, fact := range afacts { + facts = append(facts, Fact{string(path), fact}) + } + } + + buf := &bytes.Buffer{} + if err := gob.NewEncoder(buf).Encode(facts); err != nil { + return nil, err + } + aID, err := passActionID(ac.pkg, ac.analyzer) + if err != nil { + return nil, err + } + aID = cache.Subkey(aID, "facts") + if err := r.cache.PutBytes(aID, buf.Bytes()); err != nil { + return nil, err + } + } + + return ret, nil +} + +func NewRunner(stats *Stats) (*Runner, error) { + cache, err := cache.Default() + if err != nil { + return nil, err + } + + return &Runner{ + cache: cache, + stats: stats, + }, nil +} + +// Run loads packages corresponding to patterns and analyses them with +// analyzers. It returns the loaded packages, which contain reported +// diagnostics as well as extracted ignore directives. +// +// Note that diagnostics have not been filtered at this point yet, to +// accomodate cumulative analyzes that require additional steps to +// produce diagnostics. +func (r *Runner) Run(cfg *packages.Config, patterns []string, analyzers []*analysis.Analyzer, hasCumulative bool) ([]*Package, error) { + r.analyzerIDs = analyzerIDs{m: map[*analysis.Analyzer]int{}} + id := 0 + seen := map[*analysis.Analyzer]struct{}{} + var dfs func(a *analysis.Analyzer) + dfs = func(a *analysis.Analyzer) { + if _, ok := seen[a]; ok { + return + } + seen[a] = struct{}{} + r.analyzerIDs.m[a] = id + id++ + for _, f := range a.FactTypes { + gob.Register(f) + } + for _, req := range a.Requires { + dfs(req) + } + } + for _, a := range analyzers { + if v := a.Flags.Lookup("go"); v != nil { + v.Value.Set(fmt.Sprintf("1.%d", r.goVersion)) + } + dfs(a) + } + for _, a := range injectedAnalyses { + dfs(a) + } + + var dcfg packages.Config + if cfg != nil { + dcfg = *cfg + } + + atomic.StoreUint32(&r.stats.State, StateGraph) + initialPkgs, err := r.ld.Graph(dcfg, patterns...) 
+ if err != nil { + return nil, err + } + + defer r.cache.Trim() + + var allPkgs []*Package + m := map[*packages.Package]*Package{} + packages.Visit(initialPkgs, nil, func(l *packages.Package) { + m[l] = &Package{ + Package: l, + results: make([]*result, len(r.analyzerIDs.m)), + facts: make([]map[types.Object][]analysis.Fact, len(r.analyzerIDs.m)), + pkgFacts: make([][]analysis.Fact, len(r.analyzerIDs.m)), + done: make(chan struct{}), + // every package needs itself + dependents: 1, + canClearTypes: !hasCumulative, + } + allPkgs = append(allPkgs, m[l]) + for i := range m[l].facts { + m[l].facts[i] = map[types.Object][]analysis.Fact{} + } + for _, err := range l.Errors { + m[l].errs = append(m[l].errs, err) + } + for _, v := range l.Imports { + m[v].dependents++ + m[l].Imports = append(m[l].Imports, m[v]) + } + + m[l].hash, err = packageHash(m[l]) + if err != nil { + m[l].errs = append(m[l].errs, err) + } + }) + + pkgs := make([]*Package, len(initialPkgs)) + for i, l := range initialPkgs { + pkgs[i] = m[l] + pkgs[i].initial = true + } + + atomic.StoreUint32(&r.stats.InitialPackages, uint32(len(initialPkgs))) + atomic.StoreUint32(&r.stats.TotalPackages, uint32(len(allPkgs))) + atomic.StoreUint32(&r.stats.State, StateProcessing) + + var wg sync.WaitGroup + wg.Add(len(allPkgs)) + r.loadSem = make(chan struct{}, runtime.GOMAXPROCS(-1)) + atomic.StoreUint32(&r.stats.TotalWorkers, uint32(cap(r.loadSem))) + for _, pkg := range allPkgs { + pkg := pkg + go func() { + r.processPkg(pkg, analyzers) + + if pkg.initial { + atomic.AddUint32(&r.stats.ProcessedInitialPackages, 1) + } + atomic.AddUint32(&r.stats.Problems, uint32(len(pkg.problems))) + wg.Done() + }() + } + wg.Wait() + + return pkgs, nil +} + +var posRe = regexp.MustCompile(`^(.+?):(\d+)(?::(\d+)?)?`) + +func parsePos(pos string) (token.Position, int, error) { + if pos == "-" || pos == "" { + return token.Position{}, 0, nil + } + parts := posRe.FindStringSubmatch(pos) + if parts == nil { + return token.Position{}, 0, fmt.Errorf("malformed position %q", pos) + } + file := parts[1] + line, _ := strconv.Atoi(parts[2]) + col, _ := strconv.Atoi(parts[3]) + return token.Position{ + Filename: file, + Line: line, + Column: col, + }, len(parts[0]), nil +} + +// loadPkg loads a Go package. If the package is in the set of initial +// packages, it will be loaded from source, otherwise it will be +// loaded from export data. In the case that the package was loaded +// from export data, cached facts will also be loaded. +// +// Currently, only cached facts for this package will be loaded, not +// for any of its dependencies. +func (r *Runner) loadPkg(pkg *Package, analyzers []*analysis.Analyzer) error { + if pkg.Types != nil { + panic(fmt.Sprintf("internal error: %s has already been loaded", pkg.Package)) + } + + // Load type information + if pkg.initial { + // Load package from source + pkg.fromSource = true + return r.ld.LoadFromSource(pkg.Package) + } + + // Load package from export data + if err := r.ld.LoadFromExport(pkg.Package); err != nil { + // We asked Go to give us up to date export data, yet + // we can't load it. There must be something wrong. + // + // Attempt loading from source. This should fail (because + // otherwise there would be export data); we just want to + // get the compile errors. If loading from source succeeds + // we discard the result, anyway. Otherwise we'll fail + // when trying to reload from export data later. 
+ // + // FIXME(dh): we no longer reload from export data, so + // theoretically we should be able to continue + pkg.fromSource = true + if err := r.ld.LoadFromSource(pkg.Package); err != nil { + return err + } + // Make sure this package can't be imported successfully + pkg.Package.Errors = append(pkg.Package.Errors, packages.Error{ + Pos: "-", + Msg: fmt.Sprintf("could not load export data: %s", err), + Kind: packages.ParseError, + }) + return fmt.Errorf("could not load export data: %s", err) + } + + failed := false + seen := make([]bool, len(r.analyzerIDs.m)) + var dfs func(*analysis.Analyzer) + dfs = func(a *analysis.Analyzer) { + if seen[r.analyzerIDs.get(a)] { + return + } + seen[r.analyzerIDs.get(a)] = true + + if len(a.FactTypes) > 0 { + facts, ok := r.loadCachedFacts(a, pkg) + if !ok { + failed = true + return + } + + for _, f := range facts { + if f.Path == "" { + // This is a package fact + pkg.pkgFacts[r.analyzerIDs.get(a)] = append(pkg.pkgFacts[r.analyzerIDs.get(a)], f.Fact) + continue + } + obj, err := objectpath.Object(pkg.Types, objectpath.Path(f.Path)) + if err != nil { + // Be lenient about these errors. For example, when + // analysing io/ioutil from source, we may get a fact + // for methods on the devNull type, and objectpath + // will happily create a path for them. However, when + // we later load io/ioutil from export data, the path + // no longer resolves. + // + // If an exported type embeds the unexported type, + // then (part of) the unexported type will become part + // of the type information and our path will resolve + // again. + continue + } + pkg.facts[r.analyzerIDs.get(a)][obj] = append(pkg.facts[r.analyzerIDs.get(a)][obj], f.Fact) + } + } + + for _, req := range a.Requires { + dfs(req) + } + } + for _, a := range analyzers { + dfs(a) + } + + if failed { + pkg.fromSource = true + // XXX we added facts to the maps, we need to get rid of those + return r.ld.LoadFromSource(pkg.Package) + } + + return nil +} + +type analysisError struct { + analyzer *analysis.Analyzer + pkg *Package + err error +} + +func (err analysisError) Error() string { + return fmt.Sprintf("error running analyzer %s on %s: %s", err.analyzer, err.pkg, err.err) +} + +// processPkg processes a package. This involves loading the package, +// either from export data or from source. For packages loaded from +// source, the provides analyzers will be run on the package. +func (r *Runner) processPkg(pkg *Package, analyzers []*analysis.Analyzer) { + defer func() { + // Clear information we no longer need. Make sure to do this + // when returning from processPkg so that we clear + // dependencies, not just initial packages. + pkg.TypesInfo = nil + pkg.Syntax = nil + pkg.results = nil + + atomic.AddUint32(&r.stats.ProcessedPackages, 1) + pkg.decUse() + close(pkg.done) + }() + + // Ensure all packages have the generated map and config. This is + // required by interna of the runner. Analyses that themselves + // make use of either have an explicit dependency so that other + // runners work correctly, too. + analyzers = append(analyzers[0:len(analyzers):len(analyzers)], injectedAnalyses...) + + if len(pkg.errs) != 0 { + return + } + + for _, imp := range pkg.Imports { + <-imp.done + if len(imp.errs) > 0 { + if imp.initial { + // Don't print the error of the dependency since it's + // an initial package and we're already printing the + // error. 
+ pkg.errs = append(pkg.errs, fmt.Errorf("could not analyze dependency %s of %s", imp, pkg)) + } else { + var s string + for _, err := range imp.errs { + s += "\n\t" + err.Error() + } + pkg.errs = append(pkg.errs, fmt.Errorf("could not analyze dependency %s of %s: %s", imp, pkg, s)) + } + return + } + } + if pkg.PkgPath == "unsafe" { + pkg.Types = types.Unsafe + return + } + + r.loadSem <- struct{}{} + atomic.AddUint32(&r.stats.ActiveWorkers, 1) + defer func() { + <-r.loadSem + atomic.AddUint32(&r.stats.ActiveWorkers, ^uint32(0)) + }() + if err := r.loadPkg(pkg, analyzers); err != nil { + pkg.errs = append(pkg.errs, err) + return + } + + // A package's object facts is the union of all of its dependencies. + for _, imp := range pkg.Imports { + for ai, m := range imp.facts { + for obj, facts := range m { + pkg.facts[ai][obj] = facts[0:len(facts):len(facts)] + } + } + } + + if !pkg.fromSource { + // Nothing left to do for the package. + return + } + + // Run analyses on initial packages and those missing facts + var wg sync.WaitGroup + wg.Add(len(analyzers)) + errs := make([]error, len(analyzers)) + var acs []*analysisAction + for i, a := range analyzers { + i := i + a := a + ac := r.makeAnalysisAction(a, pkg) + acs = append(acs, ac) + go func() { + defer wg.Done() + // Only initial packages and packages with missing + // facts will have been loaded from source. + if pkg.initial || r.hasFacts(a) { + if _, err := r.runAnalysis(ac); err != nil { + errs[i] = analysisError{a, pkg, err} + return + } + } + }() + } + wg.Wait() + + depErrors := map[dependencyError]int{} + for _, err := range errs { + if err == nil { + continue + } + switch err := err.(type) { + case analysisError: + switch err := err.err.(type) { + case dependencyError: + depErrors[err.nested()]++ + default: + pkg.errs = append(pkg.errs, err) + } + default: + pkg.errs = append(pkg.errs, err) + } + } + for err, count := range depErrors { + pkg.errs = append(pkg.errs, + fmt.Errorf("could not run %s@%s, preventing %d analyzers from running: %s", err.dep, pkg, count, err.err)) + } + + // We can't process ignores at this point because `unused` needs + // to see more than one package to make its decision. + ignores, problems := parseDirectives(pkg.Package) + pkg.ignores = append(pkg.ignores, ignores...) + pkg.problems = append(pkg.problems, problems...) + for _, ac := range acs { + pkg.problems = append(pkg.problems, ac.problems...) + } + + if pkg.initial { + // Only initial packages have these analyzers run, and only + // initial packages need these. + if pkg.results[r.analyzerIDs.get(config.Analyzer)].v != nil { + pkg.cfg = pkg.results[r.analyzerIDs.get(config.Analyzer)].v.(*config.Config) + } + pkg.gen = pkg.results[r.analyzerIDs.get(facts.Generated)].v.(map[string]facts.Generator) + } + + // In a previous version of the code, we would throw away all type + // information and reload it from export data. That was + // nonsensical. The *types.Package doesn't keep any information + // live that export data wouldn't also. We only need to discard + // the AST and the TypesInfo maps; that happens after we return + // from processPkg. +} + +// hasFacts reports whether an analysis exports any facts. An analysis +// that has a transitive dependency that exports facts is considered +// to be exporting facts. 
+func (r *Runner) hasFacts(a *analysis.Analyzer) bool { + ret := false + seen := make([]bool, len(r.analyzerIDs.m)) + var dfs func(*analysis.Analyzer) + dfs = func(a *analysis.Analyzer) { + if seen[r.analyzerIDs.get(a)] { + return + } + seen[r.analyzerIDs.get(a)] = true + if len(a.FactTypes) > 0 { + ret = true + } + for _, req := range a.Requires { + if ret { + break + } + dfs(req) + } + } + dfs(a) + return ret +} + +func parseDirective(s string) (cmd string, args []string) { + if !strings.HasPrefix(s, "//lint:") { + return "", nil + } + s = strings.TrimPrefix(s, "//lint:") + fields := strings.Split(s, " ") + return fields[0], fields[1:] +} + +// parseDirectives extracts all linter directives from the source +// files of the package. Malformed directives are returned as problems. +func parseDirectives(pkg *packages.Package) ([]Ignore, []Problem) { + var ignores []Ignore + var problems []Problem + + for _, f := range pkg.Syntax { + found := false + commentLoop: + for _, cg := range f.Comments { + for _, c := range cg.List { + if strings.Contains(c.Text, "//lint:") { + found = true + break commentLoop + } + } + } + if !found { + continue + } + cm := ast.NewCommentMap(pkg.Fset, f, f.Comments) + for node, cgs := range cm { + for _, cg := range cgs { + for _, c := range cg.List { + if !strings.HasPrefix(c.Text, "//lint:") { + continue + } + cmd, args := parseDirective(c.Text) + switch cmd { + case "ignore", "file-ignore": + if len(args) < 2 { + p := Problem{ + Pos: DisplayPosition(pkg.Fset, c.Pos()), + Message: "malformed linter directive; missing the required reason field?", + Severity: Error, + Check: "compile", + } + problems = append(problems, p) + continue + } + default: + // unknown directive, ignore + continue + } + checks := strings.Split(args[0], ",") + pos := DisplayPosition(pkg.Fset, node.Pos()) + var ig Ignore + switch cmd { + case "ignore": + ig = &LineIgnore{ + File: pos.Filename, + Line: pos.Line, + Checks: checks, + Pos: c.Pos(), + } + case "file-ignore": + ig = &FileIgnore{ + File: pos.Filename, + Checks: checks, + } + } + ignores = append(ignores, ig) + } + } + } + } + + return ignores, problems +} + +// packageHash computes a package's hash. The hash is based on all Go +// files that make up the package, as well as the hashes of imported +// packages. +func packageHash(pkg *Package) (string, error) { + key := cache.NewHash("package hash") + fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath) + for _, f := range pkg.CompiledGoFiles { + h, err := cache.FileHash(f) + if err != nil { + return "", err + } + fmt.Fprintf(key, "file %s %x\n", f, h) + } + + imps := make([]*Package, len(pkg.Imports)) + copy(imps, pkg.Imports) + sort.Slice(imps, func(i, j int) bool { + return imps[i].PkgPath < imps[j].PkgPath + }) + for _, dep := range imps { + if dep.PkgPath == "unsafe" { + continue + } + + fmt.Fprintf(key, "import %s %s\n", dep.PkgPath, dep.hash) + } + h := key.Sum() + return hex.EncodeToString(h[:]), nil +} + +// passActionID computes an ActionID for an analysis pass. 
+func passActionID(pkg *Package, analyzer *analysis.Analyzer) (cache.ActionID, error) { + key := cache.NewHash("action ID") + fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath) + fmt.Fprintf(key, "pkghash %s\n", pkg.hash) + fmt.Fprintf(key, "analyzer %s\n", analyzer.Name) + + return key.Sum(), nil +} diff --git a/vendor/honnef.co/go/tools/lint/stats.go b/vendor/honnef.co/go/tools/lint/stats.go new file mode 100644 index 000000000..2f6508559 --- /dev/null +++ b/vendor/honnef.co/go/tools/lint/stats.go @@ -0,0 +1,20 @@ +package lint + +const ( + StateInitializing = 0 + StateGraph = 1 + StateProcessing = 2 + StateCumulative = 3 +) + +type Stats struct { + State uint32 + + InitialPackages uint32 + TotalPackages uint32 + ProcessedPackages uint32 + ProcessedInitialPackages uint32 + Problems uint32 + ActiveWorkers uint32 + TotalWorkers uint32 +} diff --git a/vendor/honnef.co/go/tools/loader/loader.go b/vendor/honnef.co/go/tools/loader/loader.go new file mode 100644 index 000000000..9c6885d48 --- /dev/null +++ b/vendor/honnef.co/go/tools/loader/loader.go @@ -0,0 +1,197 @@ +package loader + +import ( + "fmt" + "go/ast" + "go/parser" + "go/scanner" + "go/token" + "go/types" + "log" + "os" + "sync" + + "golang.org/x/tools/go/gcexportdata" + "golang.org/x/tools/go/packages" +) + +type Loader struct { + exportMu sync.RWMutex +} + +// Graph resolves patterns and returns packages with all the +// information required to later load type information, and optionally +// syntax trees. +// +// The provided config can set any setting with the exception of Mode. +func (ld *Loader) Graph(cfg packages.Config, patterns ...string) ([]*packages.Package, error) { + cfg.Mode = packages.NeedName | packages.NeedImports | packages.NeedDeps | packages.NeedExportsFile | packages.NeedFiles | packages.NeedCompiledGoFiles | packages.NeedTypesSizes + pkgs, err := packages.Load(&cfg, patterns...) + if err != nil { + return nil, err + } + fset := token.NewFileSet() + packages.Visit(pkgs, nil, func(pkg *packages.Package) { + pkg.Fset = fset + }) + return pkgs, nil +} + +// LoadFromExport loads a package from export data. All of its +// dependencies must have been loaded already. +func (ld *Loader) LoadFromExport(pkg *packages.Package) error { + ld.exportMu.Lock() + defer ld.exportMu.Unlock() + + pkg.IllTyped = true + for path, pkg := range pkg.Imports { + if pkg.Types == nil { + return fmt.Errorf("dependency %q hasn't been loaded yet", path) + } + } + if pkg.ExportFile == "" { + return fmt.Errorf("no export data for %q", pkg.ID) + } + f, err := os.Open(pkg.ExportFile) + if err != nil { + return err + } + defer f.Close() + + r, err := gcexportdata.NewReader(f) + if err != nil { + return err + } + + view := make(map[string]*types.Package) // view seen by gcexportdata + seen := make(map[*packages.Package]bool) // all visited packages + var visit func(pkgs map[string]*packages.Package) + visit = func(pkgs map[string]*packages.Package) { + for _, pkg := range pkgs { + if !seen[pkg] { + seen[pkg] = true + view[pkg.PkgPath] = pkg.Types + visit(pkg.Imports) + } + } + } + visit(pkg.Imports) + tpkg, err := gcexportdata.Read(r, pkg.Fset, view, pkg.PkgPath) + if err != nil { + return err + } + pkg.Types = tpkg + pkg.IllTyped = false + return nil +} + +// LoadFromSource loads a package from source. All of its dependencies +// must have been loaded already. 
+func (ld *Loader) LoadFromSource(pkg *packages.Package) error { + ld.exportMu.RLock() + defer ld.exportMu.RUnlock() + + pkg.IllTyped = true + pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name) + + // OPT(dh): many packages have few files, much fewer than there + // are CPU cores. Additionally, parsing each individual file is + // very fast. A naive parallel implementation of this loop won't + // be faster, and tends to be slower due to extra scheduling, + // bookkeeping and potentially false sharing of cache lines. + pkg.Syntax = make([]*ast.File, len(pkg.CompiledGoFiles)) + for i, file := range pkg.CompiledGoFiles { + f, err := parser.ParseFile(pkg.Fset, file, nil, parser.ParseComments) + if err != nil { + pkg.Errors = append(pkg.Errors, convertError(err)...) + return err + } + pkg.Syntax[i] = f + } + pkg.TypesInfo = &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + + importer := func(path string) (*types.Package, error) { + if path == "unsafe" { + return types.Unsafe, nil + } + imp := pkg.Imports[path] + if imp == nil { + return nil, nil + } + if len(imp.Errors) > 0 { + return nil, imp.Errors[0] + } + return imp.Types, nil + } + tc := &types.Config{ + Importer: importerFunc(importer), + Error: func(err error) { + pkg.Errors = append(pkg.Errors, convertError(err)...) + }, + } + err := types.NewChecker(tc, pkg.Fset, pkg.Types, pkg.TypesInfo).Files(pkg.Syntax) + if err != nil { + return err + } + pkg.IllTyped = false + return nil +} + +func convertError(err error) []packages.Error { + var errs []packages.Error + // taken from go/packages + switch err := err.(type) { + case packages.Error: + // from driver + errs = append(errs, err) + + case *os.PathError: + // from parser + errs = append(errs, packages.Error{ + Pos: err.Path + ":1", + Msg: err.Err.Error(), + Kind: packages.ParseError, + }) + + case scanner.ErrorList: + // from parser + for _, err := range err { + errs = append(errs, packages.Error{ + Pos: err.Pos.String(), + Msg: err.Msg, + Kind: packages.ParseError, + }) + } + + case types.Error: + // from type checker + errs = append(errs, packages.Error{ + Pos: err.Fset.Position(err.Pos).String(), + Msg: err.Msg, + Kind: packages.TypeError, + }) + + default: + // unexpected impoverished error from parser? + errs = append(errs, packages.Error{ + Pos: "-", + Msg: err.Error(), + Kind: packages.UnknownError, + }) + + // If you see this error message, please file a bug. 
+ log.Printf("internal error: error %q (%T) without position", err, err) + } + return errs +} + +type importerFunc func(path string) (*types.Package, error) + +func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } diff --git a/vendor/honnef.co/go/tools/printf/fuzz.go b/vendor/honnef.co/go/tools/printf/fuzz.go new file mode 100644 index 000000000..8ebf357fb --- /dev/null +++ b/vendor/honnef.co/go/tools/printf/fuzz.go @@ -0,0 +1,11 @@ +// +build gofuzz + +package printf + +func Fuzz(data []byte) int { + _, err := Parse(string(data)) + if err == nil { + return 1 + } + return 0 +} diff --git a/vendor/honnef.co/go/tools/printf/printf.go b/vendor/honnef.co/go/tools/printf/printf.go new file mode 100644 index 000000000..754db9b16 --- /dev/null +++ b/vendor/honnef.co/go/tools/printf/printf.go @@ -0,0 +1,197 @@ +// Package printf implements a parser for fmt.Printf-style format +// strings. +// +// It parses verbs according to the following syntax: +// Numeric -> '0'-'9' +// Letter -> 'a'-'z' | 'A'-'Z' +// Index -> '[' Numeric+ ']' +// Star -> '*' +// Star -> Index '*' +// +// Precision -> Numeric+ | Star +// Width -> Numeric+ | Star +// +// WidthAndPrecision -> Width '.' Precision +// WidthAndPrecision -> Width '.' +// WidthAndPrecision -> Width +// WidthAndPrecision -> '.' Precision +// WidthAndPrecision -> '.' +// +// Flag -> '+' | '-' | '#' | ' ' | '0' +// Verb -> Letter | '%' +// +// Input -> '%' [ Flag+ ] [ WidthAndPrecision ] [ Index ] Verb +package printf + +import ( + "errors" + "regexp" + "strconv" + "strings" +) + +// ErrInvalid is returned for invalid format strings or verbs. +var ErrInvalid = errors.New("invalid format string") + +type Verb struct { + Letter rune + Flags string + + Width Argument + Precision Argument + // Which value in the argument list the verb uses. + // -1 denotes the next argument, + // values > 0 denote explicit arguments. + // The value 0 denotes that no argument is consumed. This is the case for %%. + Value int + + Raw string +} + +// Argument is an implicit or explicit width or precision. +type Argument interface { + isArgument() +} + +// The Default value, when no width or precision is provided. +type Default struct{} + +// Zero is the implicit zero value. +// This value may only appear for precisions in format strings like %6.f +type Zero struct{} + +// Star is a * value, which may either refer to the next argument (Index == -1) or an explicit argument. +type Star struct{ Index int } + +// A Literal value, such as 6 in %6d. +type Literal int + +func (Default) isArgument() {} +func (Zero) isArgument() {} +func (Star) isArgument() {} +func (Literal) isArgument() {} + +// Parse parses f and returns a list of actions. +// An action may either be a literal string, or a Verb. +func Parse(f string) ([]interface{}, error) { + var out []interface{} + for len(f) > 0 { + if f[0] == '%' { + v, n, err := ParseVerb(f) + if err != nil { + return nil, err + } + f = f[n:] + out = append(out, v) + } else { + n := strings.IndexByte(f, '%') + if n > -1 { + out = append(out, f[:n]) + f = f[n:] + } else { + out = append(out, f) + f = "" + } + } + } + + return out, nil +} + +func atoi(s string) int { + n, _ := strconv.Atoi(s) + return n +} + +// ParseVerb parses the verb at the beginning of f. +// It returns the verb, how much of the input was consumed, and an error, if any. 
+func ParseVerb(f string) (Verb, int, error) { + if len(f) < 2 { + return Verb{}, 0, ErrInvalid + } + const ( + flags = 1 + + width = 2 + widthStar = 3 + widthIndex = 5 + + dot = 6 + prec = 7 + precStar = 8 + precIndex = 10 + + verbIndex = 11 + verb = 12 + ) + + m := re.FindStringSubmatch(f) + if m == nil { + return Verb{}, 0, ErrInvalid + } + + v := Verb{ + Letter: []rune(m[verb])[0], + Flags: m[flags], + Raw: m[0], + } + + if m[width] != "" { + // Literal width + v.Width = Literal(atoi(m[width])) + } else if m[widthStar] != "" { + // Star width + if m[widthIndex] != "" { + v.Width = Star{atoi(m[widthIndex])} + } else { + v.Width = Star{-1} + } + } else { + // Default width + v.Width = Default{} + } + + if m[dot] == "" { + // default precision + v.Precision = Default{} + } else { + if m[prec] != "" { + // Literal precision + v.Precision = Literal(atoi(m[prec])) + } else if m[precStar] != "" { + // Star precision + if m[precIndex] != "" { + v.Precision = Star{atoi(m[precIndex])} + } else { + v.Precision = Star{-1} + } + } else { + // Zero precision + v.Precision = Zero{} + } + } + + if m[verb] == "%" { + v.Value = 0 + } else if m[verbIndex] != "" { + v.Value = atoi(m[verbIndex]) + } else { + v.Value = -1 + } + + return v, len(m[0]), nil +} + +const ( + flags = `([+#0 -]*)` + verb = `([a-zA-Z%])` + index = `(?:\[([0-9]+)\])` + star = `((` + index + `)?\*)` + width1 = `([0-9]+)` + width2 = star + width = `(?:` + width1 + `|` + width2 + `)` + precision = width + widthAndPrecision = `(?:(?:` + width + `)?(?:(\.)(?:` + precision + `)?)?)` +) + +var re = regexp.MustCompile(`^%` + flags + widthAndPrecision + `?` + index + `?` + verb) diff --git a/vendor/honnef.co/go/tools/simple/CONTRIBUTING.md b/vendor/honnef.co/go/tools/simple/CONTRIBUTING.md new file mode 100644 index 000000000..c54c6c50a --- /dev/null +++ b/vendor/honnef.co/go/tools/simple/CONTRIBUTING.md @@ -0,0 +1,15 @@ +# Contributing to gosimple + +## Before filing an issue: + +### Are you having trouble building gosimple? + +Check you have the latest version of its dependencies. Run +``` +go get -u honnef.co/go/tools/simple +``` +If you still have problems, consider searching for existing issues before filing a new issue. + +## Before sending a pull request: + +Have you understood the purpose of gosimple? Make sure to carefully read `README`. 
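The printf format-string parser vendored a few hunks above is self-contained and can be exercised on its own. The sketch below is illustrative only — the main package and the format string are hypothetical and not part of this change — and assumes the Parse, Verb, and Literal declarations exactly as they appear in printf.go:

    package main

    import (
        "fmt"

        "honnef.co/go/tools/printf"
    )

    func main() {
        // Parse returns a mix of literal strings and printf.Verb values.
        actions, err := printf.Parse("%-8.3f and %[2]d")
        if err != nil {
            panic(err) // malformed verbs yield printf.ErrInvalid
        }
        for _, a := range actions {
            switch v := a.(type) {
            case printf.Verb:
                // "%-8.3f": Letter 'f', Flags "-", Width Literal(8),
                // Precision Literal(3), Value -1 (consumes the next argument).
                // "%[2]d": Width and Precision are Default{}, Value 2 (explicit index).
                fmt.Printf("verb %q raw %q\n", v.Letter, v.Raw)
            case string:
                fmt.Printf("literal %q\n", v) // " and "
            }
        }
    }

This mirrors how the checks in the staticcheck suite consume the parser: they walk the returned slice and inspect each Verb's Letter, Width, Precision, and Value rather than re-parsing the format string themselves.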
diff --git a/vendor/honnef.co/go/tools/simple/analysis.go b/vendor/honnef.co/go/tools/simple/analysis.go new file mode 100644 index 000000000..abb1648fa --- /dev/null +++ b/vendor/honnef.co/go/tools/simple/analysis.go @@ -0,0 +1,223 @@ +package simple + +import ( + "flag" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "honnef.co/go/tools/facts" + "honnef.co/go/tools/internal/passes/buildssa" + "honnef.co/go/tools/lint/lintutil" +) + +func newFlagSet() flag.FlagSet { + fs := flag.NewFlagSet("", flag.PanicOnError) + fs.Var(lintutil.NewVersionFlag(), "go", "Target Go version") + return *fs +} + +var Analyzers = map[string]*analysis.Analyzer{ + "S1000": { + Name: "S1000", + Run: LintSingleCaseSelect, + Doc: Docs["S1000"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "S1001": { + Name: "S1001", + Run: LintLoopCopy, + Doc: Docs["S1001"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "S1002": { + Name: "S1002", + Run: LintIfBoolCmp, + Doc: Docs["S1002"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "S1003": { + Name: "S1003", + Run: LintStringsContains, + Doc: Docs["S1003"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "S1004": { + Name: "S1004", + Run: LintBytesCompare, + Doc: Docs["S1004"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "S1005": { + Name: "S1005", + Run: LintUnnecessaryBlank, + Doc: Docs["S1005"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "S1006": { + Name: "S1006", + Run: LintForTrue, + Doc: Docs["S1006"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "S1007": { + Name: "S1007", + Run: LintRegexpRaw, + Doc: Docs["S1007"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "S1008": { + Name: "S1008", + Run: LintIfReturn, + Doc: Docs["S1008"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "S1009": { + Name: "S1009", + Run: LintRedundantNilCheckWithLen, + Doc: Docs["S1009"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "S1010": { + Name: "S1010", + Run: LintSlicing, + Doc: Docs["S1010"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "S1011": { + Name: "S1011", + Run: LintLoopAppend, + Doc: Docs["S1011"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "S1012": { + Name: "S1012", + Run: LintTimeSince, + Doc: Docs["S1012"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "S1016": { + Name: "S1016", + Run: LintSimplerStructConversion, + Doc: Docs["S1016"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "S1017": { + Name: "S1017", + Run: LintTrim, + Doc: Docs["S1017"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "S1018": { + Name: "S1018", + Run: LintLoopSlide, + Doc: Docs["S1018"].String(), + Requires: 
[]*analysis.Analyzer{inspect.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "S1019": { + Name: "S1019", + Run: LintMakeLenCap, + Doc: Docs["S1019"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "S1020": { + Name: "S1020", + Run: LintAssertNotNil, + Doc: Docs["S1020"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "S1021": { + Name: "S1021", + Run: LintDeclareAssign, + Doc: Docs["S1021"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "S1023": { + Name: "S1023", + Run: LintRedundantBreak, + Doc: Docs["S1023"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "S1024": { + Name: "S1024", + Run: LintTimeUntil, + Doc: Docs["S1024"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "S1025": { + Name: "S1025", + Run: LintRedundantSprintf, + Doc: Docs["S1025"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer, inspect.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "S1028": { + Name: "S1028", + Run: LintErrorsNewSprintf, + Doc: Docs["S1028"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "S1029": { + Name: "S1029", + Run: LintRangeStringRunes, + Doc: Docs["S1029"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer}, + Flags: newFlagSet(), + }, + "S1030": { + Name: "S1030", + Run: LintBytesBufferConversions, + Doc: Docs["S1030"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "S1031": { + Name: "S1031", + Run: LintNilCheckAroundRange, + Doc: Docs["S1031"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "S1032": { + Name: "S1032", + Run: LintSortHelpers, + Doc: Docs["S1032"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "S1033": { + Name: "S1033", + Run: LintGuardedDelete, + Doc: Docs["S1033"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "S1034": { + Name: "S1034", + Run: LintSimplifyTypeSwitch, + Doc: Docs["S1034"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, +} diff --git a/vendor/honnef.co/go/tools/simple/doc.go b/vendor/honnef.co/go/tools/simple/doc.go new file mode 100644 index 000000000..eb0072de5 --- /dev/null +++ b/vendor/honnef.co/go/tools/simple/doc.go @@ -0,0 +1,425 @@ +package simple + +import "honnef.co/go/tools/lint" + +var Docs = map[string]*lint.Documentation{ + "S1000": &lint.Documentation{ + Title: `Use plain channel send or receive instead of single-case select`, + Text: `Select statements with a single case can be replaced with a simple +send or receive. + +Before: + + select { + case x := <-ch: + fmt.Println(x) + } + +After: + + x := <-ch + fmt.Println(x)`, + Since: "2017.1", + }, + + "S1001": &lint.Documentation{ + Title: `Replace for loop with call to copy`, + Text: `Use copy() for copying elements from one slice to another. 
+ +Before: + + for i, x := range src { + dst[i] = x + } + +After: + + copy(dst, src)`, + Since: "2017.1", + }, + + "S1002": &lint.Documentation{ + Title: `Omit comparison with boolean constant`, + Text: `Before: + + if x == true {} + +After: + + if x {}`, + Since: "2017.1", + }, + + "S1003": &lint.Documentation{ + Title: `Replace call to strings.Index with strings.Contains`, + Text: `Before: + + if strings.Index(x, y) != -1 {} + +After: + + if strings.Contains(x, y) {}`, + Since: "2017.1", + }, + + "S1004": &lint.Documentation{ + Title: `Replace call to bytes.Compare with bytes.Equal`, + Text: `Before: + + if bytes.Compare(x, y) == 0 {} + +After: + + if bytes.Equal(x, y) {}`, + Since: "2017.1", + }, + + "S1005": &lint.Documentation{ + Title: `Drop unnecessary use of the blank identifier`, + Text: `In many cases, assigning to the blank identifier is unnecessary. + +Before: + + for _ = range s {} + x, _ = someMap[key] + _ = <-ch + +After: + + for range s{} + x = someMap[key] + <-ch`, + Since: "2017.1", + }, + + "S1006": &lint.Documentation{ + Title: `Use for { ... } for infinite loops`, + Text: `For infinite loops, using for { ... } is the most idiomatic choice.`, + Since: "2017.1", + }, + + "S1007": &lint.Documentation{ + Title: `Simplify regular expression by using raw string literal`, + Text: `Raw string literals use ` + "`" + ` instead of " and do not support +any escape sequences. This means that the backslash (\) can be used +freely, without the need of escaping. + +Since regular expressions have their own escape sequences, raw strings +can improve their readability. + +Before: + + regexp.Compile("\\A(\\w+) profile: total \\d+\\n\\z") + +After: + + regexp.Compile(` + "`" + `\A(\w+) profile: total \d+\n\z` + "`" + `)`, + Since: "2017.1", + }, + + "S1008": &lint.Documentation{ + Title: `Simplify returning boolean expression`, + Text: `Before: + + if { + return true + } + return false + +After: + + return `, + Since: "2017.1", + }, + + "S1009": &lint.Documentation{ + Title: `Omit redundant nil check on slices`, + Text: `The len function is defined for all slices, even nil ones, which have +a length of zero. It is not necessary to check if a slice is not nil +before checking that its length is not zero. + +Before: + + if x != nil && len(x) != 0 {} + +After: + + if len(x) != 0 {}`, + Since: "2017.1", + }, + + "S1010": &lint.Documentation{ + Title: `Omit default slice index`, + Text: `When slicing, the second index defaults to the length of the value, +making s[n:len(s)] and s[n:] equivalent.`, + Since: "2017.1", + }, + + "S1011": &lint.Documentation{ + Title: `Use a single append to concatenate two slices`, + Text: `Before: + + for _, e := range y { + x = append(x, e) + } + +After: + + x = append(x, y...)`, + Since: "2017.1", + }, + + "S1012": &lint.Documentation{ + Title: `Replace time.Now().Sub(x) with time.Since(x)`, + Text: `The time.Since helper has the same effect as using time.Now().Sub(x) +but is easier to read. + +Before: + + time.Now().Sub(x) + +After: + + time.Since(x)`, + Since: "2017.1", + }, + + "S1016": &lint.Documentation{ + Title: `Use a type conversion instead of manually copying struct fields`, + Text: `Two struct types with identical fields can be converted between each +other. In older versions of Go, the fields had to have identical +struct tags. Since Go 1.8, however, struct tags are ignored during +conversions. It is thus not necessary to manually copy every field +individually. 
+ +Before: + + var x T1 + y := T2{ + Field1: x.Field1, + Field2: x.Field2, + } + +After: + + var x T1 + y := T2(x)`, + Since: "2017.1", + }, + + "S1017": &lint.Documentation{ + Title: `Replace manual trimming with strings.TrimPrefix`, + Text: `Instead of using strings.HasPrefix and manual slicing, use the +strings.TrimPrefix function. If the string doesn't start with the +prefix, the original string will be returned. Using strings.TrimPrefix +reduces complexity, and avoids common bugs, such as off-by-one +mistakes. + +Before: + + if strings.HasPrefix(str, prefix) { + str = str[len(prefix):] + } + +After: + + str = strings.TrimPrefix(str, prefix)`, + Since: "2017.1", + }, + + "S1018": &lint.Documentation{ + Title: `Use copy for sliding elements`, + Text: `copy() permits using the same source and destination slice, even with +overlapping ranges. This makes it ideal for sliding elements in a +slice. + +Before: + + for i := 0; i < n; i++ { + bs[i] = bs[offset+i] + } + +After: + + copy(bs[:n], bs[offset:])`, + Since: "2017.1", + }, + + "S1019": &lint.Documentation{ + Title: `Simplify make call by omitting redundant arguments`, + Text: `The make function has default values for the length and capacity +arguments. For channels and maps, the length defaults to zero. +Additionally, for slices the capacity defaults to the length.`, + Since: "2017.1", + }, + + "S1020": &lint.Documentation{ + Title: `Omit redundant nil check in type assertion`, + Text: `Before: + + if _, ok := i.(T); ok && i != nil {} + +After: + + if _, ok := i.(T); ok {}`, + Since: "2017.1", + }, + + "S1021": &lint.Documentation{ + Title: `Merge variable declaration and assignment`, + Text: `Before: + + var x uint + x = 1 + +After: + + var x uint = 1`, + Since: "2017.1", + }, + + "S1023": &lint.Documentation{ + Title: `Omit redundant control flow`, + Text: `Functions that have no return value do not need a return statement as +the final statement of the function. + +Switches in Go do not have automatic fallthrough, unlike languages +like C. It is not necessary to have a break statement as the final +statement in a case block.`, + Since: "2017.1", + }, + + "S1024": &lint.Documentation{ + Title: `Replace x.Sub(time.Now()) with time.Until(x)`, + Text: `The time.Until helper has the same effect as using x.Sub(time.Now()) +but is easier to read. + +Before: + + x.Sub(time.Now()) + +After: + + time.Until(x)`, + Since: "2017.1", + }, + + "S1025": &lint.Documentation{ + Title: `Don't use fmt.Sprintf("%s", x) unnecessarily`, + Text: `In many instances, there are easier and more efficient ways of getting +a value's string representation. Whenever a value's underlying type is +a string already, or the type has a String method, they should be used +directly. + +Given the following shared definitions + + type T1 string + type T2 int + + func (T2) String() string { return "Hello, world" } + + var x string + var y T1 + var z T2 + +we can simplify the following + + fmt.Sprintf("%s", x) + fmt.Sprintf("%s", y) + fmt.Sprintf("%s", z) + +to + + x + string(y) + z.String()`, + Since: "2017.1", + }, + + "S1028": &lint.Documentation{ + Title: `Simplify error construction with fmt.Errorf`, + Text: `Before: + + errors.New(fmt.Sprintf(...)) + +After: + + fmt.Errorf(...)`, + Since: "2017.1", + }, + + "S1029": &lint.Documentation{ + Title: `Range over the string directly`, + Text: `Ranging over a string will yield byte offsets and runes. If the offset +isn't used, this is functionally equivalent to converting the string +to a slice of runes and ranging over that. 
Ranging directly over the +string will be more performant, however, as it avoids allocating a new +slice, the size of which depends on the length of the string. + +Before: + + for _, r := range []rune(s) {} + +After: + + for _, r := range s {}`, + Since: "2017.1", + }, + + "S1030": &lint.Documentation{ + Title: `Use bytes.Buffer.String or bytes.Buffer.Bytes`, + Text: `bytes.Buffer has both a String and a Bytes method. It is never +necessary to use string(buf.Bytes()) or []byte(buf.String()) – simply +use the other method.`, + Since: "2017.1", + }, + + "S1031": &lint.Documentation{ + Title: `Omit redundant nil check around loop`, + Text: `You can use range on nil slices and maps, the loop will simply never +execute. This makes an additional nil check around the loop +unnecessary. + +Before: + + if s != nil { + for _, x := range s { + ... + } + } + +After: + + for _, x := range s { + ... + }`, + Since: "2017.1", + }, + + "S1032": &lint.Documentation{ + Title: `Use sort.Ints(x), sort.Float64s(x), and sort.Strings(x)`, + Text: `The sort.Ints, sort.Float64s and sort.Strings functions are easier to +read than sort.Sort(sort.IntSlice(x)), sort.Sort(sort.Float64Slice(x)) +and sort.Sort(sort.StringSlice(x)). + +Before: + + sort.Sort(sort.StringSlice(x)) + +After: + + sort.Strings(x)`, + Since: "2019.1", + }, + + "S1033": &lint.Documentation{ + Title: `Unnecessary guard around call to delete`, + Text: `Calling delete on a nil map is a no-op.`, + Since: "2019.2", + }, + + "S1034": &lint.Documentation{ + Title: `Use result of type assertion to simplify cases`, + Since: "2019.2", + }, +} diff --git a/vendor/honnef.co/go/tools/simple/lint.go b/vendor/honnef.co/go/tools/simple/lint.go new file mode 100644 index 000000000..c78a7bb7a --- /dev/null +++ b/vendor/honnef.co/go/tools/simple/lint.go @@ -0,0 +1,1816 @@ +// Package simple contains a linter for Go source code. +package simple // import "honnef.co/go/tools/simple" + +import ( + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + "reflect" + "sort" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" + . "honnef.co/go/tools/arg" + "honnef.co/go/tools/internal/passes/buildssa" + "honnef.co/go/tools/internal/sharedcheck" + "honnef.co/go/tools/lint" + . 
"honnef.co/go/tools/lint/lintdsl" +) + +func LintSingleCaseSelect(pass *analysis.Pass) (interface{}, error) { + isSingleSelect := func(node ast.Node) bool { + v, ok := node.(*ast.SelectStmt) + if !ok { + return false + } + return len(v.Body.List) == 1 + } + + seen := map[ast.Node]struct{}{} + fn := func(node ast.Node) { + switch v := node.(type) { + case *ast.ForStmt: + if len(v.Body.List) != 1 { + return + } + if !isSingleSelect(v.Body.List[0]) { + return + } + if _, ok := v.Body.List[0].(*ast.SelectStmt).Body.List[0].(*ast.CommClause).Comm.(*ast.SendStmt); ok { + // Don't suggest using range for channel sends + return + } + seen[v.Body.List[0]] = struct{}{} + ReportNodefFG(pass, node, "should use for range instead of for { select {} }") + case *ast.SelectStmt: + if _, ok := seen[v]; ok { + return + } + if !isSingleSelect(v) { + return + } + ReportNodefFG(pass, node, "should use a simple channel send/receive instead of select with a single case") + } + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.ForStmt)(nil), (*ast.SelectStmt)(nil)}, fn) + return nil, nil +} + +func LintLoopCopy(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + loop := node.(*ast.RangeStmt) + + if loop.Key == nil { + return + } + if len(loop.Body.List) != 1 { + return + } + stmt, ok := loop.Body.List[0].(*ast.AssignStmt) + if !ok { + return + } + if stmt.Tok != token.ASSIGN || len(stmt.Lhs) != 1 || len(stmt.Rhs) != 1 { + return + } + lhs, ok := stmt.Lhs[0].(*ast.IndexExpr) + if !ok { + return + } + + if _, ok := pass.TypesInfo.TypeOf(lhs.X).(*types.Slice); !ok { + return + } + lidx, ok := lhs.Index.(*ast.Ident) + if !ok { + return + } + key, ok := loop.Key.(*ast.Ident) + if !ok { + return + } + if pass.TypesInfo.TypeOf(lhs) == nil || pass.TypesInfo.TypeOf(stmt.Rhs[0]) == nil { + return + } + if pass.TypesInfo.ObjectOf(lidx) != pass.TypesInfo.ObjectOf(key) { + return + } + if !types.Identical(pass.TypesInfo.TypeOf(lhs), pass.TypesInfo.TypeOf(stmt.Rhs[0])) { + return + } + if _, ok := pass.TypesInfo.TypeOf(loop.X).(*types.Slice); !ok { + return + } + + if rhs, ok := stmt.Rhs[0].(*ast.IndexExpr); ok { + rx, ok := rhs.X.(*ast.Ident) + _ = rx + if !ok { + return + } + ridx, ok := rhs.Index.(*ast.Ident) + if !ok { + return + } + if pass.TypesInfo.ObjectOf(ridx) != pass.TypesInfo.ObjectOf(key) { + return + } + } else if rhs, ok := stmt.Rhs[0].(*ast.Ident); ok { + value, ok := loop.Value.(*ast.Ident) + if !ok { + return + } + if pass.TypesInfo.ObjectOf(rhs) != pass.TypesInfo.ObjectOf(value) { + return + } + } else { + return + } + ReportNodefFG(pass, loop, "should use copy() instead of a loop") + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.RangeStmt)(nil)}, fn) + return nil, nil +} + +func LintIfBoolCmp(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + expr := node.(*ast.BinaryExpr) + if expr.Op != token.EQL && expr.Op != token.NEQ { + return + } + x := IsBoolConst(pass, expr.X) + y := IsBoolConst(pass, expr.Y) + if !x && !y { + return + } + var other ast.Expr + var val bool + if x { + val = BoolConst(pass, expr.X) + other = expr.Y + } else { + val = BoolConst(pass, expr.Y) + other = expr.X + } + basic, ok := pass.TypesInfo.TypeOf(other).Underlying().(*types.Basic) + if !ok || basic.Kind() != types.Bool { + return + } + op := "" + if (expr.Op == token.EQL && !val) || (expr.Op == token.NEQ && val) { + op = "!" 
+ } + r := op + Render(pass, other) + l1 := len(r) + r = strings.TrimLeft(r, "!") + if (l1-len(r))%2 == 1 { + r = "!" + r + } + if IsInTest(pass, node) { + return + } + ReportNodefFG(pass, expr, "should omit comparison to bool constant, can be simplified to %s", r) + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.BinaryExpr)(nil)}, fn) + return nil, nil +} + +func LintBytesBufferConversions(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + call := node.(*ast.CallExpr) + if len(call.Args) != 1 { + return + } + + argCall, ok := call.Args[0].(*ast.CallExpr) + if !ok { + return + } + sel, ok := argCall.Fun.(*ast.SelectorExpr) + if !ok { + return + } + + typ := pass.TypesInfo.TypeOf(call.Fun) + if typ == types.Universe.Lookup("string").Type() && IsCallToAST(pass, call.Args[0], "(*bytes.Buffer).Bytes") { + ReportNodefFG(pass, call, "should use %v.String() instead of %v", Render(pass, sel.X), Render(pass, call)) + } else if typ, ok := typ.(*types.Slice); ok && typ.Elem() == types.Universe.Lookup("byte").Type() && IsCallToAST(pass, call.Args[0], "(*bytes.Buffer).String") { + ReportNodefFG(pass, call, "should use %v.Bytes() instead of %v", Render(pass, sel.X), Render(pass, call)) + } + + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.CallExpr)(nil)}, fn) + return nil, nil +} + +func LintStringsContains(pass *analysis.Pass) (interface{}, error) { + // map of value to token to bool value + allowed := map[int64]map[token.Token]bool{ + -1: {token.GTR: true, token.NEQ: true, token.EQL: false}, + 0: {token.GEQ: true, token.LSS: false}, + } + fn := func(node ast.Node) { + expr := node.(*ast.BinaryExpr) + switch expr.Op { + case token.GEQ, token.GTR, token.NEQ, token.LSS, token.EQL: + default: + return + } + + value, ok := ExprToInt(pass, expr.Y) + if !ok { + return + } + + allowedOps, ok := allowed[value] + if !ok { + return + } + b, ok := allowedOps[expr.Op] + if !ok { + return + } + + call, ok := expr.X.(*ast.CallExpr) + if !ok { + return + } + sel, ok := call.Fun.(*ast.SelectorExpr) + if !ok { + return + } + pkgIdent, ok := sel.X.(*ast.Ident) + if !ok { + return + } + funIdent := sel.Sel + if pkgIdent.Name != "strings" && pkgIdent.Name != "bytes" { + return + } + newFunc := "" + switch funIdent.Name { + case "IndexRune": + newFunc = "ContainsRune" + case "IndexAny": + newFunc = "ContainsAny" + case "Index": + newFunc = "Contains" + default: + return + } + + prefix := "" + if !b { + prefix = "!" + } + ReportNodefFG(pass, node, "should use %s%s.%s(%s) instead", prefix, pkgIdent.Name, newFunc, RenderArgs(pass, call.Args)) + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.BinaryExpr)(nil)}, fn) + return nil, nil +} + +func LintBytesCompare(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + expr := node.(*ast.BinaryExpr) + if expr.Op != token.NEQ && expr.Op != token.EQL { + return + } + call, ok := expr.X.(*ast.CallExpr) + if !ok { + return + } + if !IsCallToAST(pass, call, "bytes.Compare") { + return + } + value, ok := ExprToInt(pass, expr.Y) + if !ok || value != 0 { + return + } + args := RenderArgs(pass, call.Args) + prefix := "" + if expr.Op == token.NEQ { + prefix = "!" 
+ } + ReportNodefFG(pass, node, "should use %sbytes.Equal(%s) instead", prefix, args) + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.BinaryExpr)(nil)}, fn) + return nil, nil +} + +func LintForTrue(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + loop := node.(*ast.ForStmt) + if loop.Init != nil || loop.Post != nil { + return + } + if !IsBoolConst(pass, loop.Cond) || !BoolConst(pass, loop.Cond) { + return + } + ReportNodefFG(pass, loop, "should use for {} instead of for true {}") + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.ForStmt)(nil)}, fn) + return nil, nil +} + +func LintRegexpRaw(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + call := node.(*ast.CallExpr) + if !IsCallToAST(pass, call, "regexp.MustCompile") && + !IsCallToAST(pass, call, "regexp.Compile") { + return + } + sel, ok := call.Fun.(*ast.SelectorExpr) + if !ok { + return + } + if len(call.Args) != 1 { + // invalid function call + return + } + lit, ok := call.Args[Arg("regexp.Compile.expr")].(*ast.BasicLit) + if !ok { + // TODO(dominikh): support string concat, maybe support constants + return + } + if lit.Kind != token.STRING { + // invalid function call + return + } + if lit.Value[0] != '"' { + // already a raw string + return + } + val := lit.Value + if !strings.Contains(val, `\\`) { + return + } + if strings.Contains(val, "`") { + return + } + + bs := false + for _, c := range val { + if !bs && c == '\\' { + bs = true + continue + } + if bs && c == '\\' { + bs = false + continue + } + if bs { + // backslash followed by non-backslash -> escape sequence + return + } + } + + ReportNodefFG(pass, call, "should use raw string (`...`) with regexp.%s to avoid having to escape twice", sel.Sel.Name) + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.CallExpr)(nil)}, fn) + return nil, nil +} + +func LintIfReturn(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + block := node.(*ast.BlockStmt) + l := len(block.List) + if l < 2 { + return + } + n1, n2 := block.List[l-2], block.List[l-1] + + if len(block.List) >= 3 { + if _, ok := block.List[l-3].(*ast.IfStmt); ok { + // Do not flag a series of if statements + return + } + } + // if statement with no init, no else, a single condition + // checking an identifier or function call and just a return + // statement in the body, that returns a boolean constant + ifs, ok := n1.(*ast.IfStmt) + if !ok { + return + } + if ifs.Else != nil || ifs.Init != nil { + return + } + if len(ifs.Body.List) != 1 { + return + } + if op, ok := ifs.Cond.(*ast.BinaryExpr); ok { + switch op.Op { + case token.EQL, token.LSS, token.GTR, token.NEQ, token.LEQ, token.GEQ: + default: + return + } + } + ret1, ok := ifs.Body.List[0].(*ast.ReturnStmt) + if !ok { + return + } + if len(ret1.Results) != 1 { + return + } + if !IsBoolConst(pass, ret1.Results[0]) { + return + } + + ret2, ok := n2.(*ast.ReturnStmt) + if !ok { + return + } + if len(ret2.Results) != 1 { + return + } + if !IsBoolConst(pass, ret2.Results[0]) { + return + } + + if ret1.Results[0].(*ast.Ident).Name == ret2.Results[0].(*ast.Ident).Name { + // we want the function to return true and false, not the + // same value both times. 
+ return + } + + ReportNodefFG(pass, n1, "should use 'return ' instead of 'if { return }; return '") + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.BlockStmt)(nil)}, fn) + return nil, nil +} + +// LintRedundantNilCheckWithLen checks for the following reduntant nil-checks: +// +// if x == nil || len(x) == 0 {} +// if x != nil && len(x) != 0 {} +// if x != nil && len(x) == N {} (where N != 0) +// if x != nil && len(x) > N {} +// if x != nil && len(x) >= N {} (where N != 0) +// +func LintRedundantNilCheckWithLen(pass *analysis.Pass) (interface{}, error) { + isConstZero := func(expr ast.Expr) (isConst bool, isZero bool) { + _, ok := expr.(*ast.BasicLit) + if ok { + return true, IsZero(expr) + } + id, ok := expr.(*ast.Ident) + if !ok { + return false, false + } + c, ok := pass.TypesInfo.ObjectOf(id).(*types.Const) + if !ok { + return false, false + } + return true, c.Val().Kind() == constant.Int && c.Val().String() == "0" + } + + fn := func(node ast.Node) { + // check that expr is "x || y" or "x && y" + expr := node.(*ast.BinaryExpr) + if expr.Op != token.LOR && expr.Op != token.LAND { + return + } + eqNil := expr.Op == token.LOR + + // check that x is "xx == nil" or "xx != nil" + x, ok := expr.X.(*ast.BinaryExpr) + if !ok { + return + } + if eqNil && x.Op != token.EQL { + return + } + if !eqNil && x.Op != token.NEQ { + return + } + xx, ok := x.X.(*ast.Ident) + if !ok { + return + } + if !IsNil(pass, x.Y) { + return + } + + // check that y is "len(xx) == 0" or "len(xx) ... " + y, ok := expr.Y.(*ast.BinaryExpr) + if !ok { + return + } + if eqNil && y.Op != token.EQL { // must be len(xx) *==* 0 + return + } + yx, ok := y.X.(*ast.CallExpr) + if !ok { + return + } + yxFun, ok := yx.Fun.(*ast.Ident) + if !ok || yxFun.Name != "len" || len(yx.Args) != 1 { + return + } + yxArg, ok := yx.Args[Arg("len.v")].(*ast.Ident) + if !ok { + return + } + if yxArg.Name != xx.Name { + return + } + + if eqNil && !IsZero(y.Y) { // must be len(x) == *0* + return + } + + if !eqNil { + isConst, isZero := isConstZero(y.Y) + if !isConst { + return + } + switch y.Op { + case token.EQL: + // avoid false positive for "xx != nil && len(xx) == 0" + if isZero { + return + } + case token.GEQ: + // avoid false positive for "xx != nil && len(xx) >= 0" + if isZero { + return + } + case token.NEQ: + // avoid false positive for "xx != nil && len(xx) != " + if !isZero { + return + } + case token.GTR: + // ok + default: + return + } + } + + // finally check that xx type is one of array, slice, map or chan + // this is to prevent false positive in case if xx is a pointer to an array + var nilType string + switch pass.TypesInfo.TypeOf(xx).(type) { + case *types.Slice: + nilType = "nil slices" + case *types.Map: + nilType = "nil maps" + case *types.Chan: + nilType = "nil channels" + default: + return + } + ReportNodefFG(pass, expr, "should omit nil check; len() for %s is defined as zero", nilType) + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.BinaryExpr)(nil)}, fn) + return nil, nil +} + +func LintSlicing(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + n := node.(*ast.SliceExpr) + if n.Max != nil { + return + } + s, ok := n.X.(*ast.Ident) + if !ok || s.Obj == nil { + return + } + call, ok := n.High.(*ast.CallExpr) + if !ok || len(call.Args) != 1 || call.Ellipsis.IsValid() { + return + } + fun, ok := call.Fun.(*ast.Ident) + if !ok || fun.Name != "len" { + return + } + if _, ok := pass.TypesInfo.ObjectOf(fun).(*types.Builtin); !ok { 
+ return + } + arg, ok := call.Args[Arg("len.v")].(*ast.Ident) + if !ok || arg.Obj != s.Obj { + return + } + ReportNodefFG(pass, n, "should omit second index in slice, s[a:len(s)] is identical to s[a:]") + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.SliceExpr)(nil)}, fn) + return nil, nil +} + +func refersTo(pass *analysis.Pass, expr ast.Expr, ident *ast.Ident) bool { + found := false + fn := func(node ast.Node) bool { + ident2, ok := node.(*ast.Ident) + if !ok { + return true + } + if pass.TypesInfo.ObjectOf(ident) == pass.TypesInfo.ObjectOf(ident2) { + found = true + return false + } + return true + } + ast.Inspect(expr, fn) + return found +} + +func LintLoopAppend(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + loop := node.(*ast.RangeStmt) + if !IsBlank(loop.Key) { + return + } + val, ok := loop.Value.(*ast.Ident) + if !ok { + return + } + if len(loop.Body.List) != 1 { + return + } + stmt, ok := loop.Body.List[0].(*ast.AssignStmt) + if !ok { + return + } + if stmt.Tok != token.ASSIGN || len(stmt.Lhs) != 1 || len(stmt.Rhs) != 1 { + return + } + if refersTo(pass, stmt.Lhs[0], val) { + return + } + call, ok := stmt.Rhs[0].(*ast.CallExpr) + if !ok { + return + } + if len(call.Args) != 2 || call.Ellipsis.IsValid() { + return + } + fun, ok := call.Fun.(*ast.Ident) + if !ok { + return + } + obj := pass.TypesInfo.ObjectOf(fun) + fn, ok := obj.(*types.Builtin) + if !ok || fn.Name() != "append" { + return + } + + src := pass.TypesInfo.TypeOf(loop.X) + dst := pass.TypesInfo.TypeOf(call.Args[Arg("append.slice")]) + // TODO(dominikh) remove nil check once Go issue #15173 has + // been fixed + if src == nil { + return + } + if !types.Identical(src, dst) { + return + } + + if Render(pass, stmt.Lhs[0]) != Render(pass, call.Args[Arg("append.slice")]) { + return + } + + el, ok := call.Args[Arg("append.elems")].(*ast.Ident) + if !ok { + return + } + if pass.TypesInfo.ObjectOf(val) != pass.TypesInfo.ObjectOf(el) { + return + } + ReportNodefFG(pass, loop, "should replace loop with %s = append(%s, %s...)", + Render(pass, stmt.Lhs[0]), Render(pass, call.Args[Arg("append.slice")]), Render(pass, loop.X)) + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.RangeStmt)(nil)}, fn) + return nil, nil +} + +func LintTimeSince(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + call := node.(*ast.CallExpr) + sel, ok := call.Fun.(*ast.SelectorExpr) + if !ok { + return + } + if !IsCallToAST(pass, sel.X, "time.Now") { + return + } + if sel.Sel.Name != "Sub" { + return + } + ReportNodefFG(pass, call, "should use time.Since instead of time.Now().Sub") + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.CallExpr)(nil)}, fn) + return nil, nil +} + +func LintTimeUntil(pass *analysis.Pass) (interface{}, error) { + if !IsGoVersion(pass, 8) { + return nil, nil + } + fn := func(node ast.Node) { + call := node.(*ast.CallExpr) + if !IsCallToAST(pass, call, "(time.Time).Sub") { + return + } + if !IsCallToAST(pass, call.Args[Arg("(time.Time).Sub.u")], "time.Now") { + return + } + ReportNodefFG(pass, call, "should use time.Until instead of t.Sub(time.Now())") + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.CallExpr)(nil)}, fn) + return nil, nil +} + +func LintUnnecessaryBlank(pass *analysis.Pass) (interface{}, error) { + fn1 := func(node ast.Node) { + assign := node.(*ast.AssignStmt) + if len(assign.Lhs) != 2 || len(assign.Rhs) != 1 { + 
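+ // fn1 only handles two-value assignments such as "x, _ = m[k]" or "x, _ = <-ch".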
return + } + if !IsBlank(assign.Lhs[1]) { + return + } + switch rhs := assign.Rhs[0].(type) { + case *ast.IndexExpr: + // The type-checker should make sure that it's a map, but + // let's be safe. + if _, ok := pass.TypesInfo.TypeOf(rhs.X).Underlying().(*types.Map); !ok { + return + } + case *ast.UnaryExpr: + if rhs.Op != token.ARROW { + return + } + default: + return + } + cp := *assign + cp.Lhs = cp.Lhs[0:1] + ReportNodefFG(pass, assign, "should write %s instead of %s", Render(pass, &cp), Render(pass, assign)) + } + + fn2 := func(node ast.Node) { + stmt := node.(*ast.AssignStmt) + if len(stmt.Lhs) != len(stmt.Rhs) { + return + } + for i, lh := range stmt.Lhs { + rh := stmt.Rhs[i] + if !IsBlank(lh) { + continue + } + expr, ok := rh.(*ast.UnaryExpr) + if !ok { + continue + } + if expr.Op != token.ARROW { + continue + } + ReportNodefFG(pass, lh, "'_ = <-ch' can be simplified to '<-ch'") + } + } + + fn3 := func(node ast.Node) { + rs := node.(*ast.RangeStmt) + + // for x, _ + if !IsBlank(rs.Key) && IsBlank(rs.Value) { + ReportNodefFG(pass, rs.Value, "should omit value from range; this loop is equivalent to `for %s %s range ...`", Render(pass, rs.Key), rs.Tok) + } + // for _, _ || for _ + if IsBlank(rs.Key) && (IsBlank(rs.Value) || rs.Value == nil) { + ReportNodefFG(pass, rs.Key, "should omit values from range; this loop is equivalent to `for range ...`") + } + } + + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.AssignStmt)(nil)}, fn1) + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.AssignStmt)(nil)}, fn2) + if IsGoVersion(pass, 4) { + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.RangeStmt)(nil)}, fn3) + } + return nil, nil +} + +func LintSimplerStructConversion(pass *analysis.Pass) (interface{}, error) { + var skip ast.Node + fn := func(node ast.Node) { + // Do not suggest type conversion between pointers + if unary, ok := node.(*ast.UnaryExpr); ok && unary.Op == token.AND { + if lit, ok := unary.X.(*ast.CompositeLit); ok { + skip = lit + } + return + } + + if node == skip { + return + } + + lit, ok := node.(*ast.CompositeLit) + if !ok { + return + } + typ1, _ := pass.TypesInfo.TypeOf(lit.Type).(*types.Named) + if typ1 == nil { + return + } + s1, ok := typ1.Underlying().(*types.Struct) + if !ok { + return + } + + var typ2 *types.Named + var ident *ast.Ident + getSelType := func(expr ast.Expr) (types.Type, *ast.Ident, bool) { + sel, ok := expr.(*ast.SelectorExpr) + if !ok { + return nil, nil, false + } + ident, ok := sel.X.(*ast.Ident) + if !ok { + return nil, nil, false + } + typ := pass.TypesInfo.TypeOf(sel.X) + return typ, ident, typ != nil + } + if len(lit.Elts) == 0 { + return + } + if s1.NumFields() != len(lit.Elts) { + return + } + for i, elt := range lit.Elts { + var t types.Type + var id *ast.Ident + var ok bool + switch elt := elt.(type) { + case *ast.SelectorExpr: + t, id, ok = getSelType(elt) + if !ok { + return + } + if i >= s1.NumFields() || s1.Field(i).Name() != elt.Sel.Name { + return + } + case *ast.KeyValueExpr: + var sel *ast.SelectorExpr + sel, ok = elt.Value.(*ast.SelectorExpr) + if !ok { + return + } + + if elt.Key.(*ast.Ident).Name != sel.Sel.Name { + return + } + t, id, ok = getSelType(elt.Value) + } + if !ok { + return + } + // All fields must be initialized from the same object + if ident != nil && ident.Obj != id.Obj { + return + } + typ2, _ = t.(*types.Named) + if typ2 == nil { + return + } + ident = id + } + + if typ2 == nil { + return + } + + if 
typ1.Obj().Pkg() != typ2.Obj().Pkg() { + // Do not suggest type conversions between different + // packages. Types in different packages might only match + // by coincidence. Furthermore, if the dependency ever + // adds more fields to its type, it could break the code + // that relies on the type conversion to work. + return + } + + s2, ok := typ2.Underlying().(*types.Struct) + if !ok { + return + } + if typ1 == typ2 { + return + } + if IsGoVersion(pass, 8) { + if !types.IdenticalIgnoreTags(s1, s2) { + return + } + } else { + if !types.Identical(s1, s2) { + return + } + } + ReportNodefFG(pass, node, "should convert %s (type %s) to %s instead of using struct literal", + ident.Name, typ2.Obj().Name(), typ1.Obj().Name()) + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.UnaryExpr)(nil), (*ast.CompositeLit)(nil)}, fn) + return nil, nil +} + +func LintTrim(pass *analysis.Pass) (interface{}, error) { + sameNonDynamic := func(node1, node2 ast.Node) bool { + if reflect.TypeOf(node1) != reflect.TypeOf(node2) { + return false + } + + switch node1 := node1.(type) { + case *ast.Ident: + return node1.Obj == node2.(*ast.Ident).Obj + case *ast.SelectorExpr: + return Render(pass, node1) == Render(pass, node2) + case *ast.IndexExpr: + return Render(pass, node1) == Render(pass, node2) + } + return false + } + + isLenOnIdent := func(fn ast.Expr, ident ast.Expr) bool { + call, ok := fn.(*ast.CallExpr) + if !ok { + return false + } + if fn, ok := call.Fun.(*ast.Ident); !ok || fn.Name != "len" { + return false + } + if len(call.Args) != 1 { + return false + } + return sameNonDynamic(call.Args[Arg("len.v")], ident) + } + + fn := func(node ast.Node) { + var pkg string + var fun string + + ifstmt := node.(*ast.IfStmt) + if ifstmt.Init != nil { + return + } + if ifstmt.Else != nil { + return + } + if len(ifstmt.Body.List) != 1 { + return + } + condCall, ok := ifstmt.Cond.(*ast.CallExpr) + if !ok { + return + } + switch { + case IsCallToAST(pass, condCall, "strings.HasPrefix"): + pkg = "strings" + fun = "HasPrefix" + case IsCallToAST(pass, condCall, "strings.HasSuffix"): + pkg = "strings" + fun = "HasSuffix" + case IsCallToAST(pass, condCall, "strings.Contains"): + pkg = "strings" + fun = "Contains" + case IsCallToAST(pass, condCall, "bytes.HasPrefix"): + pkg = "bytes" + fun = "HasPrefix" + case IsCallToAST(pass, condCall, "bytes.HasSuffix"): + pkg = "bytes" + fun = "HasSuffix" + case IsCallToAST(pass, condCall, "bytes.Contains"): + pkg = "bytes" + fun = "Contains" + default: + return + } + + assign, ok := ifstmt.Body.List[0].(*ast.AssignStmt) + if !ok { + return + } + if assign.Tok != token.ASSIGN { + return + } + if len(assign.Lhs) != 1 || len(assign.Rhs) != 1 { + return + } + if !sameNonDynamic(condCall.Args[0], assign.Lhs[0]) { + return + } + + switch rhs := assign.Rhs[0].(type) { + case *ast.CallExpr: + if len(rhs.Args) < 2 || !sameNonDynamic(condCall.Args[0], rhs.Args[0]) || !sameNonDynamic(condCall.Args[1], rhs.Args[1]) { + return + } + if IsCallToAST(pass, condCall, "strings.HasPrefix") && IsCallToAST(pass, rhs, "strings.TrimPrefix") || + IsCallToAST(pass, condCall, "strings.HasSuffix") && IsCallToAST(pass, rhs, "strings.TrimSuffix") || + IsCallToAST(pass, condCall, "strings.Contains") && IsCallToAST(pass, rhs, "strings.Replace") || + IsCallToAST(pass, condCall, "bytes.HasPrefix") && IsCallToAST(pass, rhs, "bytes.TrimPrefix") || + IsCallToAST(pass, condCall, "bytes.HasSuffix") && IsCallToAST(pass, rhs, "bytes.TrimSuffix") || + IsCallToAST(pass, condCall, 
"bytes.Contains") && IsCallToAST(pass, rhs, "bytes.Replace") { + ReportNodefFG(pass, ifstmt, "should replace this if statement with an unconditional %s", CallNameAST(pass, rhs)) + } + return + case *ast.SliceExpr: + slice := rhs + if !ok { + return + } + if slice.Slice3 { + return + } + if !sameNonDynamic(slice.X, condCall.Args[0]) { + return + } + var index ast.Expr + switch fun { + case "HasPrefix": + // TODO(dh) We could detect a High that is len(s), but another + // rule will already flag that, anyway. + if slice.High != nil { + return + } + index = slice.Low + case "HasSuffix": + if slice.Low != nil { + n, ok := ExprToInt(pass, slice.Low) + if !ok || n != 0 { + return + } + } + index = slice.High + } + + switch index := index.(type) { + case *ast.CallExpr: + if fun != "HasPrefix" { + return + } + if fn, ok := index.Fun.(*ast.Ident); !ok || fn.Name != "len" { + return + } + if len(index.Args) != 1 { + return + } + id3 := index.Args[Arg("len.v")] + switch oid3 := condCall.Args[1].(type) { + case *ast.BasicLit: + if pkg != "strings" { + return + } + lit, ok := id3.(*ast.BasicLit) + if !ok { + return + } + s1, ok1 := ExprToString(pass, lit) + s2, ok2 := ExprToString(pass, condCall.Args[1]) + if !ok1 || !ok2 || s1 != s2 { + return + } + default: + if !sameNonDynamic(id3, oid3) { + return + } + } + case *ast.BasicLit, *ast.Ident: + if fun != "HasPrefix" { + return + } + if pkg != "strings" { + return + } + string, ok1 := ExprToString(pass, condCall.Args[1]) + int, ok2 := ExprToInt(pass, slice.Low) + if !ok1 || !ok2 || int != int64(len(string)) { + return + } + case *ast.BinaryExpr: + if fun != "HasSuffix" { + return + } + if index.Op != token.SUB { + return + } + if !isLenOnIdent(index.X, condCall.Args[0]) || + !isLenOnIdent(index.Y, condCall.Args[1]) { + return + } + default: + return + } + + var replacement string + switch fun { + case "HasPrefix": + replacement = "TrimPrefix" + case "HasSuffix": + replacement = "TrimSuffix" + } + ReportNodefFG(pass, ifstmt, "should replace this if statement with an unconditional %s.%s", pkg, replacement) + } + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.IfStmt)(nil)}, fn) + return nil, nil +} + +func LintLoopSlide(pass *analysis.Pass) (interface{}, error) { + // TODO(dh): detect bs[i+offset] in addition to bs[offset+i] + // TODO(dh): consider merging this function with LintLoopCopy + // TODO(dh): detect length that is an expression, not a variable name + // TODO(dh): support sliding to a different offset than the beginning of the slice + + fn := func(node ast.Node) { + /* + for i := 0; i < n; i++ { + bs[i] = bs[offset+i] + } + + ↓ + + copy(bs[:n], bs[offset:offset+n]) + */ + + loop := node.(*ast.ForStmt) + if len(loop.Body.List) != 1 || loop.Init == nil || loop.Cond == nil || loop.Post == nil { + return + } + assign, ok := loop.Init.(*ast.AssignStmt) + if !ok || len(assign.Lhs) != 1 || len(assign.Rhs) != 1 || !IsZero(assign.Rhs[0]) { + return + } + initvar, ok := assign.Lhs[0].(*ast.Ident) + if !ok { + return + } + post, ok := loop.Post.(*ast.IncDecStmt) + if !ok || post.Tok != token.INC { + return + } + postvar, ok := post.X.(*ast.Ident) + if !ok || pass.TypesInfo.ObjectOf(postvar) != pass.TypesInfo.ObjectOf(initvar) { + return + } + bin, ok := loop.Cond.(*ast.BinaryExpr) + if !ok || bin.Op != token.LSS { + return + } + binx, ok := bin.X.(*ast.Ident) + if !ok || pass.TypesInfo.ObjectOf(binx) != pass.TypesInfo.ObjectOf(initvar) { + return + } + biny, ok := bin.Y.(*ast.Ident) + if !ok { + return + } + + assign, ok 
= loop.Body.List[0].(*ast.AssignStmt) + if !ok || len(assign.Lhs) != 1 || len(assign.Rhs) != 1 || assign.Tok != token.ASSIGN { + return + } + lhs, ok := assign.Lhs[0].(*ast.IndexExpr) + if !ok { + return + } + rhs, ok := assign.Rhs[0].(*ast.IndexExpr) + if !ok { + return + } + + bs1, ok := lhs.X.(*ast.Ident) + if !ok { + return + } + bs2, ok := rhs.X.(*ast.Ident) + if !ok { + return + } + obj1 := pass.TypesInfo.ObjectOf(bs1) + obj2 := pass.TypesInfo.ObjectOf(bs2) + if obj1 != obj2 { + return + } + if _, ok := obj1.Type().Underlying().(*types.Slice); !ok { + return + } + + index1, ok := lhs.Index.(*ast.Ident) + if !ok || pass.TypesInfo.ObjectOf(index1) != pass.TypesInfo.ObjectOf(initvar) { + return + } + index2, ok := rhs.Index.(*ast.BinaryExpr) + if !ok || index2.Op != token.ADD { + return + } + add1, ok := index2.X.(*ast.Ident) + if !ok { + return + } + add2, ok := index2.Y.(*ast.Ident) + if !ok || pass.TypesInfo.ObjectOf(add2) != pass.TypesInfo.ObjectOf(initvar) { + return + } + + ReportNodefFG(pass, loop, "should use copy(%s[:%s], %s[%s:]) instead", Render(pass, bs1), Render(pass, biny), Render(pass, bs1), Render(pass, add1)) + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.ForStmt)(nil)}, fn) + return nil, nil +} + +func LintMakeLenCap(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + call := node.(*ast.CallExpr) + if fn, ok := call.Fun.(*ast.Ident); !ok || fn.Name != "make" { + // FIXME check whether make is indeed the built-in function + return + } + switch len(call.Args) { + case 2: + // make(T, len) + if _, ok := pass.TypesInfo.TypeOf(call.Args[Arg("make.t")]).Underlying().(*types.Slice); ok { + break + } + if IsZero(call.Args[Arg("make.size[0]")]) { + ReportNodefFG(pass, call.Args[Arg("make.size[0]")], "should use make(%s) instead", Render(pass, call.Args[Arg("make.t")])) + } + case 3: + // make(T, len, cap) + if Render(pass, call.Args[Arg("make.size[0]")]) == Render(pass, call.Args[Arg("make.size[1]")]) { + ReportNodefFG(pass, call.Args[Arg("make.size[0]")], + "should use make(%s, %s) instead", + Render(pass, call.Args[Arg("make.t")]), Render(pass, call.Args[Arg("make.size[0]")])) + } + } + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.CallExpr)(nil)}, fn) + return nil, nil +} + +func LintAssertNotNil(pass *analysis.Pass) (interface{}, error) { + isNilCheck := func(ident *ast.Ident, expr ast.Expr) bool { + xbinop, ok := expr.(*ast.BinaryExpr) + if !ok || xbinop.Op != token.NEQ { + return false + } + xident, ok := xbinop.X.(*ast.Ident) + if !ok || xident.Obj != ident.Obj { + return false + } + if !IsNil(pass, xbinop.Y) { + return false + } + return true + } + isOKCheck := func(ident *ast.Ident, expr ast.Expr) bool { + yident, ok := expr.(*ast.Ident) + if !ok || yident.Obj != ident.Obj { + return false + } + return true + } + fn1 := func(node ast.Node) { + ifstmt := node.(*ast.IfStmt) + assign, ok := ifstmt.Init.(*ast.AssignStmt) + if !ok || len(assign.Lhs) != 2 || len(assign.Rhs) != 1 || !IsBlank(assign.Lhs[0]) { + return + } + assert, ok := assign.Rhs[0].(*ast.TypeAssertExpr) + if !ok { + return + } + binop, ok := ifstmt.Cond.(*ast.BinaryExpr) + if !ok || binop.Op != token.LAND { + return + } + assertIdent, ok := assert.X.(*ast.Ident) + if !ok { + return + } + assignIdent, ok := assign.Lhs[1].(*ast.Ident) + if !ok { + return + } + if !(isNilCheck(assertIdent, binop.X) && isOKCheck(assignIdent, binop.Y)) && + !(isNilCheck(assertIdent, binop.Y) && isOKCheck(assignIdent, binop.X)) 
{ + return + } + ReportNodefFG(pass, ifstmt, "when %s is true, %s can't be nil", Render(pass, assignIdent), Render(pass, assertIdent)) + } + fn2 := func(node ast.Node) { + // Check that outer ifstmt is an 'if x != nil {}' + ifstmt := node.(*ast.IfStmt) + if ifstmt.Init != nil { + return + } + if ifstmt.Else != nil { + return + } + if len(ifstmt.Body.List) != 1 { + return + } + binop, ok := ifstmt.Cond.(*ast.BinaryExpr) + if !ok { + return + } + if binop.Op != token.NEQ { + return + } + lhs, ok := binop.X.(*ast.Ident) + if !ok { + return + } + if !IsNil(pass, binop.Y) { + return + } + + // Check that inner ifstmt is an `if _, ok := x.(T); ok {}` + ifstmt, ok = ifstmt.Body.List[0].(*ast.IfStmt) + if !ok { + return + } + assign, ok := ifstmt.Init.(*ast.AssignStmt) + if !ok || len(assign.Lhs) != 2 || len(assign.Rhs) != 1 || !IsBlank(assign.Lhs[0]) { + return + } + assert, ok := assign.Rhs[0].(*ast.TypeAssertExpr) + if !ok { + return + } + assertIdent, ok := assert.X.(*ast.Ident) + if !ok { + return + } + if lhs.Obj != assertIdent.Obj { + return + } + assignIdent, ok := assign.Lhs[1].(*ast.Ident) + if !ok { + return + } + if !isOKCheck(assignIdent, ifstmt.Cond) { + return + } + ReportNodefFG(pass, ifstmt, "when %s is true, %s can't be nil", Render(pass, assignIdent), Render(pass, assertIdent)) + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.IfStmt)(nil)}, fn1) + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.IfStmt)(nil)}, fn2) + return nil, nil +} + +func LintDeclareAssign(pass *analysis.Pass) (interface{}, error) { + hasMultipleAssignments := func(root ast.Node, ident *ast.Ident) bool { + num := 0 + ast.Inspect(root, func(node ast.Node) bool { + if num >= 2 { + return false + } + assign, ok := node.(*ast.AssignStmt) + if !ok { + return true + } + for _, lhs := range assign.Lhs { + if oident, ok := lhs.(*ast.Ident); ok { + if oident.Obj == ident.Obj { + num++ + } + } + } + + return true + }) + return num >= 2 + } + fn := func(node ast.Node) { + block := node.(*ast.BlockStmt) + if len(block.List) < 2 { + return + } + for i, stmt := range block.List[:len(block.List)-1] { + _ = i + decl, ok := stmt.(*ast.DeclStmt) + if !ok { + continue + } + gdecl, ok := decl.Decl.(*ast.GenDecl) + if !ok || gdecl.Tok != token.VAR || len(gdecl.Specs) != 1 { + continue + } + vspec, ok := gdecl.Specs[0].(*ast.ValueSpec) + if !ok || len(vspec.Names) != 1 || len(vspec.Values) != 0 { + continue + } + + assign, ok := block.List[i+1].(*ast.AssignStmt) + if !ok || assign.Tok != token.ASSIGN { + continue + } + if len(assign.Lhs) != 1 || len(assign.Rhs) != 1 { + continue + } + ident, ok := assign.Lhs[0].(*ast.Ident) + if !ok { + continue + } + if vspec.Names[0].Obj != ident.Obj { + continue + } + + if refersTo(pass, assign.Rhs[0], ident) { + continue + } + if hasMultipleAssignments(block, ident) { + continue + } + + ReportNodefFG(pass, decl, "should merge variable declaration with assignment on next line") + } + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.BlockStmt)(nil)}, fn) + return nil, nil +} + +func LintRedundantBreak(pass *analysis.Pass) (interface{}, error) { + fn1 := func(node ast.Node) { + clause := node.(*ast.CaseClause) + if len(clause.Body) < 2 { + return + } + branch, ok := clause.Body[len(clause.Body)-1].(*ast.BranchStmt) + if !ok || branch.Tok != token.BREAK || branch.Label != nil { + return + } + ReportNodefFG(pass, branch, "redundant break statement") + } + fn2 := func(node ast.Node) { + var 
ret *ast.FieldList + var body *ast.BlockStmt + switch x := node.(type) { + case *ast.FuncDecl: + ret = x.Type.Results + body = x.Body + case *ast.FuncLit: + ret = x.Type.Results + body = x.Body + default: + panic(fmt.Sprintf("unreachable: %T", node)) + } + // if the func has results, a return can't be redundant. + // similarly, if there are no statements, there can be + // no return. + if ret != nil || body == nil || len(body.List) < 1 { + return + } + rst, ok := body.List[len(body.List)-1].(*ast.ReturnStmt) + if !ok { + return + } + // we don't need to check rst.Results as we already + // checked x.Type.Results to be nil. + ReportNodefFG(pass, rst, "redundant return statement") + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.CaseClause)(nil)}, fn1) + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.FuncDecl)(nil), (*ast.FuncLit)(nil)}, fn2) + return nil, nil +} + +func isStringer(T types.Type, msCache *typeutil.MethodSetCache) bool { + ms := msCache.MethodSet(T) + sel := ms.Lookup(nil, "String") + if sel == nil { + return false + } + fn, ok := sel.Obj().(*types.Func) + if !ok { + // should be unreachable + return false + } + sig := fn.Type().(*types.Signature) + if sig.Params().Len() != 0 { + return false + } + if sig.Results().Len() != 1 { + return false + } + if !IsType(sig.Results().At(0).Type(), "string") { + return false + } + return true +} + +func LintRedundantSprintf(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + call := node.(*ast.CallExpr) + if !IsCallToAST(pass, call, "fmt.Sprintf") { + return + } + if len(call.Args) != 2 { + return + } + if s, ok := ExprToString(pass, call.Args[Arg("fmt.Sprintf.format")]); !ok || s != "%s" { + return + } + arg := call.Args[Arg("fmt.Sprintf.a[0]")] + typ := pass.TypesInfo.TypeOf(arg) + + ssapkg := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).Pkg + if isStringer(typ, &ssapkg.Prog.MethodSets) { + ReportNodef(pass, call, "should use String() instead of fmt.Sprintf") + return + } + + if typ.Underlying() == types.Universe.Lookup("string").Type() { + if typ == types.Universe.Lookup("string").Type() { + ReportNodefFG(pass, call, "the argument is already a string, there's no need to use fmt.Sprintf") + } else { + ReportNodefFG(pass, call, "the argument's underlying type is a string, should use a simple conversion instead of fmt.Sprintf") + } + } + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.CallExpr)(nil)}, fn) + return nil, nil +} + +func LintErrorsNewSprintf(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + if !IsCallToAST(pass, node, "errors.New") { + return + } + call := node.(*ast.CallExpr) + if !IsCallToAST(pass, call.Args[Arg("errors.New.text")], "fmt.Sprintf") { + return + } + ReportNodefFG(pass, node, "should use fmt.Errorf(...) 
instead of errors.New(fmt.Sprintf(...))") + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.CallExpr)(nil)}, fn) + return nil, nil +} + +func LintRangeStringRunes(pass *analysis.Pass) (interface{}, error) { + return sharedcheck.CheckRangeStringRunes(pass) +} + +func LintNilCheckAroundRange(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + ifstmt := node.(*ast.IfStmt) + cond, ok := ifstmt.Cond.(*ast.BinaryExpr) + if !ok { + return + } + + if cond.Op != token.NEQ || !IsNil(pass, cond.Y) || len(ifstmt.Body.List) != 1 { + return + } + + loop, ok := ifstmt.Body.List[0].(*ast.RangeStmt) + if !ok { + return + } + ifXIdent, ok := cond.X.(*ast.Ident) + if !ok { + return + } + rangeXIdent, ok := loop.X.(*ast.Ident) + if !ok { + return + } + if ifXIdent.Obj != rangeXIdent.Obj { + return + } + switch pass.TypesInfo.TypeOf(rangeXIdent).(type) { + case *types.Slice, *types.Map: + ReportNodefFG(pass, node, "unnecessary nil check around range") + } + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.IfStmt)(nil)}, fn) + return nil, nil +} + +func isPermissibleSort(pass *analysis.Pass, node ast.Node) bool { + call := node.(*ast.CallExpr) + typeconv, ok := call.Args[0].(*ast.CallExpr) + if !ok { + return true + } + + sel, ok := typeconv.Fun.(*ast.SelectorExpr) + if !ok { + return true + } + name := SelectorName(pass, sel) + switch name { + case "sort.IntSlice", "sort.Float64Slice", "sort.StringSlice": + default: + return true + } + + return false +} + +func LintSortHelpers(pass *analysis.Pass) (interface{}, error) { + type Error struct { + node ast.Node + msg string + } + var allErrors []Error + fn := func(node ast.Node) { + var body *ast.BlockStmt + switch node := node.(type) { + case *ast.FuncLit: + body = node.Body + case *ast.FuncDecl: + body = node.Body + default: + panic(fmt.Sprintf("unreachable: %T", node)) + } + if body == nil { + return + } + + var errors []Error + permissible := false + fnSorts := func(node ast.Node) bool { + if permissible { + return false + } + if !IsCallToAST(pass, node, "sort.Sort") { + return true + } + if isPermissibleSort(pass, node) { + permissible = true + return false + } + call := node.(*ast.CallExpr) + typeconv := call.Args[Arg("sort.Sort.data")].(*ast.CallExpr) + sel := typeconv.Fun.(*ast.SelectorExpr) + name := SelectorName(pass, sel) + + switch name { + case "sort.IntSlice": + errors = append(errors, Error{node, "should use sort.Ints(...) instead of sort.Sort(sort.IntSlice(...))"}) + case "sort.Float64Slice": + errors = append(errors, Error{node, "should use sort.Float64s(...) instead of sort.Sort(sort.Float64Slice(...))"}) + case "sort.StringSlice": + errors = append(errors, Error{node, "should use sort.Strings(...) instead of sort.Sort(sort.StringSlice(...))"}) + } + return true + } + ast.Inspect(body, fnSorts) + + if permissible { + return + } + allErrors = append(allErrors, errors...) 
+ }
+ pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.FuncLit)(nil), (*ast.FuncDecl)(nil)}, fn)
+ sort.Slice(allErrors, func(i, j int) bool {
+ return allErrors[i].node.Pos() < allErrors[j].node.Pos()
+ })
+ var prev token.Pos
+ for _, err := range allErrors {
+ if err.node.Pos() == prev {
+ continue
+ }
+ prev = err.node.Pos()
+ ReportNodefFG(pass, err.node, "%s", err.msg)
+ }
+ return nil, nil
+}
+
+func LintGuardedDelete(pass *analysis.Pass) (interface{}, error) {
+ isCommaOkMapIndex := func(stmt ast.Stmt) (b *ast.Ident, m ast.Expr, key ast.Expr, ok bool) {
+ // Has to be of the form `_, <b:*ast.Ident> = <m:*ast.Expr>[<key>]
+
+ assign, ok := stmt.(*ast.AssignStmt)
+ if !ok {
+ return nil, nil, nil, false
+ }
+ if len(assign.Lhs) != 2 || len(assign.Rhs) != 1 {
+ return nil, nil, nil, false
+ }
+ if !IsBlank(assign.Lhs[0]) {
+ return nil, nil, nil, false
+ }
+ ident, ok := assign.Lhs[1].(*ast.Ident)
+ if !ok {
+ return nil, nil, nil, false
+ }
+ index, ok := assign.Rhs[0].(*ast.IndexExpr)
+ if !ok {
+ return nil, nil, nil, false
+ }
+ if _, ok := pass.TypesInfo.TypeOf(index.X).(*types.Map); !ok {
+ return nil, nil, nil, false
+ }
+ key = index.Index
+ return ident, index.X, key, true
+ }
+ fn := func(node ast.Node) {
+ stmt := node.(*ast.IfStmt)
+ if len(stmt.Body.List) != 1 {
+ return
+ }
+ if stmt.Else != nil {
+ return
+ }
+ expr, ok := stmt.Body.List[0].(*ast.ExprStmt)
+ if !ok {
+ return
+ }
+ call, ok := expr.X.(*ast.CallExpr)
+ if !ok {
+ return
+ }
+ if !IsCallToAST(pass, call, "delete") {
+ return
+ }
+ b, m, key, ok := isCommaOkMapIndex(stmt.Init)
+ if !ok {
+ return
+ }
+ if cond, ok := stmt.Cond.(*ast.Ident); !ok || pass.TypesInfo.ObjectOf(cond) != pass.TypesInfo.ObjectOf(b) {
+ return
+ }
+ if Render(pass, call.Args[0]) != Render(pass, m) || Render(pass, call.Args[1]) != Render(pass, key) {
+ return
+ }
+ ReportNodefFG(pass, stmt, "unnecessary guard around call to delete")
+ }
+ pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.IfStmt)(nil)}, fn)
+ return nil, nil
+}
+
+func LintSimplifyTypeSwitch(pass *analysis.Pass) (interface{}, error) {
+ fn := func(node ast.Node) {
+ stmt := node.(*ast.TypeSwitchStmt)
+ if stmt.Init != nil {
+ // bailing out for now, can't anticipate how type switches with initializers are being used
+ return
+ }
+ expr, ok := stmt.Assign.(*ast.ExprStmt)
+ if !ok {
+ // the user is in fact assigning the result
+ return
+ }
+ assert := expr.X.(*ast.TypeAssertExpr)
+ ident, ok := assert.X.(*ast.Ident)
+ if !ok {
+ return
+ }
+ x := pass.TypesInfo.ObjectOf(ident)
+ var allOffenders []ast.Node
+ for _, clause := range stmt.Body.List {
+ clause := clause.(*ast.CaseClause)
+ if len(clause.List) != 1 {
+ continue
+ }
+ hasUnrelatedAssertion := false
+ var offenders []ast.Node
+ ast.Inspect(clause, func(node ast.Node) bool {
+ assert2, ok := node.(*ast.TypeAssertExpr)
+ if !ok {
+ return true
+ }
+ ident, ok := assert2.X.(*ast.Ident)
+ if !ok {
+ hasUnrelatedAssertion = true
+ return false
+ }
+ if pass.TypesInfo.ObjectOf(ident) != x {
+ hasUnrelatedAssertion = true
+ return false
+ }
+
+ if !types.Identical(pass.TypesInfo.TypeOf(clause.List[0]), pass.TypesInfo.TypeOf(assert2.Type)) {
+ hasUnrelatedAssertion = true
+ return false
+ }
+ offenders = append(offenders, assert2)
+ return true
+ })
+ if !hasUnrelatedAssertion {
+ // don't flag cases that have other type assertions
+ // unrelated to the one in the case clause.
often + // times, this is done for symmetry, when two + // different values have to be asserted to the same + // type. + allOffenders = append(allOffenders, offenders...) + } + } + if len(allOffenders) != 0 { + at := "" + for _, offender := range allOffenders { + pos := lint.DisplayPosition(pass.Fset, offender.Pos()) + at += "\n\t" + pos.String() + } + ReportNodefFG(pass, expr, "assigning the result of this type assertion to a variable (switch %s := %s.(type)) could eliminate the following type assertions:%s", Render(pass, ident), Render(pass, ident), at) + } + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.TypeSwitchStmt)(nil)}, fn) + return nil, nil +} diff --git a/vendor/honnef.co/go/tools/ssa/LICENSE b/vendor/honnef.co/go/tools/ssa/LICENSE new file mode 100644 index 000000000..aee48041e --- /dev/null +++ b/vendor/honnef.co/go/tools/ssa/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright (c) 2016 Dominik Honnef. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/honnef.co/go/tools/ssa/blockopt.go b/vendor/honnef.co/go/tools/ssa/blockopt.go new file mode 100644 index 000000000..22c9a4c0d --- /dev/null +++ b/vendor/honnef.co/go/tools/ssa/blockopt.go @@ -0,0 +1,195 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// Simple block optimizations to simplify the control flow graph. + +// TODO(adonovan): opt: instead of creating several "unreachable" blocks +// per function in the Builder, reuse a single one (e.g. at Blocks[1]) +// to reduce garbage. + +import ( + "fmt" + "os" +) + +// If true, perform sanity checking and show progress at each +// successive iteration of optimizeBlocks. Very verbose. +const debugBlockOpt = false + +// markReachable sets Index=-1 for all blocks reachable from b. 
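+// It expects the caller to have reset Index to 0 (white) for every block beforehand.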
+func markReachable(b *BasicBlock) { + b.Index = -1 + for _, succ := range b.Succs { + if succ.Index == 0 { + markReachable(succ) + } + } +} + +func DeleteUnreachableBlocks(f *Function) { + deleteUnreachableBlocks(f) +} + +// deleteUnreachableBlocks marks all reachable blocks of f and +// eliminates (nils) all others, including possibly cyclic subgraphs. +// +func deleteUnreachableBlocks(f *Function) { + const white, black = 0, -1 + // We borrow b.Index temporarily as the mark bit. + for _, b := range f.Blocks { + b.Index = white + } + markReachable(f.Blocks[0]) + if f.Recover != nil { + markReachable(f.Recover) + } + for i, b := range f.Blocks { + if b.Index == white { + for _, c := range b.Succs { + if c.Index == black { + c.removePred(b) // delete white->black edge + } + } + if debugBlockOpt { + fmt.Fprintln(os.Stderr, "unreachable", b) + } + f.Blocks[i] = nil // delete b + } + } + f.removeNilBlocks() +} + +// jumpThreading attempts to apply simple jump-threading to block b, +// in which a->b->c become a->c if b is just a Jump. +// The result is true if the optimization was applied. +// +func jumpThreading(f *Function, b *BasicBlock) bool { + if b.Index == 0 { + return false // don't apply to entry block + } + if b.Instrs == nil { + return false + } + if _, ok := b.Instrs[0].(*Jump); !ok { + return false // not just a jump + } + c := b.Succs[0] + if c == b { + return false // don't apply to degenerate jump-to-self. + } + if c.hasPhi() { + return false // not sound without more effort + } + for j, a := range b.Preds { + a.replaceSucc(b, c) + + // If a now has two edges to c, replace its degenerate If by Jump. + if len(a.Succs) == 2 && a.Succs[0] == c && a.Succs[1] == c { + jump := new(Jump) + jump.setBlock(a) + a.Instrs[len(a.Instrs)-1] = jump + a.Succs = a.Succs[:1] + c.removePred(b) + } else { + if j == 0 { + c.replacePred(b, a) + } else { + c.Preds = append(c.Preds, a) + } + } + + if debugBlockOpt { + fmt.Fprintln(os.Stderr, "jumpThreading", a, b, c) + } + } + f.Blocks[b.Index] = nil // delete b + return true +} + +// fuseBlocks attempts to apply the block fusion optimization to block +// a, in which a->b becomes ab if len(a.Succs)==len(b.Preds)==1. +// The result is true if the optimization was applied. +// +func fuseBlocks(f *Function, a *BasicBlock) bool { + if len(a.Succs) != 1 { + return false + } + b := a.Succs[0] + if len(b.Preds) != 1 { + return false + } + + // Degenerate &&/|| ops may result in a straight-line CFG + // containing φ-nodes. (Ideally we'd replace such them with + // their sole operand but that requires Referrers, built later.) + if b.hasPhi() { + return false // not sound without further effort + } + + // Eliminate jump at end of A, then copy all of B across. + a.Instrs = append(a.Instrs[:len(a.Instrs)-1], b.Instrs...) + for _, instr := range b.Instrs { + instr.setBlock(a) + } + + // A inherits B's successors + a.Succs = append(a.succs2[:0], b.Succs...) + + // Fix up Preds links of all successors of B. + for _, c := range b.Succs { + c.replacePred(b, a) + } + + if debugBlockOpt { + fmt.Fprintln(os.Stderr, "fuseBlocks", a, b) + } + + f.Blocks[b.Index] = nil // delete b + return true +} + +func OptimizeBlocks(f *Function) { + optimizeBlocks(f) +} + +// optimizeBlocks() performs some simple block optimizations on a +// completed function: dead block elimination, block fusion, jump +// threading. +// +func optimizeBlocks(f *Function) { + deleteUnreachableBlocks(f) + + // Loop until no further progress. 
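+ // A single pass is not enough: each fusion or threading step can enable further ones.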
+ changed := true + for changed { + changed = false + + if debugBlockOpt { + f.WriteTo(os.Stderr) + mustSanityCheck(f, nil) + } + + for _, b := range f.Blocks { + // f.Blocks will temporarily contain nils to indicate + // deleted blocks; we remove them at the end. + if b == nil { + continue + } + + // Fuse blocks. b->c becomes bc. + if fuseBlocks(f, b) { + changed = true + } + + // a->b->c becomes a->c if b contains only a Jump. + if jumpThreading(f, b) { + changed = true + continue // (b was disconnected) + } + } + } + f.removeNilBlocks() +} diff --git a/vendor/honnef.co/go/tools/ssa/builder.go b/vendor/honnef.co/go/tools/ssa/builder.go new file mode 100644 index 000000000..317ac0611 --- /dev/null +++ b/vendor/honnef.co/go/tools/ssa/builder.go @@ -0,0 +1,2379 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file implements the BUILD phase of SSA construction. +// +// SSA construction has two phases, CREATE and BUILD. In the CREATE phase +// (create.go), all packages are constructed and type-checked and +// definitions of all package members are created, method-sets are +// computed, and wrapper methods are synthesized. +// ssa.Packages are created in arbitrary order. +// +// In the BUILD phase (builder.go), the builder traverses the AST of +// each Go source function and generates SSA instructions for the +// function body. Initializer expressions for package-level variables +// are emitted to the package's init() function in the order specified +// by go/types.Info.InitOrder, then code for each function in the +// package is generated in lexical order. +// The BUILD phases for distinct packages are independent and are +// executed in parallel. +// +// TODO(adonovan): indeed, building functions is now embarrassingly parallel. +// Audit for concurrency then benchmark using more goroutines. +// +// The builder's and Program's indices (maps) are populated and +// mutated during the CREATE phase, but during the BUILD phase they +// remain constant. The sole exception is Prog.methodSets and its +// related maps, which are protected by a dedicated mutex. + +import ( + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + "os" + "sync" +) + +type opaqueType struct { + types.Type + name string +} + +func (t *opaqueType) String() string { return t.name } + +var ( + varOk = newVar("ok", tBool) + varIndex = newVar("index", tInt) + + // Type constants. + tBool = types.Typ[types.Bool] + tByte = types.Typ[types.Byte] + tInt = types.Typ[types.Int] + tInvalid = types.Typ[types.Invalid] + tString = types.Typ[types.String] + tUntypedNil = types.Typ[types.UntypedNil] + tRangeIter = &opaqueType{nil, "iter"} // the type of all "range" iterators + tEface = types.NewInterfaceType(nil, nil).Complete() + + // SSA Value constants. + vZero = intConst(0) + vOne = intConst(1) + vTrue = NewConst(constant.MakeBool(true), tBool) +) + +// builder holds state associated with the package currently being built. +// Its methods contain all the logic for AST-to-SSA conversion. +type builder struct{} + +// cond emits to fn code to evaluate boolean condition e and jump +// to t or f depending on its value, performing various simplifications. +// +// Postcondition: fn.currentBlock is nil. 
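+// For example, "x && y" is lowered to a conditional jump on x into a fresh block that evaluates y.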
+// +func (b *builder) cond(fn *Function, e ast.Expr, t, f *BasicBlock) { + switch e := e.(type) { + case *ast.ParenExpr: + b.cond(fn, e.X, t, f) + return + + case *ast.BinaryExpr: + switch e.Op { + case token.LAND: + ltrue := fn.newBasicBlock("cond.true") + b.cond(fn, e.X, ltrue, f) + fn.currentBlock = ltrue + b.cond(fn, e.Y, t, f) + return + + case token.LOR: + lfalse := fn.newBasicBlock("cond.false") + b.cond(fn, e.X, t, lfalse) + fn.currentBlock = lfalse + b.cond(fn, e.Y, t, f) + return + } + + case *ast.UnaryExpr: + if e.Op == token.NOT { + b.cond(fn, e.X, f, t) + return + } + } + + // A traditional compiler would simplify "if false" (etc) here + // but we do not, for better fidelity to the source code. + // + // The value of a constant condition may be platform-specific, + // and may cause blocks that are reachable in some configuration + // to be hidden from subsequent analyses such as bug-finding tools. + emitIf(fn, b.expr(fn, e), t, f) +} + +// logicalBinop emits code to fn to evaluate e, a &&- or +// ||-expression whose reified boolean value is wanted. +// The value is returned. +// +func (b *builder) logicalBinop(fn *Function, e *ast.BinaryExpr) Value { + rhs := fn.newBasicBlock("binop.rhs") + done := fn.newBasicBlock("binop.done") + + // T(e) = T(e.X) = T(e.Y) after untyped constants have been + // eliminated. + // TODO(adonovan): not true; MyBool==MyBool yields UntypedBool. + t := fn.Pkg.typeOf(e) + + var short Value // value of the short-circuit path + switch e.Op { + case token.LAND: + b.cond(fn, e.X, rhs, done) + short = NewConst(constant.MakeBool(false), t) + + case token.LOR: + b.cond(fn, e.X, done, rhs) + short = NewConst(constant.MakeBool(true), t) + } + + // Is rhs unreachable? + if rhs.Preds == nil { + // Simplify false&&y to false, true||y to true. + fn.currentBlock = done + return short + } + + // Is done unreachable? + if done.Preds == nil { + // Simplify true&&y (or false||y) to y. + fn.currentBlock = rhs + return b.expr(fn, e.Y) + } + + // All edges from e.X to done carry the short-circuit value. + var edges []Value + for range done.Preds { + edges = append(edges, short) + } + + // The edge from e.Y to done carries the value of e.Y. + fn.currentBlock = rhs + edges = append(edges, b.expr(fn, e.Y)) + emitJump(fn, done) + fn.currentBlock = done + + phi := &Phi{Edges: edges, Comment: e.Op.String()} + phi.pos = e.OpPos + phi.typ = t + return done.emit(phi) +} + +// exprN lowers a multi-result expression e to SSA form, emitting code +// to fn and returning a single Value whose type is a *types.Tuple. +// The caller must access the components via Extract. +// +// Multi-result expressions include CallExprs in a multi-value +// assignment or return statement, and "value,ok" uses of +// TypeAssertExpr, IndexExpr (when X is a map), and UnaryExpr (when Op +// is token.ARROW). +// +func (b *builder) exprN(fn *Function, e ast.Expr) Value { + typ := fn.Pkg.typeOf(e).(*types.Tuple) + switch e := e.(type) { + case *ast.ParenExpr: + return b.exprN(fn, e.X) + + case *ast.CallExpr: + // Currently, no built-in function nor type conversion + // has multiple results, so we can avoid some of the + // cases for single-valued CallExpr. 
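+ // The tuple-typed Call value is returned as-is; callers split it with Extract.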
+ var c Call + b.setCall(fn, e, &c.Call) + c.typ = typ + return fn.emit(&c) + + case *ast.IndexExpr: + mapt := fn.Pkg.typeOf(e.X).Underlying().(*types.Map) + lookup := &Lookup{ + X: b.expr(fn, e.X), + Index: emitConv(fn, b.expr(fn, e.Index), mapt.Key()), + CommaOk: true, + } + lookup.setType(typ) + lookup.setPos(e.Lbrack) + return fn.emit(lookup) + + case *ast.TypeAssertExpr: + return emitTypeTest(fn, b.expr(fn, e.X), typ.At(0).Type(), e.Lparen) + + case *ast.UnaryExpr: // must be receive <- + unop := &UnOp{ + Op: token.ARROW, + X: b.expr(fn, e.X), + CommaOk: true, + } + unop.setType(typ) + unop.setPos(e.OpPos) + return fn.emit(unop) + } + panic(fmt.Sprintf("exprN(%T) in %s", e, fn)) +} + +// builtin emits to fn SSA instructions to implement a call to the +// built-in function obj with the specified arguments +// and return type. It returns the value defined by the result. +// +// The result is nil if no special handling was required; in this case +// the caller should treat this like an ordinary library function +// call. +// +func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ types.Type, pos token.Pos) Value { + switch obj.Name() { + case "make": + switch typ.Underlying().(type) { + case *types.Slice: + n := b.expr(fn, args[1]) + m := n + if len(args) == 3 { + m = b.expr(fn, args[2]) + } + if m, ok := m.(*Const); ok { + // treat make([]T, n, m) as new([m]T)[:n] + cap := m.Int64() + at := types.NewArray(typ.Underlying().(*types.Slice).Elem(), cap) + alloc := emitNew(fn, at, pos) + alloc.Comment = "makeslice" + v := &Slice{ + X: alloc, + High: n, + } + v.setPos(pos) + v.setType(typ) + return fn.emit(v) + } + v := &MakeSlice{ + Len: n, + Cap: m, + } + v.setPos(pos) + v.setType(typ) + return fn.emit(v) + + case *types.Map: + var res Value + if len(args) == 2 { + res = b.expr(fn, args[1]) + } + v := &MakeMap{Reserve: res} + v.setPos(pos) + v.setType(typ) + return fn.emit(v) + + case *types.Chan: + var sz Value = vZero + if len(args) == 2 { + sz = b.expr(fn, args[1]) + } + v := &MakeChan{Size: sz} + v.setPos(pos) + v.setType(typ) + return fn.emit(v) + } + + case "new": + alloc := emitNew(fn, deref(typ), pos) + alloc.Comment = "new" + return alloc + + case "len", "cap": + // Special case: len or cap of an array or *array is + // based on the type, not the value which may be nil. + // We must still evaluate the value, though. (If it + // was side-effect free, the whole call would have + // been constant-folded.) + t := deref(fn.Pkg.typeOf(args[0])).Underlying() + if at, ok := t.(*types.Array); ok { + b.expr(fn, args[0]) // for effects only + return intConst(at.Len()) + } + // Otherwise treat as normal. + + case "panic": + fn.emit(&Panic{ + X: emitConv(fn, b.expr(fn, args[0]), tEface), + pos: pos, + }) + fn.currentBlock = fn.newBasicBlock("unreachable") + return vTrue // any non-nil Value will do + } + return nil // treat all others as a regular function call +} + +// addr lowers a single-result addressable expression e to SSA form, +// emitting code to fn and returning the location (an lvalue) defined +// by the expression. +// +// If escaping is true, addr marks the base variable of the +// addressable expression e as being a potentially escaping pointer +// value. For example, in this code: +// +// a := A{ +// b: [1]B{B{c: 1}} +// } +// return &a.b[0].c +// +// the application of & causes a.b[0].c to have its address taken, +// which means that ultimately the local variable a must be +// heap-allocated. This is a simple but very conservative escape +// analysis. 
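+// Escaping composite literals are allocated with emitNew (heap) rather than addLocal (stack).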
+// +// Operations forming potentially escaping pointers include: +// - &x, including when implicit in method call or composite literals. +// - a[:] iff a is an array (not *array) +// - references to variables in lexically enclosing functions. +// +func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) lvalue { + switch e := e.(type) { + case *ast.Ident: + if isBlankIdent(e) { + return blank{} + } + obj := fn.Pkg.objectOf(e) + v := fn.Prog.packageLevelValue(obj) // var (address) + if v == nil { + v = fn.lookup(obj, escaping) + } + return &address{addr: v, pos: e.Pos(), expr: e} + + case *ast.CompositeLit: + t := deref(fn.Pkg.typeOf(e)) + var v *Alloc + if escaping { + v = emitNew(fn, t, e.Lbrace) + } else { + v = fn.addLocal(t, e.Lbrace) + } + v.Comment = "complit" + var sb storebuf + b.compLit(fn, v, e, true, &sb) + sb.emit(fn) + return &address{addr: v, pos: e.Lbrace, expr: e} + + case *ast.ParenExpr: + return b.addr(fn, e.X, escaping) + + case *ast.SelectorExpr: + sel, ok := fn.Pkg.info.Selections[e] + if !ok { + // qualified identifier + return b.addr(fn, e.Sel, escaping) + } + if sel.Kind() != types.FieldVal { + panic(sel) + } + wantAddr := true + v := b.receiver(fn, e.X, wantAddr, escaping, sel) + last := len(sel.Index()) - 1 + return &address{ + addr: emitFieldSelection(fn, v, sel.Index()[last], true, e.Sel), + pos: e.Sel.Pos(), + expr: e.Sel, + } + + case *ast.IndexExpr: + var x Value + var et types.Type + switch t := fn.Pkg.typeOf(e.X).Underlying().(type) { + case *types.Array: + x = b.addr(fn, e.X, escaping).address(fn) + et = types.NewPointer(t.Elem()) + case *types.Pointer: // *array + x = b.expr(fn, e.X) + et = types.NewPointer(t.Elem().Underlying().(*types.Array).Elem()) + case *types.Slice: + x = b.expr(fn, e.X) + et = types.NewPointer(t.Elem()) + case *types.Map: + return &element{ + m: b.expr(fn, e.X), + k: emitConv(fn, b.expr(fn, e.Index), t.Key()), + t: t.Elem(), + pos: e.Lbrack, + } + default: + panic("unexpected container type in IndexExpr: " + t.String()) + } + v := &IndexAddr{ + X: x, + Index: emitConv(fn, b.expr(fn, e.Index), tInt), + } + v.setPos(e.Lbrack) + v.setType(et) + return &address{addr: fn.emit(v), pos: e.Lbrack, expr: e} + + case *ast.StarExpr: + return &address{addr: b.expr(fn, e.X), pos: e.Star, expr: e} + } + + panic(fmt.Sprintf("unexpected address expression: %T", e)) +} + +type store struct { + lhs lvalue + rhs Value +} + +type storebuf struct{ stores []store } + +func (sb *storebuf) store(lhs lvalue, rhs Value) { + sb.stores = append(sb.stores, store{lhs, rhs}) +} + +func (sb *storebuf) emit(fn *Function) { + for _, s := range sb.stores { + s.lhs.store(fn, s.rhs) + } +} + +// assign emits to fn code to initialize the lvalue loc with the value +// of expression e. If isZero is true, assign assumes that loc holds +// the zero value for its type. +// +// This is equivalent to loc.store(fn, b.expr(fn, e)), but may generate +// better code in some cases, e.g., for composite literals in an +// addressable location. +// +// If sb is not nil, assign generates code to evaluate expression e, but +// not to update loc. Instead, the necessary stores are appended to the +// storebuf sb so that they can be executed later. This allows correct +// in-place update of existing variables when the RHS is a composite +// literal that may reference parts of the LHS. +// +func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb *storebuf) { + // Can we initialize it in place? 
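+ // Only composite literals are candidates; anything else takes the plain copy path below.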
+ if e, ok := unparen(e).(*ast.CompositeLit); ok { + // A CompositeLit never evaluates to a pointer, + // so if the type of the location is a pointer, + // an &-operation is implied. + if _, ok := loc.(blank); !ok { // avoid calling blank.typ() + if isPointer(loc.typ()) { + ptr := b.addr(fn, e, true).address(fn) + // copy address + if sb != nil { + sb.store(loc, ptr) + } else { + loc.store(fn, ptr) + } + return + } + } + + if _, ok := loc.(*address); ok { + if isInterface(loc.typ()) { + // e.g. var x interface{} = T{...} + // Can't in-place initialize an interface value. + // Fall back to copying. + } else { + // x = T{...} or x := T{...} + addr := loc.address(fn) + if sb != nil { + b.compLit(fn, addr, e, isZero, sb) + } else { + var sb storebuf + b.compLit(fn, addr, e, isZero, &sb) + sb.emit(fn) + } + + // Subtle: emit debug ref for aggregate types only; + // slice and map are handled by store ops in compLit. + switch loc.typ().Underlying().(type) { + case *types.Struct, *types.Array: + emitDebugRef(fn, e, addr, true) + } + + return + } + } + } + + // simple case: just copy + rhs := b.expr(fn, e) + if sb != nil { + sb.store(loc, rhs) + } else { + loc.store(fn, rhs) + } +} + +// expr lowers a single-result expression e to SSA form, emitting code +// to fn and returning the Value defined by the expression. +// +func (b *builder) expr(fn *Function, e ast.Expr) Value { + e = unparen(e) + + tv := fn.Pkg.info.Types[e] + + // Is expression a constant? + if tv.Value != nil { + return NewConst(tv.Value, tv.Type) + } + + var v Value + if tv.Addressable() { + // Prefer pointer arithmetic ({Index,Field}Addr) followed + // by Load over subelement extraction (e.g. Index, Field), + // to avoid large copies. + v = b.addr(fn, e, false).load(fn) + } else { + v = b.expr0(fn, e, tv) + } + if fn.debugInfo() { + emitDebugRef(fn, e, v, false) + } + return v +} + +func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { + switch e := e.(type) { + case *ast.BasicLit: + panic("non-constant BasicLit") // unreachable + + case *ast.FuncLit: + fn2 := &Function{ + name: fmt.Sprintf("%s$%d", fn.Name(), 1+len(fn.AnonFuncs)), + Signature: fn.Pkg.typeOf(e.Type).Underlying().(*types.Signature), + pos: e.Type.Func, + parent: fn, + Pkg: fn.Pkg, + Prog: fn.Prog, + syntax: e, + } + fn.AnonFuncs = append(fn.AnonFuncs, fn2) + b.buildFunction(fn2) + if fn2.FreeVars == nil { + return fn2 + } + v := &MakeClosure{Fn: fn2} + v.setType(tv.Type) + for _, fv := range fn2.FreeVars { + v.Bindings = append(v.Bindings, fv.outer) + fv.outer = nil + } + return fn.emit(v) + + case *ast.TypeAssertExpr: // single-result form only + return emitTypeAssert(fn, b.expr(fn, e.X), tv.Type, e.Lparen) + + case *ast.CallExpr: + if fn.Pkg.info.Types[e.Fun].IsType() { + // Explicit type conversion, e.g. string(x) or big.Int(x) + x := b.expr(fn, e.Args[0]) + y := emitConv(fn, x, tv.Type) + if y != x { + switch y := y.(type) { + case *Convert: + y.pos = e.Lparen + case *ChangeType: + y.pos = e.Lparen + case *MakeInterface: + y.pos = e.Lparen + } + } + return y + } + // Call to "intrinsic" built-ins, e.g. new, make, panic. + if id, ok := unparen(e.Fun).(*ast.Ident); ok { + if obj, ok := fn.Pkg.info.Uses[id].(*types.Builtin); ok { + if v := b.builtin(fn, obj, e.Args, tv.Type, e.Lparen); v != nil { + return v + } + } + } + // Regular function call. + var v Call + b.setCall(fn, e, &v.Call) + v.setType(tv.Type) + return fn.emit(&v) + + case *ast.UnaryExpr: + switch e.Op { + case token.AND: // &X --- potentially escaping. 
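+ // Build the operand in addr mode with escaping=true so it may be heap-allocated.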
+ addr := b.addr(fn, e.X, true) + if _, ok := unparen(e.X).(*ast.StarExpr); ok { + // &*p must panic if p is nil (http://golang.org/s/go12nil). + // For simplicity, we'll just (suboptimally) rely + // on the side effects of a load. + // TODO(adonovan): emit dedicated nilcheck. + addr.load(fn) + } + return addr.address(fn) + case token.ADD: + return b.expr(fn, e.X) + case token.NOT, token.ARROW, token.SUB, token.XOR: // ! <- - ^ + v := &UnOp{ + Op: e.Op, + X: b.expr(fn, e.X), + } + v.setPos(e.OpPos) + v.setType(tv.Type) + return fn.emit(v) + default: + panic(e.Op) + } + + case *ast.BinaryExpr: + switch e.Op { + case token.LAND, token.LOR: + return b.logicalBinop(fn, e) + case token.SHL, token.SHR: + fallthrough + case token.ADD, token.SUB, token.MUL, token.QUO, token.REM, token.AND, token.OR, token.XOR, token.AND_NOT: + return emitArith(fn, e.Op, b.expr(fn, e.X), b.expr(fn, e.Y), tv.Type, e.OpPos) + + case token.EQL, token.NEQ, token.GTR, token.LSS, token.LEQ, token.GEQ: + cmp := emitCompare(fn, e.Op, b.expr(fn, e.X), b.expr(fn, e.Y), e.OpPos) + // The type of x==y may be UntypedBool. + return emitConv(fn, cmp, DefaultType(tv.Type)) + default: + panic("illegal op in BinaryExpr: " + e.Op.String()) + } + + case *ast.SliceExpr: + var low, high, max Value + var x Value + switch fn.Pkg.typeOf(e.X).Underlying().(type) { + case *types.Array: + // Potentially escaping. + x = b.addr(fn, e.X, true).address(fn) + case *types.Basic, *types.Slice, *types.Pointer: // *array + x = b.expr(fn, e.X) + default: + panic("unreachable") + } + if e.High != nil { + high = b.expr(fn, e.High) + } + if e.Low != nil { + low = b.expr(fn, e.Low) + } + if e.Slice3 { + max = b.expr(fn, e.Max) + } + v := &Slice{ + X: x, + Low: low, + High: high, + Max: max, + } + v.setPos(e.Lbrack) + v.setType(tv.Type) + return fn.emit(v) + + case *ast.Ident: + obj := fn.Pkg.info.Uses[e] + // Universal built-in or nil? + switch obj := obj.(type) { + case *types.Builtin: + return &Builtin{name: obj.Name(), sig: tv.Type.(*types.Signature)} + case *types.Nil: + return nilConst(tv.Type) + } + // Package-level func or var? + if v := fn.Prog.packageLevelValue(obj); v != nil { + if _, ok := obj.(*types.Var); ok { + return emitLoad(fn, v) // var (address) + } + return v // (func) + } + // Local var. + return emitLoad(fn, fn.lookup(obj, false)) // var (address) + + case *ast.SelectorExpr: + sel, ok := fn.Pkg.info.Selections[e] + if !ok { + // qualified identifier + return b.expr(fn, e.Sel) + } + switch sel.Kind() { + case types.MethodExpr: + // (*T).f or T.f, the method f from the method-set of type T. + // The result is a "thunk". + return emitConv(fn, makeThunk(fn.Prog, sel), tv.Type) + + case types.MethodVal: + // e.f where e is an expression and f is a method. + // The result is a "bound". + obj := sel.Obj().(*types.Func) + rt := recvType(obj) + wantAddr := isPointer(rt) + escaping := true + v := b.receiver(fn, e.X, wantAddr, escaping, sel) + if isInterface(rt) { + // If v has interface type I, + // we must emit a check that v is non-nil. + // We use: typeassert v.(I). 
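+ // The asserted value is discarded; only the implicit nil check matters here.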
+ emitTypeAssert(fn, v, rt, token.NoPos) + } + c := &MakeClosure{ + Fn: makeBound(fn.Prog, obj), + Bindings: []Value{v}, + } + c.setPos(e.Sel.Pos()) + c.setType(tv.Type) + return fn.emit(c) + + case types.FieldVal: + indices := sel.Index() + last := len(indices) - 1 + v := b.expr(fn, e.X) + v = emitImplicitSelections(fn, v, indices[:last]) + v = emitFieldSelection(fn, v, indices[last], false, e.Sel) + return v + } + + panic("unexpected expression-relative selector") + + case *ast.IndexExpr: + switch t := fn.Pkg.typeOf(e.X).Underlying().(type) { + case *types.Array: + // Non-addressable array (in a register). + v := &Index{ + X: b.expr(fn, e.X), + Index: emitConv(fn, b.expr(fn, e.Index), tInt), + } + v.setPos(e.Lbrack) + v.setType(t.Elem()) + return fn.emit(v) + + case *types.Map: + // Maps are not addressable. + mapt := fn.Pkg.typeOf(e.X).Underlying().(*types.Map) + v := &Lookup{ + X: b.expr(fn, e.X), + Index: emitConv(fn, b.expr(fn, e.Index), mapt.Key()), + } + v.setPos(e.Lbrack) + v.setType(mapt.Elem()) + return fn.emit(v) + + case *types.Basic: // => string + // Strings are not addressable. + v := &Lookup{ + X: b.expr(fn, e.X), + Index: b.expr(fn, e.Index), + } + v.setPos(e.Lbrack) + v.setType(tByte) + return fn.emit(v) + + case *types.Slice, *types.Pointer: // *array + // Addressable slice/array; use IndexAddr and Load. + return b.addr(fn, e, false).load(fn) + + default: + panic("unexpected container type in IndexExpr: " + t.String()) + } + + case *ast.CompositeLit, *ast.StarExpr: + // Addressable types (lvalues) + return b.addr(fn, e, false).load(fn) + } + + panic(fmt.Sprintf("unexpected expr: %T", e)) +} + +// stmtList emits to fn code for all statements in list. +func (b *builder) stmtList(fn *Function, list []ast.Stmt) { + for _, s := range list { + b.stmt(fn, s) + } +} + +// receiver emits to fn code for expression e in the "receiver" +// position of selection e.f (where f may be a field or a method) and +// returns the effective receiver after applying the implicit field +// selections of sel. +// +// wantAddr requests that the result is an an address. If +// !sel.Indirect(), this may require that e be built in addr() mode; it +// must thus be addressable. +// +// escaping is defined as per builder.addr(). +// +func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, sel *types.Selection) Value { + var v Value + if wantAddr && !sel.Indirect() && !isPointer(fn.Pkg.typeOf(e)) { + v = b.addr(fn, e, escaping).address(fn) + } else { + v = b.expr(fn, e) + } + + last := len(sel.Index()) - 1 + v = emitImplicitSelections(fn, v, sel.Index()[:last]) + if !wantAddr && isPointer(v.Type()) { + v = emitLoad(fn, v) + } + return v +} + +// setCallFunc populates the function parts of a CallCommon structure +// (Func, Method, Recv, Args[0]) based on the kind of invocation +// occurring in e. +// +func (b *builder) setCallFunc(fn *Function, e *ast.CallExpr, c *CallCommon) { + c.pos = e.Lparen + + // Is this a method call? + if selector, ok := unparen(e.Fun).(*ast.SelectorExpr); ok { + sel, ok := fn.Pkg.info.Selections[selector] + if ok && sel.Kind() == types.MethodVal { + obj := sel.Obj().(*types.Func) + recv := recvType(obj) + wantAddr := isPointer(recv) + escaping := true + v := b.receiver(fn, selector.X, wantAddr, escaping, sel) + if isInterface(recv) { + // Invoke-mode call. + c.Value = v + c.Method = obj + } else { + // "Call"-mode call. 
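For illustration only (hypothetical names, not part of the vendored file): the three selector kinds handled in the SelectorExpr case above, annotated with the constructs that code says each one produces.

package p

type S struct{ name string }

func (s S) Hello() string { return "hello " + s.name }

func f(s S) (string, string, string) {
	g := S.Hello // MethodExpr: a "thunk" wrapper (makeThunk); the receiver
	// becomes an ordinary first argument of g

	h := s.Hello // MethodVal: a "bound" closure (MakeClosure binding s as the
	// single free-variable value)

	n := s.name // FieldVal: implicit selections followed by a field selection
	// (FieldAddr + Load when s is addressable, Field otherwise)

	return g(s), h(), n
}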
+ c.Value = fn.Prog.declaredFunc(obj) + c.Args = append(c.Args, v) + } + return + } + + // sel.Kind()==MethodExpr indicates T.f() or (*T).f(): + // a statically dispatched call to the method f in the + // method-set of T or *T. T may be an interface. + // + // e.Fun would evaluate to a concrete method, interface + // wrapper function, or promotion wrapper. + // + // For now, we evaluate it in the usual way. + // + // TODO(adonovan): opt: inline expr() here, to make the + // call static and to avoid generation of wrappers. + // It's somewhat tricky as it may consume the first + // actual parameter if the call is "invoke" mode. + // + // Examples: + // type T struct{}; func (T) f() {} // "call" mode + // type T interface { f() } // "invoke" mode + // + // type S struct{ T } + // + // var s S + // S.f(s) + // (*S).f(&s) + // + // Suggested approach: + // - consume the first actual parameter expression + // and build it with b.expr(). + // - apply implicit field selections. + // - use MethodVal logic to populate fields of c. + } + + // Evaluate the function operand in the usual way. + c.Value = b.expr(fn, e.Fun) +} + +// emitCallArgs emits to f code for the actual parameters of call e to +// a (possibly built-in) function of effective type sig. +// The argument values are appended to args, which is then returned. +// +func (b *builder) emitCallArgs(fn *Function, sig *types.Signature, e *ast.CallExpr, args []Value) []Value { + // f(x, y, z...): pass slice z straight through. + if e.Ellipsis != 0 { + for i, arg := range e.Args { + v := emitConv(fn, b.expr(fn, arg), sig.Params().At(i).Type()) + args = append(args, v) + } + return args + } + + offset := len(args) // 1 if call has receiver, 0 otherwise + + // Evaluate actual parameter expressions. + // + // If this is a chained call of the form f(g()) where g has + // multiple return values (MRV), they are flattened out into + // args; a suffix of them may end up in a varargs slice. + for _, arg := range e.Args { + v := b.expr(fn, arg) + if ttuple, ok := v.Type().(*types.Tuple); ok { // MRV chain + for i, n := 0, ttuple.Len(); i < n; i++ { + args = append(args, emitExtract(fn, v, i)) + } + } else { + args = append(args, v) + } + } + + // Actual->formal assignability conversions for normal parameters. + np := sig.Params().Len() // number of normal parameters + if sig.Variadic() { + np-- + } + for i := 0; i < np; i++ { + args[offset+i] = emitConv(fn, args[offset+i], sig.Params().At(i).Type()) + } + + // Actual->formal assignability conversions for variadic parameter, + // and construction of slice. + if sig.Variadic() { + varargs := args[offset+np:] + st := sig.Params().At(np).Type().(*types.Slice) + vt := st.Elem() + if len(varargs) == 0 { + args = append(args, nilConst(st)) + } else { + // Replace a suffix of args with a slice containing it. + at := types.NewArray(vt, int64(len(varargs))) + a := emitNew(fn, at, token.NoPos) + a.setPos(e.Rparen) + a.Comment = "varargs" + for i, arg := range varargs { + iaddr := &IndexAddr{ + X: a, + Index: intConst(int64(i)), + } + iaddr.setType(types.NewPointer(vt)) + fn.emit(iaddr) + emitStore(fn, iaddr, arg, arg.Pos()) + } + s := &Slice{X: a} + s.setType(st) + args[offset+np] = fn.emit(s) + args = args[:offset+np+1] + } + } + return args +} + +// setCall emits to fn code to evaluate all the parameters of a function +// call e, and populates *c with those values. +// +func (b *builder) setCall(fn *Function, e *ast.CallExpr, c *CallCommon) { + // First deal with the f(...) part and optional receiver. 
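For illustration only (hypothetical source): the argument shapes that emitCallArgs above distinguishes, each annotated with the treatment described in that function's comments.

package p

import "fmt"

func pair() (string, int) { return "x", 1 }

func f(extra []interface{}) {
	// Multi-valued call as the argument list: pair()'s two results are
	// flattened into the argument slice via Extract.
	fmt.Println(pair())

	// Ordinary variadic call: the trailing arguments are stored into a
	// fresh "varargs" array and passed as a Slice of it.
	fmt.Println("a", "b", "c")

	// No variadic arguments at all: a nil slice constant is passed.
	fmt.Println()

	// f(x, z...): the slice is passed straight through; no array is built.
	fmt.Println(extra...)
}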
+ b.setCallFunc(fn, e, c) + + // Then append the other actual parameters. + sig, _ := fn.Pkg.typeOf(e.Fun).Underlying().(*types.Signature) + if sig == nil { + panic(fmt.Sprintf("no signature for call of %s", e.Fun)) + } + c.Args = b.emitCallArgs(fn, sig, e, c.Args) +} + +// assignOp emits to fn code to perform loc = val. +func (b *builder) assignOp(fn *Function, loc lvalue, val Value, op token.Token, pos token.Pos) { + oldv := loc.load(fn) + loc.store(fn, emitArith(fn, op, oldv, emitConv(fn, val, oldv.Type()), loc.typ(), pos)) +} + +// localValueSpec emits to fn code to define all of the vars in the +// function-local ValueSpec, spec. +// +func (b *builder) localValueSpec(fn *Function, spec *ast.ValueSpec) { + switch { + case len(spec.Values) == len(spec.Names): + // e.g. var x, y = 0, 1 + // 1:1 assignment + for i, id := range spec.Names { + if !isBlankIdent(id) { + fn.addLocalForIdent(id) + } + lval := b.addr(fn, id, false) // non-escaping + b.assign(fn, lval, spec.Values[i], true, nil) + } + + case len(spec.Values) == 0: + // e.g. var x, y int + // Locals are implicitly zero-initialized. + for _, id := range spec.Names { + if !isBlankIdent(id) { + lhs := fn.addLocalForIdent(id) + if fn.debugInfo() { + emitDebugRef(fn, id, lhs, true) + } + } + } + + default: + // e.g. var x, y = pos() + tuple := b.exprN(fn, spec.Values[0]) + for i, id := range spec.Names { + if !isBlankIdent(id) { + fn.addLocalForIdent(id) + lhs := b.addr(fn, id, false) // non-escaping + lhs.store(fn, emitExtract(fn, tuple, i)) + } + } + } +} + +// assignStmt emits code to fn for a parallel assignment of rhss to lhss. +// isDef is true if this is a short variable declaration (:=). +// +// Note the similarity with localValueSpec. +// +func (b *builder) assignStmt(fn *Function, lhss, rhss []ast.Expr, isDef bool) { + // Side effects of all LHSs and RHSs must occur in left-to-right order. + lvals := make([]lvalue, len(lhss)) + isZero := make([]bool, len(lhss)) + for i, lhs := range lhss { + var lval lvalue = blank{} + if !isBlankIdent(lhs) { + if isDef { + if obj := fn.Pkg.info.Defs[lhs.(*ast.Ident)]; obj != nil { + fn.addNamedLocal(obj) + isZero[i] = true + } + } + lval = b.addr(fn, lhs, false) // non-escaping + } + lvals[i] = lval + } + if len(lhss) == len(rhss) { + // Simple assignment: x = f() (!isDef) + // Parallel assignment: x, y = f(), g() (!isDef) + // or short var decl: x, y := f(), g() (isDef) + // + // In all cases, the RHSs may refer to the LHSs, + // so we need a storebuf. + var sb storebuf + for i := range rhss { + b.assign(fn, lvals[i], rhss[i], isZero[i], &sb) + } + sb.emit(fn) + } else { + // e.g. x, y = pos() + tuple := b.exprN(fn, rhss[0]) + emitDebugRef(fn, rhss[0], tuple, false) + for i, lval := range lvals { + lval.store(fn, emitExtract(fn, tuple, i)) + } + } +} + +// arrayLen returns the length of the array whose composite literal elements are elts. +func (b *builder) arrayLen(fn *Function, elts []ast.Expr) int64 { + var max int64 = -1 + var i int64 = -1 + for _, e := range elts { + if kv, ok := e.(*ast.KeyValueExpr); ok { + i = b.expr(fn, kv.Key).(*Const).Int64() + } else { + i++ + } + if i > max { + max = i + } + } + return max + 1 +} + +// compLit emits to fn code to initialize a composite literal e at +// address addr with type typ. +// +// Nested composite literals are recursively initialized in place +// where possible. If isZero is true, compLit assumes that addr +// holds the zero value for typ. 
+// +// Because the elements of a composite literal may refer to the +// variables being updated, as in the second line below, +// x := T{a: 1} +// x = T{a: x.a} +// all the reads must occur before all the writes. Thus all stores to +// loc are emitted to the storebuf sb for later execution. +// +// A CompositeLit may have pointer type only in the recursive (nested) +// case when the type name is implicit. e.g. in []*T{{}}, the inner +// literal has type *T behaves like &T{}. +// In that case, addr must hold a T, not a *T. +// +func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero bool, sb *storebuf) { + typ := deref(fn.Pkg.typeOf(e)) + switch t := typ.Underlying().(type) { + case *types.Struct: + if !isZero && len(e.Elts) != t.NumFields() { + // memclear + sb.store(&address{addr, e.Lbrace, nil}, + zeroValue(fn, deref(addr.Type()))) + isZero = true + } + for i, e := range e.Elts { + fieldIndex := i + pos := e.Pos() + if kv, ok := e.(*ast.KeyValueExpr); ok { + fname := kv.Key.(*ast.Ident).Name + for i, n := 0, t.NumFields(); i < n; i++ { + sf := t.Field(i) + if sf.Name() == fname { + fieldIndex = i + pos = kv.Colon + e = kv.Value + break + } + } + } + sf := t.Field(fieldIndex) + faddr := &FieldAddr{ + X: addr, + Field: fieldIndex, + } + faddr.setType(types.NewPointer(sf.Type())) + fn.emit(faddr) + b.assign(fn, &address{addr: faddr, pos: pos, expr: e}, e, isZero, sb) + } + + case *types.Array, *types.Slice: + var at *types.Array + var array Value + switch t := t.(type) { + case *types.Slice: + at = types.NewArray(t.Elem(), b.arrayLen(fn, e.Elts)) + alloc := emitNew(fn, at, e.Lbrace) + alloc.Comment = "slicelit" + array = alloc + case *types.Array: + at = t + array = addr + + if !isZero && int64(len(e.Elts)) != at.Len() { + // memclear + sb.store(&address{array, e.Lbrace, nil}, + zeroValue(fn, deref(array.Type()))) + } + } + + var idx *Const + for _, e := range e.Elts { + pos := e.Pos() + if kv, ok := e.(*ast.KeyValueExpr); ok { + idx = b.expr(fn, kv.Key).(*Const) + pos = kv.Colon + e = kv.Value + } else { + var idxval int64 + if idx != nil { + idxval = idx.Int64() + 1 + } + idx = intConst(idxval) + } + iaddr := &IndexAddr{ + X: array, + Index: idx, + } + iaddr.setType(types.NewPointer(at.Elem())) + fn.emit(iaddr) + if t != at { // slice + // backing array is unaliased => storebuf not needed. + b.assign(fn, &address{addr: iaddr, pos: pos, expr: e}, e, true, nil) + } else { + b.assign(fn, &address{addr: iaddr, pos: pos, expr: e}, e, true, sb) + } + } + + if t != at { // slice + s := &Slice{X: array} + s.setPos(e.Lbrace) + s.setType(typ) + sb.store(&address{addr: addr, pos: e.Lbrace, expr: e}, fn.emit(s)) + } + + case *types.Map: + m := &MakeMap{Reserve: intConst(int64(len(e.Elts)))} + m.setPos(e.Lbrace) + m.setType(typ) + fn.emit(m) + for _, e := range e.Elts { + e := e.(*ast.KeyValueExpr) + + // If a key expression in a map literal is itself a + // composite literal, the type may be omitted. + // For example: + // map[*struct{}]bool{{}: true} + // An &-operation may be implied: + // map[*struct{}]bool{&struct{}{}: true} + var key Value + if _, ok := unparen(e.Key).(*ast.CompositeLit); ok && isPointer(t.Key()) { + // A CompositeLit never evaluates to a pointer, + // so if the type of the location is a pointer, + // an &-operation is implied. 
+ key = b.addr(fn, e.Key, true).address(fn) + } else { + key = b.expr(fn, e.Key) + } + + loc := element{ + m: m, + k: emitConv(fn, key, t.Key()), + t: t.Elem(), + pos: e.Colon, + } + + // We call assign() only because it takes care + // of any &-operation required in the recursive + // case, e.g., + // map[int]*struct{}{0: {}} implies &struct{}{}. + // In-place update is of course impossible, + // and no storebuf is needed. + b.assign(fn, &loc, e.Value, true, nil) + } + sb.store(&address{addr: addr, pos: e.Lbrace, expr: e}, m) + + default: + panic("unexpected CompositeLit type: " + t.String()) + } +} + +// switchStmt emits to fn code for the switch statement s, optionally +// labelled by label. +// +func (b *builder) switchStmt(fn *Function, s *ast.SwitchStmt, label *lblock) { + // We treat SwitchStmt like a sequential if-else chain. + // Multiway dispatch can be recovered later by ssautil.Switches() + // to those cases that are free of side effects. + if s.Init != nil { + b.stmt(fn, s.Init) + } + var tag Value = vTrue + if s.Tag != nil { + tag = b.expr(fn, s.Tag) + } + done := fn.newBasicBlock("switch.done") + if label != nil { + label._break = done + } + // We pull the default case (if present) down to the end. + // But each fallthrough label must point to the next + // body block in source order, so we preallocate a + // body block (fallthru) for the next case. + // Unfortunately this makes for a confusing block order. + var dfltBody *[]ast.Stmt + var dfltFallthrough *BasicBlock + var fallthru, dfltBlock *BasicBlock + ncases := len(s.Body.List) + for i, clause := range s.Body.List { + body := fallthru + if body == nil { + body = fn.newBasicBlock("switch.body") // first case only + } + + // Preallocate body block for the next case. + fallthru = done + if i+1 < ncases { + fallthru = fn.newBasicBlock("switch.body") + } + + cc := clause.(*ast.CaseClause) + if cc.List == nil { + // Default case. + dfltBody = &cc.Body + dfltFallthrough = fallthru + dfltBlock = body + continue + } + + var nextCond *BasicBlock + for _, cond := range cc.List { + nextCond = fn.newBasicBlock("switch.next") + // TODO(adonovan): opt: when tag==vTrue, we'd + // get better code if we use b.cond(cond) + // instead of BinOp(EQL, tag, b.expr(cond)) + // followed by If. Don't forget conversions + // though. + cond := emitCompare(fn, token.EQL, tag, b.expr(fn, cond), cond.Pos()) + emitIf(fn, cond, body, nextCond) + fn.currentBlock = nextCond + } + fn.currentBlock = body + fn.targets = &targets{ + tail: fn.targets, + _break: done, + _fallthrough: fallthru, + } + b.stmtList(fn, cc.Body) + fn.targets = fn.targets.tail + emitJump(fn, done) + fn.currentBlock = nextCond + } + if dfltBlock != nil { + emitJump(fn, dfltBlock) + fn.currentBlock = dfltBlock + fn.targets = &targets{ + tail: fn.targets, + _break: done, + _fallthrough: dfltFallthrough, + } + b.stmtList(fn, *dfltBody) + fn.targets = fn.targets.tail + } + emitJump(fn, done) + fn.currentBlock = done +} + +// typeSwitchStmt emits to fn code for the type switch statement s, optionally +// labelled by label. +// +func (b *builder) typeSwitchStmt(fn *Function, s *ast.TypeSwitchStmt, label *lblock) { + // We treat TypeSwitchStmt like a sequential if-else chain. + // Multiway dispatch can be recovered later by ssautil.Switches(). + + // Typeswitch lowering: + // + // var x X + // switch y := x.(type) { + // case T1, T2: S1 // >1 (y := x) + // case nil: SN // nil (y := x) + // default: SD // 0 types (y := x) + // case T3: S3 // 1 type (y := x.(T3)) + // } + // + // ...s.Init... 
+ // x := eval x + // .caseT1: + // t1, ok1 := typeswitch,ok x + // if ok1 then goto S1 else goto .caseT2 + // .caseT2: + // t2, ok2 := typeswitch,ok x + // if ok2 then goto S1 else goto .caseNil + // .S1: + // y := x + // ...S1... + // goto done + // .caseNil: + // if t2, ok2 := typeswitch,ok x + // if x == nil then goto SN else goto .caseT3 + // .SN: + // y := x + // ...SN... + // goto done + // .caseT3: + // t3, ok3 := typeswitch,ok x + // if ok3 then goto S3 else goto default + // .S3: + // y := t3 + // ...S3... + // goto done + // .default: + // y := x + // ...SD... + // goto done + // .done: + + if s.Init != nil { + b.stmt(fn, s.Init) + } + + var x Value + switch ass := s.Assign.(type) { + case *ast.ExprStmt: // x.(type) + x = b.expr(fn, unparen(ass.X).(*ast.TypeAssertExpr).X) + case *ast.AssignStmt: // y := x.(type) + x = b.expr(fn, unparen(ass.Rhs[0]).(*ast.TypeAssertExpr).X) + } + + done := fn.newBasicBlock("typeswitch.done") + if label != nil { + label._break = done + } + var default_ *ast.CaseClause + for _, clause := range s.Body.List { + cc := clause.(*ast.CaseClause) + if cc.List == nil { + default_ = cc + continue + } + body := fn.newBasicBlock("typeswitch.body") + var next *BasicBlock + var casetype types.Type + var ti Value // ti, ok := typeassert,ok x + for _, cond := range cc.List { + next = fn.newBasicBlock("typeswitch.next") + casetype = fn.Pkg.typeOf(cond) + var condv Value + if casetype == tUntypedNil { + condv = emitCompare(fn, token.EQL, x, nilConst(x.Type()), token.NoPos) + ti = x + } else { + yok := emitTypeTest(fn, x, casetype, cc.Case) + ti = emitExtract(fn, yok, 0) + condv = emitExtract(fn, yok, 1) + } + emitIf(fn, condv, body, next) + fn.currentBlock = next + } + if len(cc.List) != 1 { + ti = x + } + fn.currentBlock = body + b.typeCaseBody(fn, cc, ti, done) + fn.currentBlock = next + } + if default_ != nil { + b.typeCaseBody(fn, default_, x, done) + } else { + emitJump(fn, done) + } + fn.currentBlock = done +} + +func (b *builder) typeCaseBody(fn *Function, cc *ast.CaseClause, x Value, done *BasicBlock) { + if obj := fn.Pkg.info.Implicits[cc]; obj != nil { + // In a switch y := x.(type), each case clause + // implicitly declares a distinct object y. + // In a single-type case, y has that type. + // In multi-type cases, 'case nil' and default, + // y has the same type as the interface operand. + emitStore(fn, fn.addNamedLocal(obj), x, obj.Pos()) + } + fn.targets = &targets{ + tail: fn.targets, + _break: done, + } + b.stmtList(fn, cc.Body) + fn.targets = fn.targets.tail + emitJump(fn, done) +} + +// selectStmt emits to fn code for the select statement s, optionally +// labelled by label. +// +func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) { + // A blocking select of a single case degenerates to a + // simple send or receive. + // TODO(adonovan): opt: is this optimization worth its weight? + if len(s.Body.List) == 1 { + clause := s.Body.List[0].(*ast.CommClause) + if clause.Comm != nil { + b.stmt(fn, clause.Comm) + done := fn.newBasicBlock("select.done") + if label != nil { + label._break = done + } + fn.targets = &targets{ + tail: fn.targets, + _break: done, + } + b.stmtList(fn, clause.Body) + fn.targets = fn.targets.tail + emitJump(fn, done) + fn.currentBlock = done + return + } + } + + // First evaluate all channels in all cases, and find + // the directions of each state. 
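For illustration only (hypothetical source): a type switch of the shape lowered by typeSwitchStmt above, with the per-clause treatment of the declared variable noted as described in typeCaseBody. The block names in the comment come from the builder code itself.

package p

import "fmt"

func describe(x interface{}) string {
	// Lowered to a chain of typeswitch.body/typeswitch.next blocks, one
	// comma-ok type test per listed type, all joining at typeswitch.done.
	switch y := x.(type) {
	case int, uint: // more than one type: y has x's interface type
		return fmt.Sprint("integer ", y)
	case nil: // nil case: x is compared against the nil constant; y is x
		return "nil"
	case string: // single type: y is the value extracted from the assert tuple
		return "string " + y
	default: // default clause: y is x
		return fmt.Sprint("other ", y)
	}
}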
+ var states []*SelectState + blocking := true + debugInfo := fn.debugInfo() + for _, clause := range s.Body.List { + var st *SelectState + switch comm := clause.(*ast.CommClause).Comm.(type) { + case nil: // default case + blocking = false + continue + + case *ast.SendStmt: // ch<- i + ch := b.expr(fn, comm.Chan) + st = &SelectState{ + Dir: types.SendOnly, + Chan: ch, + Send: emitConv(fn, b.expr(fn, comm.Value), + ch.Type().Underlying().(*types.Chan).Elem()), + Pos: comm.Arrow, + } + if debugInfo { + st.DebugNode = comm + } + + case *ast.AssignStmt: // x := <-ch + recv := unparen(comm.Rhs[0]).(*ast.UnaryExpr) + st = &SelectState{ + Dir: types.RecvOnly, + Chan: b.expr(fn, recv.X), + Pos: recv.OpPos, + } + if debugInfo { + st.DebugNode = recv + } + + case *ast.ExprStmt: // <-ch + recv := unparen(comm.X).(*ast.UnaryExpr) + st = &SelectState{ + Dir: types.RecvOnly, + Chan: b.expr(fn, recv.X), + Pos: recv.OpPos, + } + if debugInfo { + st.DebugNode = recv + } + } + states = append(states, st) + } + + // We dispatch on the (fair) result of Select using a + // sequential if-else chain, in effect: + // + // idx, recvOk, r0...r_n-1 := select(...) + // if idx == 0 { // receive on channel 0 (first receive => r0) + // x, ok := r0, recvOk + // ...state0... + // } else if v == 1 { // send on channel 1 + // ...state1... + // } else { + // ...default... + // } + sel := &Select{ + States: states, + Blocking: blocking, + } + sel.setPos(s.Select) + var vars []*types.Var + vars = append(vars, varIndex, varOk) + for _, st := range states { + if st.Dir == types.RecvOnly { + tElem := st.Chan.Type().Underlying().(*types.Chan).Elem() + vars = append(vars, anonVar(tElem)) + } + } + sel.setType(types.NewTuple(vars...)) + + fn.emit(sel) + idx := emitExtract(fn, sel, 0) + + done := fn.newBasicBlock("select.done") + if label != nil { + label._break = done + } + + var defaultBody *[]ast.Stmt + state := 0 + r := 2 // index in 'sel' tuple of value; increments if st.Dir==RECV + for _, cc := range s.Body.List { + clause := cc.(*ast.CommClause) + if clause.Comm == nil { + defaultBody = &clause.Body + continue + } + body := fn.newBasicBlock("select.body") + next := fn.newBasicBlock("select.next") + emitIf(fn, emitCompare(fn, token.EQL, idx, intConst(int64(state)), token.NoPos), body, next) + fn.currentBlock = body + fn.targets = &targets{ + tail: fn.targets, + _break: done, + } + switch comm := clause.Comm.(type) { + case *ast.ExprStmt: // <-ch + if debugInfo { + v := emitExtract(fn, sel, r) + emitDebugRef(fn, states[state].DebugNode.(ast.Expr), v, false) + } + r++ + + case *ast.AssignStmt: // x := <-states[state].Chan + if comm.Tok == token.DEFINE { + fn.addLocalForIdent(comm.Lhs[0].(*ast.Ident)) + } + x := b.addr(fn, comm.Lhs[0], false) // non-escaping + v := emitExtract(fn, sel, r) + if debugInfo { + emitDebugRef(fn, states[state].DebugNode.(ast.Expr), v, false) + } + x.store(fn, v) + + if len(comm.Lhs) == 2 { // x, ok := ... + if comm.Tok == token.DEFINE { + fn.addLocalForIdent(comm.Lhs[1].(*ast.Ident)) + } + ok := b.addr(fn, comm.Lhs[1], false) // non-escaping + ok.store(fn, emitExtract(fn, sel, 1)) + } + r++ + } + b.stmtList(fn, clause.Body) + fn.targets = fn.targets.tail + emitJump(fn, done) + fn.currentBlock = next + state++ + } + if defaultBody != nil { + fn.targets = &targets{ + tail: fn.targets, + _break: done, + } + b.stmtList(fn, *defaultBody) + fn.targets = fn.targets.tail + } else { + // A blocking select must match some case. + // (This should really be a runtime.errorString, not a string.) 
+ fn.emit(&Panic{ + X: emitConv(fn, stringConst("blocking select matched no case"), tEface), + }) + fn.currentBlock = fn.newBasicBlock("unreachable") + } + emitJump(fn, done) + fn.currentBlock = done +} + +// forStmt emits to fn code for the for statement s, optionally +// labelled by label. +// +func (b *builder) forStmt(fn *Function, s *ast.ForStmt, label *lblock) { + // ...init... + // jump loop + // loop: + // if cond goto body else done + // body: + // ...body... + // jump post + // post: (target of continue) + // ...post... + // jump loop + // done: (target of break) + if s.Init != nil { + b.stmt(fn, s.Init) + } + body := fn.newBasicBlock("for.body") + done := fn.newBasicBlock("for.done") // target of 'break' + loop := body // target of back-edge + if s.Cond != nil { + loop = fn.newBasicBlock("for.loop") + } + cont := loop // target of 'continue' + if s.Post != nil { + cont = fn.newBasicBlock("for.post") + } + if label != nil { + label._break = done + label._continue = cont + } + emitJump(fn, loop) + fn.currentBlock = loop + if loop != body { + b.cond(fn, s.Cond, body, done) + fn.currentBlock = body + } + fn.targets = &targets{ + tail: fn.targets, + _break: done, + _continue: cont, + } + b.stmt(fn, s.Body) + fn.targets = fn.targets.tail + emitJump(fn, cont) + + if s.Post != nil { + fn.currentBlock = cont + b.stmt(fn, s.Post) + emitJump(fn, loop) // back-edge + } + fn.currentBlock = done +} + +// rangeIndexed emits to fn the header for an integer-indexed loop +// over array, *array or slice value x. +// The v result is defined only if tv is non-nil. +// forPos is the position of the "for" token. +// +func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.Pos) (k, v Value, loop, done *BasicBlock) { + // + // length = len(x) + // index = -1 + // loop: (target of continue) + // index++ + // if index < length goto body else done + // body: + // k = index + // v = x[index] + // ...body... + // jump loop + // done: (target of break) + + // Determine number of iterations. + var length Value + if arr, ok := deref(x.Type()).Underlying().(*types.Array); ok { + // For array or *array, the number of iterations is + // known statically thanks to the type. We avoid a + // data dependence upon x, permitting later dead-code + // elimination if x is pure, static unrolling, etc. + // Ranging over a nil *array may have >0 iterations. + // We still generate code for x, in case it has effects. + length = intConst(arr.Len()) + } else { + // length = len(x). 
+ var c Call + c.Call.Value = makeLen(x.Type()) + c.Call.Args = []Value{x} + c.setType(tInt) + length = fn.emit(&c) + } + + index := fn.addLocal(tInt, token.NoPos) + emitStore(fn, index, intConst(-1), pos) + + loop = fn.newBasicBlock("rangeindex.loop") + emitJump(fn, loop) + fn.currentBlock = loop + + incr := &BinOp{ + Op: token.ADD, + X: emitLoad(fn, index), + Y: vOne, + } + incr.setType(tInt) + emitStore(fn, index, fn.emit(incr), pos) + + body := fn.newBasicBlock("rangeindex.body") + done = fn.newBasicBlock("rangeindex.done") + emitIf(fn, emitCompare(fn, token.LSS, incr, length, token.NoPos), body, done) + fn.currentBlock = body + + k = emitLoad(fn, index) + if tv != nil { + switch t := x.Type().Underlying().(type) { + case *types.Array: + instr := &Index{ + X: x, + Index: k, + } + instr.setType(t.Elem()) + v = fn.emit(instr) + + case *types.Pointer: // *array + instr := &IndexAddr{ + X: x, + Index: k, + } + instr.setType(types.NewPointer(t.Elem().Underlying().(*types.Array).Elem())) + v = emitLoad(fn, fn.emit(instr)) + + case *types.Slice: + instr := &IndexAddr{ + X: x, + Index: k, + } + instr.setType(types.NewPointer(t.Elem())) + v = emitLoad(fn, fn.emit(instr)) + + default: + panic("rangeIndexed x:" + t.String()) + } + } + return +} + +// rangeIter emits to fn the header for a loop using +// Range/Next/Extract to iterate over map or string value x. +// tk and tv are the types of the key/value results k and v, or nil +// if the respective component is not wanted. +// +func (b *builder) rangeIter(fn *Function, x Value, tk, tv types.Type, pos token.Pos) (k, v Value, loop, done *BasicBlock) { + // + // it = range x + // loop: (target of continue) + // okv = next it (ok, key, value) + // ok = extract okv #0 + // if ok goto body else done + // body: + // k = extract okv #1 + // v = extract okv #2 + // ...body... + // jump loop + // done: (target of break) + // + + if tk == nil { + tk = tInvalid + } + if tv == nil { + tv = tInvalid + } + + rng := &Range{X: x} + rng.setPos(pos) + rng.setType(tRangeIter) + it := fn.emit(rng) + + loop = fn.newBasicBlock("rangeiter.loop") + emitJump(fn, loop) + fn.currentBlock = loop + + _, isString := x.Type().Underlying().(*types.Basic) + + okv := &Next{ + Iter: it, + IsString: isString, + } + okv.setType(types.NewTuple( + varOk, + newVar("k", tk), + newVar("v", tv), + )) + fn.emit(okv) + + body := fn.newBasicBlock("rangeiter.body") + done = fn.newBasicBlock("rangeiter.done") + emitIf(fn, emitExtract(fn, okv, 0), body, done) + fn.currentBlock = body + + if tk != tInvalid { + k = emitExtract(fn, okv, 1) + } + if tv != tInvalid { + v = emitExtract(fn, okv, 2) + } + return +} + +// rangeChan emits to fn the header for a loop that receives from +// channel x until it fails. +// tk is the channel's element type, or nil if the k result is +// not wanted +// pos is the position of the '=' or ':=' token. +// +func (b *builder) rangeChan(fn *Function, x Value, tk types.Type, pos token.Pos) (k Value, loop, done *BasicBlock) { + // + // loop: (target of continue) + // ko = <-x (key, ok) + // ok = extract ko #1 + // if ok goto body else done + // body: + // k = extract ko #0 + // ... 
+ // goto loop + // done: (target of break) + + loop = fn.newBasicBlock("rangechan.loop") + emitJump(fn, loop) + fn.currentBlock = loop + recv := &UnOp{ + Op: token.ARROW, + X: x, + CommaOk: true, + } + recv.setPos(pos) + recv.setType(types.NewTuple( + newVar("k", x.Type().Underlying().(*types.Chan).Elem()), + varOk, + )) + ko := fn.emit(recv) + body := fn.newBasicBlock("rangechan.body") + done = fn.newBasicBlock("rangechan.done") + emitIf(fn, emitExtract(fn, ko, 1), body, done) + fn.currentBlock = body + if tk != nil { + k = emitExtract(fn, ko, 0) + } + return +} + +// rangeStmt emits to fn code for the range statement s, optionally +// labelled by label. +// +func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock) { + var tk, tv types.Type + if s.Key != nil && !isBlankIdent(s.Key) { + tk = fn.Pkg.typeOf(s.Key) + } + if s.Value != nil && !isBlankIdent(s.Value) { + tv = fn.Pkg.typeOf(s.Value) + } + + // If iteration variables are defined (:=), this + // occurs once outside the loop. + // + // Unlike a short variable declaration, a RangeStmt + // using := never redeclares an existing variable; it + // always creates a new one. + if s.Tok == token.DEFINE { + if tk != nil { + fn.addLocalForIdent(s.Key.(*ast.Ident)) + } + if tv != nil { + fn.addLocalForIdent(s.Value.(*ast.Ident)) + } + } + + x := b.expr(fn, s.X) + + var k, v Value + var loop, done *BasicBlock + switch rt := x.Type().Underlying().(type) { + case *types.Slice, *types.Array, *types.Pointer: // *array + k, v, loop, done = b.rangeIndexed(fn, x, tv, s.For) + + case *types.Chan: + k, loop, done = b.rangeChan(fn, x, tk, s.For) + + case *types.Map, *types.Basic: // string + k, v, loop, done = b.rangeIter(fn, x, tk, tv, s.For) + + default: + panic("Cannot range over: " + rt.String()) + } + + // Evaluate both LHS expressions before we update either. + var kl, vl lvalue + if tk != nil { + kl = b.addr(fn, s.Key, false) // non-escaping + } + if tv != nil { + vl = b.addr(fn, s.Value, false) // non-escaping + } + if tk != nil { + kl.store(fn, k) + } + if tv != nil { + vl.store(fn, v) + } + + if label != nil { + label._break = done + label._continue = loop + } + + fn.targets = &targets{ + tail: fn.targets, + _break: done, + _continue: loop, + } + b.stmt(fn, s.Body) + fn.targets = fn.targets.tail + emitJump(fn, loop) // back-edge + fn.currentBlock = done +} + +// stmt lowers statement s to SSA form, emitting code to fn. +func (b *builder) stmt(fn *Function, _s ast.Stmt) { + // The label of the current statement. If non-nil, its _goto + // target is always set; its _break and _continue are set only + // within the body of switch/typeswitch/select/for/range. + // It is effectively an additional default-nil parameter of stmt(). + var label *lblock +start: + switch s := _s.(type) { + case *ast.EmptyStmt: + // ignore. (Usually removed by gofmt.) 
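For illustration only (hypothetical source): the three range forms whose loop headers are emitted by rangeIndexed, rangeIter and rangeChan above; the block names in the comments are the ones those functions create.

package p

func g(xs []int, m map[string]int, ch chan int) (sum int) {
	// Slice/array/*array: an integer-indexed loop (rangeindex.loop/body/done)
	// driven by an explicit counter; v is read through IndexAddr + Load.
	for i, v := range xs {
		sum += i + v
	}

	// Map (and string): Range/Next/Extract iteration (rangeiter.* blocks).
	for _, v := range m {
		sum += v
	}

	// Channel: one comma-ok receive per iteration (rangechan.* blocks),
	// exiting when the receive reports !ok.
	for v := range ch {
		sum += v
	}
	return
}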
+ + case *ast.DeclStmt: // Con, Var or Typ + d := s.Decl.(*ast.GenDecl) + if d.Tok == token.VAR { + for _, spec := range d.Specs { + if vs, ok := spec.(*ast.ValueSpec); ok { + b.localValueSpec(fn, vs) + } + } + } + + case *ast.LabeledStmt: + label = fn.labelledBlock(s.Label) + emitJump(fn, label._goto) + fn.currentBlock = label._goto + _s = s.Stmt + goto start // effectively: tailcall stmt(fn, s.Stmt, label) + + case *ast.ExprStmt: + b.expr(fn, s.X) + + case *ast.SendStmt: + fn.emit(&Send{ + Chan: b.expr(fn, s.Chan), + X: emitConv(fn, b.expr(fn, s.Value), + fn.Pkg.typeOf(s.Chan).Underlying().(*types.Chan).Elem()), + pos: s.Arrow, + }) + + case *ast.IncDecStmt: + op := token.ADD + if s.Tok == token.DEC { + op = token.SUB + } + loc := b.addr(fn, s.X, false) + b.assignOp(fn, loc, NewConst(constant.MakeInt64(1), loc.typ()), op, s.Pos()) + + case *ast.AssignStmt: + switch s.Tok { + case token.ASSIGN, token.DEFINE: + b.assignStmt(fn, s.Lhs, s.Rhs, s.Tok == token.DEFINE) + + default: // +=, etc. + op := s.Tok + token.ADD - token.ADD_ASSIGN + b.assignOp(fn, b.addr(fn, s.Lhs[0], false), b.expr(fn, s.Rhs[0]), op, s.Pos()) + } + + case *ast.GoStmt: + // The "intrinsics" new/make/len/cap are forbidden here. + // panic is treated like an ordinary function call. + v := Go{pos: s.Go} + b.setCall(fn, s.Call, &v.Call) + fn.emit(&v) + + case *ast.DeferStmt: + // The "intrinsics" new/make/len/cap are forbidden here. + // panic is treated like an ordinary function call. + v := Defer{pos: s.Defer} + b.setCall(fn, s.Call, &v.Call) + fn.emit(&v) + + // A deferred call can cause recovery from panic, + // and control resumes at the Recover block. + createRecoverBlock(fn) + + case *ast.ReturnStmt: + var results []Value + if len(s.Results) == 1 && fn.Signature.Results().Len() > 1 { + // Return of one expression in a multi-valued function. + tuple := b.exprN(fn, s.Results[0]) + ttuple := tuple.Type().(*types.Tuple) + for i, n := 0, ttuple.Len(); i < n; i++ { + results = append(results, + emitConv(fn, emitExtract(fn, tuple, i), + fn.Signature.Results().At(i).Type())) + } + } else { + // 1:1 return, or no-arg return in non-void function. + for i, r := range s.Results { + v := emitConv(fn, b.expr(fn, r), fn.Signature.Results().At(i).Type()) + results = append(results, v) + } + } + if fn.namedResults != nil { + // Function has named result parameters (NRPs). + // Perform parallel assignment of return operands to NRPs. + for i, r := range results { + emitStore(fn, fn.namedResults[i], r, s.Return) + } + } + // Run function calls deferred in this + // function when explicitly returning from it. + fn.emit(new(RunDefers)) + if fn.namedResults != nil { + // Reload NRPs to form the result tuple. 
+ results = results[:0] + for _, r := range fn.namedResults { + results = append(results, emitLoad(fn, r)) + } + } + fn.emit(&Return{Results: results, pos: s.Return}) + fn.currentBlock = fn.newBasicBlock("unreachable") + + case *ast.BranchStmt: + var block *BasicBlock + switch s.Tok { + case token.BREAK: + if s.Label != nil { + block = fn.labelledBlock(s.Label)._break + } else { + for t := fn.targets; t != nil && block == nil; t = t.tail { + block = t._break + } + } + + case token.CONTINUE: + if s.Label != nil { + block = fn.labelledBlock(s.Label)._continue + } else { + for t := fn.targets; t != nil && block == nil; t = t.tail { + block = t._continue + } + } + + case token.FALLTHROUGH: + for t := fn.targets; t != nil && block == nil; t = t.tail { + block = t._fallthrough + } + + case token.GOTO: + block = fn.labelledBlock(s.Label)._goto + } + emitJump(fn, block) + fn.currentBlock = fn.newBasicBlock("unreachable") + + case *ast.BlockStmt: + b.stmtList(fn, s.List) + + case *ast.IfStmt: + if s.Init != nil { + b.stmt(fn, s.Init) + } + then := fn.newBasicBlock("if.then") + done := fn.newBasicBlock("if.done") + els := done + if s.Else != nil { + els = fn.newBasicBlock("if.else") + } + b.cond(fn, s.Cond, then, els) + fn.currentBlock = then + b.stmt(fn, s.Body) + emitJump(fn, done) + + if s.Else != nil { + fn.currentBlock = els + b.stmt(fn, s.Else) + emitJump(fn, done) + } + + fn.currentBlock = done + + case *ast.SwitchStmt: + b.switchStmt(fn, s, label) + + case *ast.TypeSwitchStmt: + b.typeSwitchStmt(fn, s, label) + + case *ast.SelectStmt: + b.selectStmt(fn, s, label) + + case *ast.ForStmt: + b.forStmt(fn, s, label) + + case *ast.RangeStmt: + b.rangeStmt(fn, s, label) + + default: + panic(fmt.Sprintf("unexpected statement kind: %T", s)) + } +} + +// buildFunction builds SSA code for the body of function fn. Idempotent. +func (b *builder) buildFunction(fn *Function) { + if fn.Blocks != nil { + return // building already started + } + + var recvField *ast.FieldList + var body *ast.BlockStmt + var functype *ast.FuncType + switch n := fn.syntax.(type) { + case nil: + return // not a Go source function. (Synthetic, or from object file.) + case *ast.FuncDecl: + functype = n.Type + recvField = n.Recv + body = n.Body + case *ast.FuncLit: + functype = n.Type + body = n.Body + default: + panic(n) + } + + if body == nil { + // External function. + if fn.Params == nil { + // This condition ensures we add a non-empty + // params list once only, but we may attempt + // the degenerate empty case repeatedly. + // TODO(adonovan): opt: don't do that. + + // We set Function.Params even though there is no body + // code to reference them. This simplifies clients. + if recv := fn.Signature.Recv(); recv != nil { + fn.addParamObj(recv) + } + params := fn.Signature.Params() + for i, n := 0, params.Len(); i < n; i++ { + fn.addParamObj(params.At(i)) + } + } + return + } + if fn.Prog.mode&LogSource != 0 { + defer logStack("build function %s @ %s", fn, fn.Prog.Fset.Position(fn.pos))() + } + fn.startBody() + fn.createSyntacticParams(recvField, functype) + b.stmt(fn, body) + if cb := fn.currentBlock; cb != nil && (cb == fn.Blocks[0] || cb == fn.Recover || cb.Preds != nil) { + // Control fell off the end of the function's body block. + // + // Block optimizations eliminate the current block, if + // unreachable. It is a builder invariant that + // if this no-arg return is ill-typed for + // fn.Signature.Results, this block must be + // unreachable. The sanity checker checks this. 
+ fn.emit(new(RunDefers)) + fn.emit(new(Return)) + } + fn.finishBody() +} + +// buildFuncDecl builds SSA code for the function or method declared +// by decl in package pkg. +// +func (b *builder) buildFuncDecl(pkg *Package, decl *ast.FuncDecl) { + id := decl.Name + if isBlankIdent(id) { + return // discard + } + fn := pkg.values[pkg.info.Defs[id]].(*Function) + if decl.Recv == nil && id.Name == "init" { + var v Call + v.Call.Value = fn + v.setType(types.NewTuple()) + pkg.init.emit(&v) + } + b.buildFunction(fn) +} + +// Build calls Package.Build for each package in prog. +// Building occurs in parallel unless the BuildSerially mode flag was set. +// +// Build is intended for whole-program analysis; a typical compiler +// need only build a single package. +// +// Build is idempotent and thread-safe. +// +func (prog *Program) Build() { + var wg sync.WaitGroup + for _, p := range prog.packages { + if prog.mode&BuildSerially != 0 { + p.Build() + } else { + wg.Add(1) + go func(p *Package) { + p.Build() + wg.Done() + }(p) + } + } + wg.Wait() +} + +// Build builds SSA code for all functions and vars in package p. +// +// Precondition: CreatePackage must have been called for all of p's +// direct imports (and hence its direct imports must have been +// error-free). +// +// Build is idempotent and thread-safe. +// +func (p *Package) Build() { p.buildOnce.Do(p.build) } + +func (p *Package) build() { + if p.info == nil { + return // synthetic package, e.g. "testmain" + } + + // Ensure we have runtime type info for all exported members. + // TODO(adonovan): ideally belongs in memberFromObject, but + // that would require package creation in topological order. + for name, mem := range p.Members { + if ast.IsExported(name) { + p.Prog.needMethodsOf(mem.Type()) + } + } + if p.Prog.mode&LogSource != 0 { + defer logStack("build %s", p)() + } + init := p.init + init.startBody() + + var done *BasicBlock + + if p.Prog.mode&BareInits == 0 { + // Make init() skip if package is already initialized. + initguard := p.Var("init$guard") + doinit := init.newBasicBlock("init.start") + done = init.newBasicBlock("init.done") + emitIf(init, emitLoad(init, initguard), done, doinit) + init.currentBlock = doinit + emitStore(init, initguard, vTrue, token.NoPos) + + // Call the init() function of each package we import. + for _, pkg := range p.Pkg.Imports() { + prereq := p.Prog.packages[pkg] + if prereq == nil { + panic(fmt.Sprintf("Package(%q).Build(): unsatisfied import: Program.CreatePackage(%q) was not called", p.Pkg.Path(), pkg.Path())) + } + var v Call + v.Call.Value = prereq.init + v.Call.pos = init.pos + v.setType(types.NewTuple()) + init.emit(&v) + } + } + + var b builder + + // Initialize package-level vars in correct order. 
+ for _, varinit := range p.info.InitOrder { + if init.Prog.mode&LogSource != 0 { + fmt.Fprintf(os.Stderr, "build global initializer %v @ %s\n", + varinit.Lhs, p.Prog.Fset.Position(varinit.Rhs.Pos())) + } + if len(varinit.Lhs) == 1 { + // 1:1 initialization: var x, y = a(), b() + var lval lvalue + if v := varinit.Lhs[0]; v.Name() != "_" { + lval = &address{addr: p.values[v].(*Global), pos: v.Pos()} + } else { + lval = blank{} + } + b.assign(init, lval, varinit.Rhs, true, nil) + } else { + // n:1 initialization: var x, y := f() + tuple := b.exprN(init, varinit.Rhs) + for i, v := range varinit.Lhs { + if v.Name() == "_" { + continue + } + emitStore(init, p.values[v].(*Global), emitExtract(init, tuple, i), v.Pos()) + } + } + } + + // Build all package-level functions, init functions + // and methods, including unreachable/blank ones. + // We build them in source order, but it's not significant. + for _, file := range p.files { + for _, decl := range file.Decls { + if decl, ok := decl.(*ast.FuncDecl); ok { + b.buildFuncDecl(p, decl) + } + } + } + + // Finish up init(). + if p.Prog.mode&BareInits == 0 { + emitJump(init, done) + init.currentBlock = done + } + init.emit(new(Return)) + init.finishBody() + + p.info = nil // We no longer need ASTs or go/types deductions. + + if p.Prog.mode&SanityCheckFunctions != 0 { + sanityCheckPackage(p) + } +} + +// Like ObjectOf, but panics instead of returning nil. +// Only valid during p's create and build phases. +func (p *Package) objectOf(id *ast.Ident) types.Object { + if o := p.info.ObjectOf(id); o != nil { + return o + } + panic(fmt.Sprintf("no types.Object for ast.Ident %s @ %s", + id.Name, p.Prog.Fset.Position(id.Pos()))) +} + +// Like TypeOf, but panics instead of returning nil. +// Only valid during p's create and build phases. +func (p *Package) typeOf(e ast.Expr) types.Type { + if T := p.info.TypeOf(e); T != nil { + return T + } + panic(fmt.Sprintf("no type for %T @ %s", + e, p.Prog.Fset.Position(e.Pos()))) +} diff --git a/vendor/honnef.co/go/tools/ssa/const.go b/vendor/honnef.co/go/tools/ssa/const.go new file mode 100644 index 000000000..f95d9e114 --- /dev/null +++ b/vendor/honnef.co/go/tools/ssa/const.go @@ -0,0 +1,169 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file defines the Const SSA value type. + +import ( + "fmt" + "go/constant" + "go/token" + "go/types" + "strconv" +) + +// NewConst returns a new constant of the specified value and type. +// val must be valid according to the specification of Const.Value. +// +func NewConst(val constant.Value, typ types.Type) *Const { + return &Const{typ, val} +} + +// intConst returns an 'int' constant that evaluates to i. +// (i is an int64 in case the host is narrower than the target.) +func intConst(i int64) *Const { + return NewConst(constant.MakeInt64(i), tInt) +} + +// nilConst returns a nil constant of the specified type, which may +// be any reference type, including interfaces. +// +func nilConst(typ types.Type) *Const { + return NewConst(nil, typ) +} + +// stringConst returns a 'string' constant that evaluates to s. +func stringConst(s string) *Const { + return NewConst(constant.MakeString(s), tString) +} + +// zeroConst returns a new "zero" constant of the specified type, +// which must not be an array or struct type: the zero values of +// aggregates are well-defined but cannot be represented by Const. 
+// +func zeroConst(t types.Type) *Const { + switch t := t.(type) { + case *types.Basic: + switch { + case t.Info()&types.IsBoolean != 0: + return NewConst(constant.MakeBool(false), t) + case t.Info()&types.IsNumeric != 0: + return NewConst(constant.MakeInt64(0), t) + case t.Info()&types.IsString != 0: + return NewConst(constant.MakeString(""), t) + case t.Kind() == types.UnsafePointer: + fallthrough + case t.Kind() == types.UntypedNil: + return nilConst(t) + default: + panic(fmt.Sprint("zeroConst for unexpected type:", t)) + } + case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature: + return nilConst(t) + case *types.Named: + return NewConst(zeroConst(t.Underlying()).Value, t) + case *types.Array, *types.Struct, *types.Tuple: + panic(fmt.Sprint("zeroConst applied to aggregate:", t)) + } + panic(fmt.Sprint("zeroConst: unexpected ", t)) +} + +func (c *Const) RelString(from *types.Package) string { + var s string + if c.Value == nil { + s = "nil" + } else if c.Value.Kind() == constant.String { + s = constant.StringVal(c.Value) + const max = 20 + // TODO(adonovan): don't cut a rune in half. + if len(s) > max { + s = s[:max-3] + "..." // abbreviate + } + s = strconv.Quote(s) + } else { + s = c.Value.String() + } + return s + ":" + relType(c.Type(), from) +} + +func (c *Const) Name() string { + return c.RelString(nil) +} + +func (c *Const) String() string { + return c.Name() +} + +func (c *Const) Type() types.Type { + return c.typ +} + +func (c *Const) Referrers() *[]Instruction { + return nil +} + +func (c *Const) Parent() *Function { return nil } + +func (c *Const) Pos() token.Pos { + return token.NoPos +} + +// IsNil returns true if this constant represents a typed or untyped nil value. +func (c *Const) IsNil() bool { + return c.Value == nil +} + +// TODO(adonovan): move everything below into honnef.co/go/tools/ssa/interp. + +// Int64 returns the numeric value of this constant truncated to fit +// a signed 64-bit integer. +// +func (c *Const) Int64() int64 { + switch x := constant.ToInt(c.Value); x.Kind() { + case constant.Int: + if i, ok := constant.Int64Val(x); ok { + return i + } + return 0 + case constant.Float: + f, _ := constant.Float64Val(x) + return int64(f) + } + panic(fmt.Sprintf("unexpected constant value: %T", c.Value)) +} + +// Uint64 returns the numeric value of this constant truncated to fit +// an unsigned 64-bit integer. +// +func (c *Const) Uint64() uint64 { + switch x := constant.ToInt(c.Value); x.Kind() { + case constant.Int: + if u, ok := constant.Uint64Val(x); ok { + return u + } + return 0 + case constant.Float: + f, _ := constant.Float64Val(x) + return uint64(f) + } + panic(fmt.Sprintf("unexpected constant value: %T", c.Value)) +} + +// Float64 returns the numeric value of this constant truncated to fit +// a float64. +// +func (c *Const) Float64() float64 { + f, _ := constant.Float64Val(c.Value) + return f +} + +// Complex128 returns the complex value of this constant truncated to +// fit a complex128. +// +func (c *Const) Complex128() complex128 { + re, _ := constant.Float64Val(constant.Real(c.Value)) + im, _ := constant.Float64Val(constant.Imag(c.Value)) + return complex(re, im) +} diff --git a/vendor/honnef.co/go/tools/ssa/create.go b/vendor/honnef.co/go/tools/ssa/create.go new file mode 100644 index 000000000..85163a0c5 --- /dev/null +++ b/vendor/honnef.co/go/tools/ssa/create.go @@ -0,0 +1,270 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
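A minimal sketch (illustrative only, not part of the vendored files) of how a client might construct and inspect Const values through the exported helpers defined in const.go above; the printed strings in the comments are assumptions based on RelString's formatting.

package main

import (
	"fmt"
	"go/constant"
	"go/types"

	"honnef.co/go/tools/ssa"
)

func main() {
	// A typed integer constant.
	c := ssa.NewConst(constant.MakeInt64(42), types.Typ[types.Int])
	fmt.Println(c.Name(), c.Int64(), c.IsNil()) // e.g. "42:int 42 false"

	// A nil constant of a reference type: by convention its Value is nil.
	p := ssa.NewConst(nil, types.NewPointer(types.Typ[types.Int]))
	fmt.Println(p.Name(), p.IsNil()) // e.g. "nil:*int true"
}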
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file implements the CREATE phase of SSA construction. +// See builder.go for explanation. + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "os" + "sync" + + "golang.org/x/tools/go/types/typeutil" +) + +// NewProgram returns a new SSA Program. +// +// mode controls diagnostics and checking during SSA construction. +// +func NewProgram(fset *token.FileSet, mode BuilderMode) *Program { + prog := &Program{ + Fset: fset, + imported: make(map[string]*Package), + packages: make(map[*types.Package]*Package), + thunks: make(map[selectionKey]*Function), + bounds: make(map[*types.Func]*Function), + mode: mode, + } + + h := typeutil.MakeHasher() // protected by methodsMu, in effect + prog.methodSets.SetHasher(h) + prog.canon.SetHasher(h) + + return prog +} + +// memberFromObject populates package pkg with a member for the +// typechecker object obj. +// +// For objects from Go source code, syntax is the associated syntax +// tree (for funcs and vars only); it will be used during the build +// phase. +// +func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) { + name := obj.Name() + switch obj := obj.(type) { + case *types.Builtin: + if pkg.Pkg != types.Unsafe { + panic("unexpected builtin object: " + obj.String()) + } + + case *types.TypeName: + pkg.Members[name] = &Type{ + object: obj, + pkg: pkg, + } + + case *types.Const: + c := &NamedConst{ + object: obj, + Value: NewConst(obj.Val(), obj.Type()), + pkg: pkg, + } + pkg.values[obj] = c.Value + pkg.Members[name] = c + + case *types.Var: + g := &Global{ + Pkg: pkg, + name: name, + object: obj, + typ: types.NewPointer(obj.Type()), // address + pos: obj.Pos(), + } + pkg.values[obj] = g + pkg.Members[name] = g + + case *types.Func: + sig := obj.Type().(*types.Signature) + if sig.Recv() == nil && name == "init" { + pkg.ninit++ + name = fmt.Sprintf("init#%d", pkg.ninit) + } + fn := &Function{ + name: name, + object: obj, + Signature: sig, + syntax: syntax, + pos: obj.Pos(), + Pkg: pkg, + Prog: pkg.Prog, + } + if syntax == nil { + fn.Synthetic = "loaded from gc object file" + } + + pkg.values[obj] = fn + if sig.Recv() == nil { + pkg.Members[name] = fn // package-level function + } + + default: // (incl. *types.Package) + panic("unexpected Object type: " + obj.String()) + } +} + +// membersFromDecl populates package pkg with members for each +// typechecker object (var, func, const or type) associated with the +// specified decl. 
+// +func membersFromDecl(pkg *Package, decl ast.Decl) { + switch decl := decl.(type) { + case *ast.GenDecl: // import, const, type or var + switch decl.Tok { + case token.CONST: + for _, spec := range decl.Specs { + for _, id := range spec.(*ast.ValueSpec).Names { + if !isBlankIdent(id) { + memberFromObject(pkg, pkg.info.Defs[id], nil) + } + } + } + + case token.VAR: + for _, spec := range decl.Specs { + for _, id := range spec.(*ast.ValueSpec).Names { + if !isBlankIdent(id) { + memberFromObject(pkg, pkg.info.Defs[id], spec) + } + } + } + + case token.TYPE: + for _, spec := range decl.Specs { + id := spec.(*ast.TypeSpec).Name + if !isBlankIdent(id) { + memberFromObject(pkg, pkg.info.Defs[id], nil) + } + } + } + + case *ast.FuncDecl: + id := decl.Name + if !isBlankIdent(id) { + memberFromObject(pkg, pkg.info.Defs[id], decl) + } + } +} + +// CreatePackage constructs and returns an SSA Package from the +// specified type-checked, error-free file ASTs, and populates its +// Members mapping. +// +// importable determines whether this package should be returned by a +// subsequent call to ImportedPackage(pkg.Path()). +// +// The real work of building SSA form for each function is not done +// until a subsequent call to Package.Build(). +// +func (prog *Program) CreatePackage(pkg *types.Package, files []*ast.File, info *types.Info, importable bool) *Package { + p := &Package{ + Prog: prog, + Members: make(map[string]Member), + values: make(map[types.Object]Value), + Pkg: pkg, + info: info, // transient (CREATE and BUILD phases) + files: files, // transient (CREATE and BUILD phases) + } + + // Add init() function. + p.init = &Function{ + name: "init", + Signature: new(types.Signature), + Synthetic: "package initializer", + Pkg: p, + Prog: prog, + } + p.Members[p.init.name] = p.init + + // CREATE phase. + // Allocate all package members: vars, funcs, consts and types. + if len(files) > 0 { + // Go source package. + for _, file := range files { + for _, decl := range file.Decls { + membersFromDecl(p, decl) + } + } + } else { + // GC-compiled binary package (or "unsafe") + // No code. + // No position information. + scope := p.Pkg.Scope() + for _, name := range scope.Names() { + obj := scope.Lookup(name) + memberFromObject(p, obj, nil) + if obj, ok := obj.(*types.TypeName); ok { + if named, ok := obj.Type().(*types.Named); ok { + for i, n := 0, named.NumMethods(); i < n; i++ { + memberFromObject(p, named.Method(i), nil) + } + } + } + } + } + + if prog.mode&BareInits == 0 { + // Add initializer guard variable. + initguard := &Global{ + Pkg: p, + name: "init$guard", + typ: types.NewPointer(tBool), + } + p.Members[initguard.Name()] = initguard + } + + if prog.mode&GlobalDebug != 0 { + p.SetDebugMode(true) + } + + if prog.mode&PrintPackages != 0 { + printMu.Lock() + p.WriteTo(os.Stdout) + printMu.Unlock() + } + + if importable { + prog.imported[p.Pkg.Path()] = p + } + prog.packages[p.Pkg] = p + + return p +} + +// printMu serializes printing of Packages/Functions to stdout. +var printMu sync.Mutex + +// AllPackages returns a new slice containing all packages in the +// program prog in unspecified order. +// +func (prog *Program) AllPackages() []*Package { + pkgs := make([]*Package, 0, len(prog.packages)) + for _, pkg := range prog.packages { + pkgs = append(pkgs, pkg) + } + return pkgs +} + +// ImportedPackage returns the importable Package whose PkgPath +// is path, or nil if no such Package has been created. 
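A minimal sketch (illustrative only, not part of the vendored files) of the CREATE and BUILD phases from a client's point of view, using the API shown in create.go and builder.go above. The source program is hypothetical and deliberately has no imports, so no prerequisite packages need to be created; SanityCheckFunctions is assumed to be the exported BuilderMode flag consulted in build() above.

package main

import (
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
	"os"

	"honnef.co/go/tools/ssa"
)

const src = `package fib

func Fib(n int) int {
	if n < 2 {
		return n
	}
	return Fib(n-1) + Fib(n-2)
}
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "fib.go", src, 0)
	if err != nil {
		panic(err)
	}

	// Type-check; the resulting Info maps are consulted during CREATE and BUILD.
	info := &types.Info{
		Types:      make(map[ast.Expr]types.TypeAndValue),
		Defs:       make(map[*ast.Ident]types.Object),
		Uses:       make(map[*ast.Ident]types.Object),
		Implicits:  make(map[ast.Node]types.Object),
		Selections: make(map[*ast.SelectorExpr]*types.Selection),
		Scopes:     make(map[ast.Node]*types.Scope),
	}
	var conf types.Config
	tpkg, err := conf.Check("fib", fset, []*ast.File{file}, info)
	if err != nil {
		panic(err)
	}

	// CREATE phase: allocate package members; BUILD phase: emit SSA for bodies.
	prog := ssa.NewProgram(fset, ssa.SanityCheckFunctions)
	pkg := prog.CreatePackage(tpkg, []*ast.File{file}, info, true)
	pkg.Build() // or prog.Build() to build every created package

	pkg.WriteTo(os.Stdout)          // listing of package members
	_ = prog.ImportedPackage("fib") // == pkg, since importable was true
}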
+// +// A parameter to CreatePackage determines whether a package should be +// considered importable. For example, no import declaration can resolve +// to the ad-hoc main package created by 'go build foo.go'. +// +// TODO(adonovan): rethink this function and the "importable" concept; +// most packages are importable. This function assumes that all +// types.Package.Path values are unique within the ssa.Program, which is +// false---yet this function remains very convenient. +// Clients should use (*Program).Package instead where possible. +// SSA doesn't really need a string-keyed map of packages. +// +func (prog *Program) ImportedPackage(path string) *Package { + return prog.imported[path] +} diff --git a/vendor/honnef.co/go/tools/ssa/doc.go b/vendor/honnef.co/go/tools/ssa/doc.go new file mode 100644 index 000000000..0f71fda00 --- /dev/null +++ b/vendor/honnef.co/go/tools/ssa/doc.go @@ -0,0 +1,125 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ssa defines a representation of the elements of Go programs +// (packages, types, functions, variables and constants) using a +// static single-assignment (SSA) form intermediate representation +// (IR) for the bodies of functions. +// +// THIS INTERFACE IS EXPERIMENTAL AND IS LIKELY TO CHANGE. +// +// For an introduction to SSA form, see +// http://en.wikipedia.org/wiki/Static_single_assignment_form. +// This page provides a broader reading list: +// http://www.dcs.gla.ac.uk/~jsinger/ssa.html. +// +// The level of abstraction of the SSA form is intentionally close to +// the source language to facilitate construction of source analysis +// tools. It is not intended for machine code generation. +// +// All looping, branching and switching constructs are replaced with +// unstructured control flow. Higher-level control flow constructs +// such as multi-way branch can be reconstructed as needed; see +// ssautil.Switches() for an example. +// +// The simplest way to create the SSA representation of a package is +// to load typed syntax trees using golang.org/x/tools/go/packages, then +// invoke the ssautil.Packages helper function. See ExampleLoadPackages +// and ExampleWholeProgram for examples. +// The resulting ssa.Program contains all the packages and their +// members, but SSA code is not created for function bodies until a +// subsequent call to (*Package).Build or (*Program).Build. +// +// The builder initially builds a naive SSA form in which all local +// variables are addresses of stack locations with explicit loads and +// stores. Registerisation of eligible locals and φ-node insertion +// using dominance and dataflow are then performed as a second pass +// called "lifting" to improve the accuracy and performance of +// subsequent analyses; this pass can be skipped by setting the +// NaiveForm builder flag. +// +// The primary interfaces of this package are: +// +// - Member: a named member of a Go package. +// - Value: an expression that yields a value. +// - Instruction: a statement that consumes values and performs computation. +// - Node: a Value or Instruction (emphasizing its membership in the SSA value graph) +// +// A computation that yields a result implements both the Value and +// Instruction interfaces. The following table shows for each +// concrete type which of these interfaces it implements. +// +// Value? Instruction? Member? 
+// *Alloc ✔ ✔ +// *BinOp ✔ ✔ +// *Builtin ✔ +// *Call ✔ ✔ +// *ChangeInterface ✔ ✔ +// *ChangeType ✔ ✔ +// *Const ✔ +// *Convert ✔ ✔ +// *DebugRef ✔ +// *Defer ✔ +// *Extract ✔ ✔ +// *Field ✔ ✔ +// *FieldAddr ✔ ✔ +// *FreeVar ✔ +// *Function ✔ ✔ (func) +// *Global ✔ ✔ (var) +// *Go ✔ +// *If ✔ +// *Index ✔ ✔ +// *IndexAddr ✔ ✔ +// *Jump ✔ +// *Lookup ✔ ✔ +// *MakeChan ✔ ✔ +// *MakeClosure ✔ ✔ +// *MakeInterface ✔ ✔ +// *MakeMap ✔ ✔ +// *MakeSlice ✔ ✔ +// *MapUpdate ✔ +// *NamedConst ✔ (const) +// *Next ✔ ✔ +// *Panic ✔ +// *Parameter ✔ +// *Phi ✔ ✔ +// *Range ✔ ✔ +// *Return ✔ +// *RunDefers ✔ +// *Select ✔ ✔ +// *Send ✔ +// *Slice ✔ ✔ +// *Store ✔ +// *Type ✔ (type) +// *TypeAssert ✔ ✔ +// *UnOp ✔ ✔ +// +// Other key types in this package include: Program, Package, Function +// and BasicBlock. +// +// The program representation constructed by this package is fully +// resolved internally, i.e. it does not rely on the names of Values, +// Packages, Functions, Types or BasicBlocks for the correct +// interpretation of the program. Only the identities of objects and +// the topology of the SSA and type graphs are semantically +// significant. (There is one exception: Ids, used to identify field +// and method names, contain strings.) Avoidance of name-based +// operations simplifies the implementation of subsequent passes and +// can make them very efficient. Many objects are nonetheless named +// to aid in debugging, but it is not essential that the names be +// either accurate or unambiguous. The public API exposes a number of +// name-based maps for client convenience. +// +// The ssa/ssautil package provides various utilities that depend only +// on the public API of this package. +// +// TODO(adonovan): Consider the exceptional control-flow implications +// of defer and recover(). +// +// TODO(adonovan): write a how-to document for all the various cases +// of trying to determine corresponding elements across the four +// domains of source locations, ast.Nodes, types.Objects, +// ssa.Values/Instructions. +// +package ssa // import "honnef.co/go/tools/ssa" diff --git a/vendor/honnef.co/go/tools/ssa/dom.go b/vendor/honnef.co/go/tools/ssa/dom.go new file mode 100644 index 000000000..a036be87c --- /dev/null +++ b/vendor/honnef.co/go/tools/ssa/dom.go @@ -0,0 +1,343 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file defines algorithms related to dominance. + +// Dominator tree construction ---------------------------------------- +// +// We use the algorithm described in Lengauer & Tarjan. 1979. A fast +// algorithm for finding dominators in a flowgraph. +// http://doi.acm.org/10.1145/357062.357071 +// +// We also apply the optimizations to SLT described in Georgiadis et +// al, Finding Dominators in Practice, JGAA 2006, +// http://jgaa.info/accepted/2006/GeorgiadisTarjanWerneck2006.10.1.pdf +// to avoid the need for buckets of size > 1. + +import ( + "bytes" + "fmt" + "math/big" + "os" + "sort" +) + +// Idom returns the block that immediately dominates b: +// its parent in the dominator tree, if any. +// Neither the entry node (b.Index==0) nor recover node +// (b==b.Parent().Recover()) have a parent. +// +func (b *BasicBlock) Idom() *BasicBlock { return b.dom.idom } + +// Dominees returns the list of blocks that b immediately dominates: +// its children in the dominator tree. 
+// +func (b *BasicBlock) Dominees() []*BasicBlock { return b.dom.children } + +// Dominates reports whether b dominates c. +func (b *BasicBlock) Dominates(c *BasicBlock) bool { + return b.dom.pre <= c.dom.pre && c.dom.post <= b.dom.post +} + +type byDomPreorder []*BasicBlock + +func (a byDomPreorder) Len() int { return len(a) } +func (a byDomPreorder) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byDomPreorder) Less(i, j int) bool { return a[i].dom.pre < a[j].dom.pre } + +// DomPreorder returns a new slice containing the blocks of f in +// dominator tree preorder. +// +func (f *Function) DomPreorder() []*BasicBlock { + n := len(f.Blocks) + order := make(byDomPreorder, n) + copy(order, f.Blocks) + sort.Sort(order) + return order +} + +// domInfo contains a BasicBlock's dominance information. +type domInfo struct { + idom *BasicBlock // immediate dominator (parent in domtree) + children []*BasicBlock // nodes immediately dominated by this one + pre, post int32 // pre- and post-order numbering within domtree +} + +// ltState holds the working state for Lengauer-Tarjan algorithm +// (during which domInfo.pre is repurposed for CFG DFS preorder number). +type ltState struct { + // Each slice is indexed by b.Index. + sdom []*BasicBlock // b's semidominator + parent []*BasicBlock // b's parent in DFS traversal of CFG + ancestor []*BasicBlock // b's ancestor with least sdom +} + +// dfs implements the depth-first search part of the LT algorithm. +func (lt *ltState) dfs(v *BasicBlock, i int32, preorder []*BasicBlock) int32 { + preorder[i] = v + v.dom.pre = i // For now: DFS preorder of spanning tree of CFG + i++ + lt.sdom[v.Index] = v + lt.link(nil, v) + for _, w := range v.Succs { + if lt.sdom[w.Index] == nil { + lt.parent[w.Index] = v + i = lt.dfs(w, i, preorder) + } + } + return i +} + +// eval implements the EVAL part of the LT algorithm. +func (lt *ltState) eval(v *BasicBlock) *BasicBlock { + // TODO(adonovan): opt: do path compression per simple LT. + u := v + for ; lt.ancestor[v.Index] != nil; v = lt.ancestor[v.Index] { + if lt.sdom[v.Index].dom.pre < lt.sdom[u.Index].dom.pre { + u = v + } + } + return u +} + +// link implements the LINK part of the LT algorithm. +func (lt *ltState) link(v, w *BasicBlock) { + lt.ancestor[w.Index] = v +} + +// buildDomTree computes the dominator tree of f using the LT algorithm. +// Precondition: all blocks are reachable (e.g. optimizeBlocks has been run). +// +func buildDomTree(f *Function) { + // The step numbers refer to the original LT paper; the + // reordering is due to Georgiadis. + + // Clear any previous domInfo. + for _, b := range f.Blocks { + b.dom = domInfo{} + } + + n := len(f.Blocks) + // Allocate space for 5 contiguous [n]*BasicBlock arrays: + // sdom, parent, ancestor, preorder, buckets. + space := make([]*BasicBlock, 5*n) + lt := ltState{ + sdom: space[0:n], + parent: space[n : 2*n], + ancestor: space[2*n : 3*n], + } + + // Step 1. Number vertices by depth-first preorder. + preorder := space[3*n : 4*n] + root := f.Blocks[0] + prenum := lt.dfs(root, 0, preorder) + recover := f.Recover + if recover != nil { + lt.dfs(recover, prenum, preorder) + } + + buckets := space[4*n : 5*n] + copy(buckets, preorder) + + // In reverse preorder... + for i := int32(n) - 1; i > 0; i-- { + w := preorder[i] + + // Step 3. Implicitly define the immediate dominator of each node. + for v := buckets[i]; v != w; v = buckets[v.dom.pre] { + u := lt.eval(v) + if lt.sdom[u.Index].dom.pre < i { + v.dom.idom = u + } else { + v.dom.idom = w + } + } + + // Step 2. 
Compute the semidominators of all nodes. + lt.sdom[w.Index] = lt.parent[w.Index] + for _, v := range w.Preds { + u := lt.eval(v) + if lt.sdom[u.Index].dom.pre < lt.sdom[w.Index].dom.pre { + lt.sdom[w.Index] = lt.sdom[u.Index] + } + } + + lt.link(lt.parent[w.Index], w) + + if lt.parent[w.Index] == lt.sdom[w.Index] { + w.dom.idom = lt.parent[w.Index] + } else { + buckets[i] = buckets[lt.sdom[w.Index].dom.pre] + buckets[lt.sdom[w.Index].dom.pre] = w + } + } + + // The final 'Step 3' is now outside the loop. + for v := buckets[0]; v != root; v = buckets[v.dom.pre] { + v.dom.idom = root + } + + // Step 4. Explicitly define the immediate dominator of each + // node, in preorder. + for _, w := range preorder[1:] { + if w == root || w == recover { + w.dom.idom = nil + } else { + if w.dom.idom != lt.sdom[w.Index] { + w.dom.idom = w.dom.idom.dom.idom + } + // Calculate Children relation as inverse of Idom. + w.dom.idom.dom.children = append(w.dom.idom.dom.children, w) + } + } + + pre, post := numberDomTree(root, 0, 0) + if recover != nil { + numberDomTree(recover, pre, post) + } + + // printDomTreeDot(os.Stderr, f) // debugging + // printDomTreeText(os.Stderr, root, 0) // debugging + + if f.Prog.mode&SanityCheckFunctions != 0 { + sanityCheckDomTree(f) + } +} + +// numberDomTree sets the pre- and post-order numbers of a depth-first +// traversal of the dominator tree rooted at v. These are used to +// answer dominance queries in constant time. +// +func numberDomTree(v *BasicBlock, pre, post int32) (int32, int32) { + v.dom.pre = pre + pre++ + for _, child := range v.dom.children { + pre, post = numberDomTree(child, pre, post) + } + v.dom.post = post + post++ + return pre, post +} + +// Testing utilities ---------------------------------------- + +// sanityCheckDomTree checks the correctness of the dominator tree +// computed by the LT algorithm by comparing against the dominance +// relation computed by a naive Kildall-style forward dataflow +// analysis (Algorithm 10.16 from the "Dragon" book). +// +func sanityCheckDomTree(f *Function) { + n := len(f.Blocks) + + // D[i] is the set of blocks that dominate f.Blocks[i], + // represented as a bit-set of block indices. + D := make([]big.Int, n) + + one := big.NewInt(1) + + // all is the set of all blocks; constant. + var all big.Int + all.Set(one).Lsh(&all, uint(n)).Sub(&all, one) + + // Initialization. + for i, b := range f.Blocks { + if i == 0 || b == f.Recover { + // A root is dominated only by itself. + D[i].SetBit(&D[0], 0, 1) + } else { + // All other blocks are (initially) dominated + // by every block. + D[i].Set(&all) + } + } + + // Iteration until fixed point. + for changed := true; changed; { + changed = false + for i, b := range f.Blocks { + if i == 0 || b == f.Recover { + continue + } + // Compute intersection across predecessors. + var x big.Int + x.Set(&all) + for _, pred := range b.Preds { + x.And(&x, &D[pred.Index]) + } + x.SetBit(&x, i, 1) // a block always dominates itself. + if D[i].Cmp(&x) != 0 { + D[i].Set(&x) + changed = true + } + } + } + + // Check the entire relation. O(n^2). + // The Recover block (if any) must be treated specially so we skip it. 
+ ok := true + for i := 0; i < n; i++ { + for j := 0; j < n; j++ { + b, c := f.Blocks[i], f.Blocks[j] + if c == f.Recover { + continue + } + actual := b.Dominates(c) + expected := D[j].Bit(i) == 1 + if actual != expected { + fmt.Fprintf(os.Stderr, "dominates(%s, %s)==%t, want %t\n", b, c, actual, expected) + ok = false + } + } + } + + preorder := f.DomPreorder() + for _, b := range f.Blocks { + if got := preorder[b.dom.pre]; got != b { + fmt.Fprintf(os.Stderr, "preorder[%d]==%s, want %s\n", b.dom.pre, got, b) + ok = false + } + } + + if !ok { + panic("sanityCheckDomTree failed for " + f.String()) + } + +} + +// Printing functions ---------------------------------------- + +// printDomTree prints the dominator tree as text, using indentation. +//lint:ignore U1000 used during debugging +func printDomTreeText(buf *bytes.Buffer, v *BasicBlock, indent int) { + fmt.Fprintf(buf, "%*s%s\n", 4*indent, "", v) + for _, child := range v.dom.children { + printDomTreeText(buf, child, indent+1) + } +} + +// printDomTreeDot prints the dominator tree of f in AT&T GraphViz +// (.dot) format. +//lint:ignore U1000 used during debugging +func printDomTreeDot(buf *bytes.Buffer, f *Function) { + fmt.Fprintln(buf, "//", f) + fmt.Fprintln(buf, "digraph domtree {") + for i, b := range f.Blocks { + v := b.dom + fmt.Fprintf(buf, "\tn%d [label=\"%s (%d, %d)\",shape=\"rectangle\"];\n", v.pre, b, v.pre, v.post) + // TODO(adonovan): improve appearance of edges + // belonging to both dominator tree and CFG. + + // Dominator tree edge. + if i != 0 { + fmt.Fprintf(buf, "\tn%d -> n%d [style=\"solid\",weight=100];\n", v.idom.dom.pre, v.pre) + } + // CFG edges. + for _, pred := range b.Preds { + fmt.Fprintf(buf, "\tn%d -> n%d [style=\"dotted\",weight=0];\n", pred.dom.pre, v.pre) + } + } + fmt.Fprintln(buf, "}") +} diff --git a/vendor/honnef.co/go/tools/ssa/emit.go b/vendor/honnef.co/go/tools/ssa/emit.go new file mode 100644 index 000000000..6bf9ec32d --- /dev/null +++ b/vendor/honnef.co/go/tools/ssa/emit.go @@ -0,0 +1,469 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// Helpers for emitting SSA instructions. + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" +) + +// emitNew emits to f a new (heap Alloc) instruction allocating an +// object of type typ. pos is the optional source location. +// +func emitNew(f *Function, typ types.Type, pos token.Pos) *Alloc { + v := &Alloc{Heap: true} + v.setType(types.NewPointer(typ)) + v.setPos(pos) + f.emit(v) + return v +} + +// emitLoad emits to f an instruction to load the address addr into a +// new temporary, and returns the value so defined. +// +func emitLoad(f *Function, addr Value) *UnOp { + v := &UnOp{Op: token.MUL, X: addr} + v.setType(deref(addr.Type())) + f.emit(v) + return v +} + +// emitDebugRef emits to f a DebugRef pseudo-instruction associating +// expression e with value v. 
+// +func emitDebugRef(f *Function, e ast.Expr, v Value, isAddr bool) { + if !f.debugInfo() { + return // debugging not enabled + } + if v == nil || e == nil { + panic("nil") + } + var obj types.Object + e = unparen(e) + if id, ok := e.(*ast.Ident); ok { + if isBlankIdent(id) { + return + } + obj = f.Pkg.objectOf(id) + switch obj.(type) { + case *types.Nil, *types.Const, *types.Builtin: + return + } + } + f.emit(&DebugRef{ + X: v, + Expr: e, + IsAddr: isAddr, + object: obj, + }) +} + +// emitArith emits to f code to compute the binary operation op(x, y) +// where op is an eager shift, logical or arithmetic operation. +// (Use emitCompare() for comparisons and Builder.logicalBinop() for +// non-eager operations.) +// +func emitArith(f *Function, op token.Token, x, y Value, t types.Type, pos token.Pos) Value { + switch op { + case token.SHL, token.SHR: + x = emitConv(f, x, t) + // y may be signed or an 'untyped' constant. + // TODO(adonovan): whence signed values? + if b, ok := y.Type().Underlying().(*types.Basic); ok && b.Info()&types.IsUnsigned == 0 { + y = emitConv(f, y, types.Typ[types.Uint64]) + } + + case token.ADD, token.SUB, token.MUL, token.QUO, token.REM, token.AND, token.OR, token.XOR, token.AND_NOT: + x = emitConv(f, x, t) + y = emitConv(f, y, t) + + default: + panic("illegal op in emitArith: " + op.String()) + + } + v := &BinOp{ + Op: op, + X: x, + Y: y, + } + v.setPos(pos) + v.setType(t) + return f.emit(v) +} + +// emitCompare emits to f code compute the boolean result of +// comparison comparison 'x op y'. +// +func emitCompare(f *Function, op token.Token, x, y Value, pos token.Pos) Value { + xt := x.Type().Underlying() + yt := y.Type().Underlying() + + // Special case to optimise a tagless SwitchStmt so that + // these are equivalent + // switch { case e: ...} + // switch true { case e: ... } + // if e==true { ... } + // even in the case when e's type is an interface. + // TODO(adonovan): opt: generalise to x==true, false!=y, etc. + if x == vTrue && op == token.EQL { + if yt, ok := yt.(*types.Basic); ok && yt.Info()&types.IsBoolean != 0 { + return y + } + } + + if types.Identical(xt, yt) { + // no conversion necessary + } else if _, ok := xt.(*types.Interface); ok { + y = emitConv(f, y, x.Type()) + } else if _, ok := yt.(*types.Interface); ok { + x = emitConv(f, x, y.Type()) + } else if _, ok := x.(*Const); ok { + x = emitConv(f, x, y.Type()) + } else if _, ok := y.(*Const); ok { + y = emitConv(f, y, x.Type()) + //lint:ignore SA9003 no-op + } else { + // other cases, e.g. channels. No-op. + } + + v := &BinOp{ + Op: op, + X: x, + Y: y, + } + v.setPos(pos) + v.setType(tBool) + return f.emit(v) +} + +// isValuePreserving returns true if a conversion from ut_src to +// ut_dst is value-preserving, i.e. just a change of type. +// Precondition: neither argument is a named type. +// +func isValuePreserving(ut_src, ut_dst types.Type) bool { + // Identical underlying types? + if structTypesIdentical(ut_dst, ut_src) { + return true + } + + switch ut_dst.(type) { + case *types.Chan: + // Conversion between channel types? + _, ok := ut_src.(*types.Chan) + return ok + + case *types.Pointer: + // Conversion between pointers with identical base types? + _, ok := ut_src.(*types.Pointer) + return ok + } + return false +} + +// emitConv emits to f code to convert Value val to exactly type typ, +// and returns the converted value. Implicit conversions are required +// by language assignability rules in assignments, parameter passing, +// etc. Conversions cannot fail dynamically. 
+// +func emitConv(f *Function, val Value, typ types.Type) Value { + t_src := val.Type() + + // Identical types? Conversion is a no-op. + if types.Identical(t_src, typ) { + return val + } + + ut_dst := typ.Underlying() + ut_src := t_src.Underlying() + + // Just a change of type, but not value or representation? + if isValuePreserving(ut_src, ut_dst) { + c := &ChangeType{X: val} + c.setType(typ) + return f.emit(c) + } + + // Conversion to, or construction of a value of, an interface type? + if _, ok := ut_dst.(*types.Interface); ok { + // Assignment from one interface type to another? + if _, ok := ut_src.(*types.Interface); ok { + c := &ChangeInterface{X: val} + c.setType(typ) + return f.emit(c) + } + + // Untyped nil constant? Return interface-typed nil constant. + if ut_src == tUntypedNil { + return nilConst(typ) + } + + // Convert (non-nil) "untyped" literals to their default type. + if t, ok := ut_src.(*types.Basic); ok && t.Info()&types.IsUntyped != 0 { + val = emitConv(f, val, DefaultType(ut_src)) + } + + f.Pkg.Prog.needMethodsOf(val.Type()) + mi := &MakeInterface{X: val} + mi.setType(typ) + return f.emit(mi) + } + + // Conversion of a compile-time constant value? + if c, ok := val.(*Const); ok { + if _, ok := ut_dst.(*types.Basic); ok || c.IsNil() { + // Conversion of a compile-time constant to + // another constant type results in a new + // constant of the destination type and + // (initially) the same abstract value. + // We don't truncate the value yet. + return NewConst(c.Value, typ) + } + + // We're converting from constant to non-constant type, + // e.g. string -> []byte/[]rune. + } + + // A representation-changing conversion? + // At least one of {ut_src,ut_dst} must be *Basic. + // (The other may be []byte or []rune.) + _, ok1 := ut_src.(*types.Basic) + _, ok2 := ut_dst.(*types.Basic) + if ok1 || ok2 { + c := &Convert{X: val} + c.setType(typ) + return f.emit(c) + } + + panic(fmt.Sprintf("in %s: cannot convert %s (%s) to %s", f, val, val.Type(), typ)) +} + +// emitStore emits to f an instruction to store value val at location +// addr, applying implicit conversions as required by assignability rules. +// +func emitStore(f *Function, addr, val Value, pos token.Pos) *Store { + s := &Store{ + Addr: addr, + Val: emitConv(f, val, deref(addr.Type())), + pos: pos, + } + f.emit(s) + return s +} + +// emitJump emits to f a jump to target, and updates the control-flow graph. +// Postcondition: f.currentBlock is nil. +// +func emitJump(f *Function, target *BasicBlock) { + b := f.currentBlock + b.emit(new(Jump)) + addEdge(b, target) + f.currentBlock = nil +} + +// emitIf emits to f a conditional jump to tblock or fblock based on +// cond, and updates the control-flow graph. +// Postcondition: f.currentBlock is nil. +// +func emitIf(f *Function, cond Value, tblock, fblock *BasicBlock) { + b := f.currentBlock + b.emit(&If{Cond: cond}) + addEdge(b, tblock) + addEdge(b, fblock) + f.currentBlock = nil +} + +// emitExtract emits to f an instruction to extract the index'th +// component of tuple. It returns the extracted value. +// +func emitExtract(f *Function, tuple Value, index int) Value { + e := &Extract{Tuple: tuple, Index: index} + e.setType(tuple.Type().(*types.Tuple).At(index).Type()) + return f.emit(e) +} + +// emitTypeAssert emits to f a type assertion value := x.(t) and +// returns the value. x.Type() must be an interface. 
+// +func emitTypeAssert(f *Function, x Value, t types.Type, pos token.Pos) Value { + a := &TypeAssert{X: x, AssertedType: t} + a.setPos(pos) + a.setType(t) + return f.emit(a) +} + +// emitTypeTest emits to f a type test value,ok := x.(t) and returns +// a (value, ok) tuple. x.Type() must be an interface. +// +func emitTypeTest(f *Function, x Value, t types.Type, pos token.Pos) Value { + a := &TypeAssert{ + X: x, + AssertedType: t, + CommaOk: true, + } + a.setPos(pos) + a.setType(types.NewTuple( + newVar("value", t), + varOk, + )) + return f.emit(a) +} + +// emitTailCall emits to f a function call in tail position. The +// caller is responsible for all fields of 'call' except its type. +// Intended for wrapper methods. +// Precondition: f does/will not use deferred procedure calls. +// Postcondition: f.currentBlock is nil. +// +func emitTailCall(f *Function, call *Call) { + tresults := f.Signature.Results() + nr := tresults.Len() + if nr == 1 { + call.typ = tresults.At(0).Type() + } else { + call.typ = tresults + } + tuple := f.emit(call) + var ret Return + switch nr { + case 0: + // no-op + case 1: + ret.Results = []Value{tuple} + default: + for i := 0; i < nr; i++ { + v := emitExtract(f, tuple, i) + // TODO(adonovan): in principle, this is required: + // v = emitConv(f, o.Type, f.Signature.Results[i].Type) + // but in practice emitTailCall is only used when + // the types exactly match. + ret.Results = append(ret.Results, v) + } + } + f.emit(&ret) + f.currentBlock = nil +} + +// emitImplicitSelections emits to f code to apply the sequence of +// implicit field selections specified by indices to base value v, and +// returns the selected value. +// +// If v is the address of a struct, the result will be the address of +// a field; if it is the value of a struct, the result will be the +// value of a field. +// +func emitImplicitSelections(f *Function, v Value, indices []int) Value { + for _, index := range indices { + fld := deref(v.Type()).Underlying().(*types.Struct).Field(index) + + if isPointer(v.Type()) { + instr := &FieldAddr{ + X: v, + Field: index, + } + instr.setType(types.NewPointer(fld.Type())) + v = f.emit(instr) + // Load the field's value iff indirectly embedded. + if isPointer(fld.Type()) { + v = emitLoad(f, v) + } + } else { + instr := &Field{ + X: v, + Field: index, + } + instr.setType(fld.Type()) + v = f.emit(instr) + } + } + return v +} + +// emitFieldSelection emits to f code to select the index'th field of v. +// +// If wantAddr, the input must be a pointer-to-struct and the result +// will be the field's address; otherwise the result will be the +// field's value. +// Ident id is used for position and debug info. +// +func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast.Ident) Value { + fld := deref(v.Type()).Underlying().(*types.Struct).Field(index) + if isPointer(v.Type()) { + instr := &FieldAddr{ + X: v, + Field: index, + } + instr.setPos(id.Pos()) + instr.setType(types.NewPointer(fld.Type())) + v = f.emit(instr) + // Load the field's value iff we don't want its address. + if !wantAddr { + v = emitLoad(f, v) + } + } else { + instr := &Field{ + X: v, + Field: index, + } + instr.setPos(id.Pos()) + instr.setType(fld.Type()) + v = f.emit(instr) + } + emitDebugRef(f, id, v, wantAddr) + return v +} + +// zeroValue emits to f code to produce a zero value of type t, +// and returns it. 
+// +func zeroValue(f *Function, t types.Type) Value { + switch t.Underlying().(type) { + case *types.Struct, *types.Array: + return emitLoad(f, f.addLocal(t, token.NoPos)) + default: + return zeroConst(t) + } +} + +// createRecoverBlock emits to f a block of code to return after a +// recovered panic, and sets f.Recover to it. +// +// If f's result parameters are named, the code loads and returns +// their current values, otherwise it returns the zero values of their +// type. +// +// Idempotent. +// +func createRecoverBlock(f *Function) { + if f.Recover != nil { + return // already created + } + saved := f.currentBlock + + f.Recover = f.newBasicBlock("recover") + f.currentBlock = f.Recover + + var results []Value + if f.namedResults != nil { + // Reload NRPs to form value tuple. + for _, r := range f.namedResults { + results = append(results, emitLoad(f, r)) + } + } else { + R := f.Signature.Results() + for i, n := 0, R.Len(); i < n; i++ { + T := R.At(i).Type() + + // Return zero value of each result type. + results = append(results, zeroValue(f, T)) + } + } + f.emit(&Return{Results: results}) + + f.currentBlock = saved +} diff --git a/vendor/honnef.co/go/tools/ssa/func.go b/vendor/honnef.co/go/tools/ssa/func.go new file mode 100644 index 000000000..222eea641 --- /dev/null +++ b/vendor/honnef.co/go/tools/ssa/func.go @@ -0,0 +1,765 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file implements the Function and BasicBlock types. + +import ( + "bytes" + "fmt" + "go/ast" + "go/token" + "go/types" + "io" + "os" + "strings" +) + +// addEdge adds a control-flow graph edge from from to to. +func addEdge(from, to *BasicBlock) { + from.Succs = append(from.Succs, to) + to.Preds = append(to.Preds, from) +} + +// Parent returns the function that contains block b. +func (b *BasicBlock) Parent() *Function { return b.parent } + +// String returns a human-readable label of this block. +// It is not guaranteed unique within the function. +// +func (b *BasicBlock) String() string { + return fmt.Sprintf("%d", b.Index) +} + +// emit appends an instruction to the current basic block. +// If the instruction defines a Value, it is returned. +// +func (b *BasicBlock) emit(i Instruction) Value { + i.setBlock(b) + b.Instrs = append(b.Instrs, i) + v, _ := i.(Value) + return v +} + +// predIndex returns the i such that b.Preds[i] == c or panics if +// there is none. +func (b *BasicBlock) predIndex(c *BasicBlock) int { + for i, pred := range b.Preds { + if pred == c { + return i + } + } + panic(fmt.Sprintf("no edge %s -> %s", c, b)) +} + +// hasPhi returns true if b.Instrs contains φ-nodes. +func (b *BasicBlock) hasPhi() bool { + _, ok := b.Instrs[0].(*Phi) + return ok +} + +func (b *BasicBlock) Phis() []Instruction { + return b.phis() +} + +// phis returns the prefix of b.Instrs containing all the block's φ-nodes. +func (b *BasicBlock) phis() []Instruction { + for i, instr := range b.Instrs { + if _, ok := instr.(*Phi); !ok { + return b.Instrs[:i] + } + } + return nil // unreachable in well-formed blocks +} + +// replacePred replaces all occurrences of p in b's predecessor list with q. +// Ordinarily there should be at most one. +// +func (b *BasicBlock) replacePred(p, q *BasicBlock) { + for i, pred := range b.Preds { + if pred == p { + b.Preds[i] = q + } + } +} + +// replaceSucc replaces all occurrences of p in b's successor list with q. 
+// Ordinarily there should be at most one. +// +func (b *BasicBlock) replaceSucc(p, q *BasicBlock) { + for i, succ := range b.Succs { + if succ == p { + b.Succs[i] = q + } + } +} + +func (b *BasicBlock) RemovePred(p *BasicBlock) { + b.removePred(p) +} + +// removePred removes all occurrences of p in b's +// predecessor list and φ-nodes. +// Ordinarily there should be at most one. +// +func (b *BasicBlock) removePred(p *BasicBlock) { + phis := b.phis() + + // We must preserve edge order for φ-nodes. + j := 0 + for i, pred := range b.Preds { + if pred != p { + b.Preds[j] = b.Preds[i] + // Strike out φ-edge too. + for _, instr := range phis { + phi := instr.(*Phi) + phi.Edges[j] = phi.Edges[i] + } + j++ + } + } + // Nil out b.Preds[j:] and φ-edges[j:] to aid GC. + for i := j; i < len(b.Preds); i++ { + b.Preds[i] = nil + for _, instr := range phis { + instr.(*Phi).Edges[i] = nil + } + } + b.Preds = b.Preds[:j] + for _, instr := range phis { + phi := instr.(*Phi) + phi.Edges = phi.Edges[:j] + } +} + +// Destinations associated with unlabelled for/switch/select stmts. +// We push/pop one of these as we enter/leave each construct and for +// each BranchStmt we scan for the innermost target of the right type. +// +type targets struct { + tail *targets // rest of stack + _break *BasicBlock + _continue *BasicBlock + _fallthrough *BasicBlock +} + +// Destinations associated with a labelled block. +// We populate these as labels are encountered in forward gotos or +// labelled statements. +// +type lblock struct { + _goto *BasicBlock + _break *BasicBlock + _continue *BasicBlock +} + +// labelledBlock returns the branch target associated with the +// specified label, creating it if needed. +// +func (f *Function) labelledBlock(label *ast.Ident) *lblock { + lb := f.lblocks[label.Obj] + if lb == nil { + lb = &lblock{_goto: f.newBasicBlock(label.Name)} + if f.lblocks == nil { + f.lblocks = make(map[*ast.Object]*lblock) + } + f.lblocks[label.Obj] = lb + } + return lb +} + +// addParam adds a (non-escaping) parameter to f.Params of the +// specified name, type and source position. +// +func (f *Function) addParam(name string, typ types.Type, pos token.Pos) *Parameter { + v := &Parameter{ + name: name, + typ: typ, + pos: pos, + parent: f, + } + f.Params = append(f.Params, v) + return v +} + +func (f *Function) addParamObj(obj types.Object) *Parameter { + name := obj.Name() + if name == "" { + name = fmt.Sprintf("arg%d", len(f.Params)) + } + param := f.addParam(name, obj.Type(), obj.Pos()) + param.object = obj + return param +} + +// addSpilledParam declares a parameter that is pre-spilled to the +// stack; the function body will load/store the spilled location. +// Subsequent lifting will eliminate spills where possible. +// +func (f *Function) addSpilledParam(obj types.Object) { + param := f.addParamObj(obj) + spill := &Alloc{Comment: obj.Name()} + spill.setType(types.NewPointer(obj.Type())) + spill.setPos(obj.Pos()) + f.objects[obj] = spill + f.Locals = append(f.Locals, spill) + f.emit(spill) + f.emit(&Store{Addr: spill, Val: param}) +} + +// startBody initializes the function prior to generating SSA code for its body. +// Precondition: f.Type() already set. +// +func (f *Function) startBody() { + f.currentBlock = f.newBasicBlock("entry") + f.objects = make(map[types.Object]Value) // needed for some synthetics, e.g. init +} + +// createSyntacticParams populates f.Params and generates code (spills +// and named result locals) for all the parameters declared in the +// syntax. 
In addition it populates the f.objects mapping. +// +// Preconditions: +// f.startBody() was called. +// Postcondition: +// len(f.Params) == len(f.Signature.Params) + (f.Signature.Recv() ? 1 : 0) +// +func (f *Function) createSyntacticParams(recv *ast.FieldList, functype *ast.FuncType) { + // Receiver (at most one inner iteration). + if recv != nil { + for _, field := range recv.List { + for _, n := range field.Names { + f.addSpilledParam(f.Pkg.info.Defs[n]) + } + // Anonymous receiver? No need to spill. + if field.Names == nil { + f.addParamObj(f.Signature.Recv()) + } + } + } + + // Parameters. + if functype.Params != nil { + n := len(f.Params) // 1 if has recv, 0 otherwise + for _, field := range functype.Params.List { + for _, n := range field.Names { + f.addSpilledParam(f.Pkg.info.Defs[n]) + } + // Anonymous parameter? No need to spill. + if field.Names == nil { + f.addParamObj(f.Signature.Params().At(len(f.Params) - n)) + } + } + } + + // Named results. + if functype.Results != nil { + for _, field := range functype.Results.List { + // Implicit "var" decl of locals for named results. + for _, n := range field.Names { + f.namedResults = append(f.namedResults, f.addLocalForIdent(n)) + } + } + } +} + +// numberRegisters assigns numbers to all SSA registers +// (value-defining Instructions) in f, to aid debugging. +// (Non-Instruction Values are named at construction.) +// +func numberRegisters(f *Function) { + v := 0 + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + switch instr.(type) { + case Value: + instr.(interface { + setNum(int) + }).setNum(v) + v++ + } + } + } +} + +// buildReferrers populates the def/use information in all non-nil +// Value.Referrers slice. +// Precondition: all such slices are initially empty. +func buildReferrers(f *Function) { + var rands []*Value + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + rands = instr.Operands(rands[:0]) // recycle storage + for _, rand := range rands { + if r := *rand; r != nil { + if ref := r.Referrers(); ref != nil { + *ref = append(*ref, instr) + } + } + } + } + } +} + +// finishBody() finalizes the function after SSA code generation of its body. +func (f *Function) finishBody() { + f.objects = nil + f.currentBlock = nil + f.lblocks = nil + + // Don't pin the AST in memory (except in debug mode). + if n := f.syntax; n != nil && !f.debugInfo() { + f.syntax = extentNode{n.Pos(), n.End()} + } + + // Remove from f.Locals any Allocs that escape to the heap. + j := 0 + for _, l := range f.Locals { + if !l.Heap { + f.Locals[j] = l + j++ + } + } + // Nil out f.Locals[j:] to aid GC. + for i := j; i < len(f.Locals); i++ { + f.Locals[i] = nil + } + f.Locals = f.Locals[:j] + + // comma-ok receiving from a time.Tick channel will never return + // ok == false, so any branching on the value of ok can be + // replaced with an unconditional jump. This will primarily match + // `for range time.Tick(x)` loops, but it can also match + // user-written code. 
+ for _, block := range f.Blocks { + if len(block.Instrs) < 3 { + continue + } + if len(block.Succs) != 2 { + continue + } + var instrs []*Instruction + for i, ins := range block.Instrs { + if _, ok := ins.(*DebugRef); ok { + continue + } + instrs = append(instrs, &block.Instrs[i]) + } + + for i, ins := range instrs { + unop, ok := (*ins).(*UnOp) + if !ok || unop.Op != token.ARROW { + continue + } + call, ok := unop.X.(*Call) + if !ok { + continue + } + if call.Common().IsInvoke() { + continue + } + + // OPT(dh): surely there is a more efficient way of doing + // this, than using FullName. We should already have + // resolved time.Tick somewhere? + v, ok := call.Common().Value.(*Function) + if !ok { + continue + } + t, ok := v.Object().(*types.Func) + if !ok { + continue + } + if t.FullName() != "time.Tick" { + continue + } + ex, ok := (*instrs[i+1]).(*Extract) + if !ok || ex.Tuple != unop || ex.Index != 1 { + continue + } + + ifstmt, ok := (*instrs[i+2]).(*If) + if !ok || ifstmt.Cond != ex { + continue + } + + *instrs[i+2] = NewJump(block) + succ := block.Succs[1] + block.Succs = block.Succs[0:1] + succ.RemovePred(block) + } + } + + optimizeBlocks(f) + + buildReferrers(f) + + buildDomTree(f) + + if f.Prog.mode&NaiveForm == 0 { + // For debugging pre-state of lifting pass: + // numberRegisters(f) + // f.WriteTo(os.Stderr) + lift(f) + } + + f.namedResults = nil // (used by lifting) + + numberRegisters(f) + + if f.Prog.mode&PrintFunctions != 0 { + printMu.Lock() + f.WriteTo(os.Stdout) + printMu.Unlock() + } + + if f.Prog.mode&SanityCheckFunctions != 0 { + mustSanityCheck(f, nil) + } +} + +func (f *Function) RemoveNilBlocks() { + f.removeNilBlocks() +} + +// removeNilBlocks eliminates nils from f.Blocks and updates each +// BasicBlock.Index. Use this after any pass that may delete blocks. +// +func (f *Function) removeNilBlocks() { + j := 0 + for _, b := range f.Blocks { + if b != nil { + b.Index = j + f.Blocks[j] = b + j++ + } + } + // Nil out f.Blocks[j:] to aid GC. + for i := j; i < len(f.Blocks); i++ { + f.Blocks[i] = nil + } + f.Blocks = f.Blocks[:j] +} + +// SetDebugMode sets the debug mode for package pkg. If true, all its +// functions will include full debug info. This greatly increases the +// size of the instruction stream, and causes Functions to depend upon +// the ASTs, potentially keeping them live in memory for longer. +// +func (pkg *Package) SetDebugMode(debug bool) { + // TODO(adonovan): do we want ast.File granularity? + pkg.debug = debug +} + +// debugInfo reports whether debug info is wanted for this function. +func (f *Function) debugInfo() bool { + return f.Pkg != nil && f.Pkg.debug +} + +// addNamedLocal creates a local variable, adds it to function f and +// returns it. Its name and type are taken from obj. Subsequent +// calls to f.lookup(obj) will return the same local. +// +func (f *Function) addNamedLocal(obj types.Object) *Alloc { + l := f.addLocal(obj.Type(), obj.Pos()) + l.Comment = obj.Name() + f.objects[obj] = l + return l +} + +func (f *Function) addLocalForIdent(id *ast.Ident) *Alloc { + return f.addNamedLocal(f.Pkg.info.Defs[id]) +} + +// addLocal creates an anonymous local variable of type typ, adds it +// to function f and returns it. pos is the optional source location. 
+// +func (f *Function) addLocal(typ types.Type, pos token.Pos) *Alloc { + v := &Alloc{} + v.setType(types.NewPointer(typ)) + v.setPos(pos) + f.Locals = append(f.Locals, v) + f.emit(v) + return v +} + +// lookup returns the address of the named variable identified by obj +// that is local to function f or one of its enclosing functions. +// If escaping, the reference comes from a potentially escaping pointer +// expression and the referent must be heap-allocated. +// +func (f *Function) lookup(obj types.Object, escaping bool) Value { + if v, ok := f.objects[obj]; ok { + if alloc, ok := v.(*Alloc); ok && escaping { + alloc.Heap = true + } + return v // function-local var (address) + } + + // Definition must be in an enclosing function; + // plumb it through intervening closures. + if f.parent == nil { + panic("no ssa.Value for " + obj.String()) + } + outer := f.parent.lookup(obj, true) // escaping + v := &FreeVar{ + name: obj.Name(), + typ: outer.Type(), + pos: outer.Pos(), + outer: outer, + parent: f, + } + f.objects[obj] = v + f.FreeVars = append(f.FreeVars, v) + return v +} + +// emit emits the specified instruction to function f. +func (f *Function) emit(instr Instruction) Value { + return f.currentBlock.emit(instr) +} + +// RelString returns the full name of this function, qualified by +// package name, receiver type, etc. +// +// The specific formatting rules are not guaranteed and may change. +// +// Examples: +// "math.IsNaN" // a package-level function +// "(*bytes.Buffer).Bytes" // a declared method or a wrapper +// "(*bytes.Buffer).Bytes$thunk" // thunk (func wrapping method; receiver is param 0) +// "(*bytes.Buffer).Bytes$bound" // bound (func wrapping method; receiver supplied by closure) +// "main.main$1" // an anonymous function in main +// "main.init#1" // a declared init function +// "main.init" // the synthesized package initializer +// +// When these functions are referred to from within the same package +// (i.e. from == f.Pkg.Object), they are rendered without the package path. +// For example: "IsNaN", "(*Buffer).Bytes", etc. +// +// All non-synthetic functions have distinct package-qualified names. +// (But two methods may have the same name "(T).f" if one is a synthetic +// wrapper promoting a non-exported method "f" from another package; in +// that case, the strings are equal but the identifiers "f" are distinct.) +// +func (f *Function) RelString(from *types.Package) string { + // Anonymous? + if f.parent != nil { + // An anonymous function's Name() looks like "parentName$1", + // but its String() should include the type/package/etc. + parent := f.parent.RelString(from) + for i, anon := range f.parent.AnonFuncs { + if anon == f { + return fmt.Sprintf("%s$%d", parent, 1+i) + } + } + + return f.name // should never happen + } + + // Method (declared or wrapper)? + if recv := f.Signature.Recv(); recv != nil { + return f.relMethod(from, recv.Type()) + } + + // Thunk? + if f.method != nil { + return f.relMethod(from, f.method.Recv()) + } + + // Bound? + if len(f.FreeVars) == 1 && strings.HasSuffix(f.name, "$bound") { + return f.relMethod(from, f.FreeVars[0].Type()) + } + + // Package-level function? + // Prefix with package name for cross-package references only. + if p := f.pkg(); p != nil && p != from { + return fmt.Sprintf("%s.%s", p.Path(), f.name) + } + + // Unknown. 
+ return f.name +} + +func (f *Function) relMethod(from *types.Package, recv types.Type) string { + return fmt.Sprintf("(%s).%s", relType(recv, from), f.name) +} + +// writeSignature writes to buf the signature sig in declaration syntax. +func writeSignature(buf *bytes.Buffer, from *types.Package, name string, sig *types.Signature, params []*Parameter) { + buf.WriteString("func ") + if recv := sig.Recv(); recv != nil { + buf.WriteString("(") + if n := params[0].Name(); n != "" { + buf.WriteString(n) + buf.WriteString(" ") + } + types.WriteType(buf, params[0].Type(), types.RelativeTo(from)) + buf.WriteString(") ") + } + buf.WriteString(name) + types.WriteSignature(buf, sig, types.RelativeTo(from)) +} + +func (f *Function) pkg() *types.Package { + if f.Pkg != nil { + return f.Pkg.Pkg + } + return nil +} + +var _ io.WriterTo = (*Function)(nil) // *Function implements io.Writer + +func (f *Function) WriteTo(w io.Writer) (int64, error) { + var buf bytes.Buffer + WriteFunction(&buf, f) + n, err := w.Write(buf.Bytes()) + return int64(n), err +} + +// WriteFunction writes to buf a human-readable "disassembly" of f. +func WriteFunction(buf *bytes.Buffer, f *Function) { + fmt.Fprintf(buf, "# Name: %s\n", f.String()) + if f.Pkg != nil { + fmt.Fprintf(buf, "# Package: %s\n", f.Pkg.Pkg.Path()) + } + if syn := f.Synthetic; syn != "" { + fmt.Fprintln(buf, "# Synthetic:", syn) + } + if pos := f.Pos(); pos.IsValid() { + fmt.Fprintf(buf, "# Location: %s\n", f.Prog.Fset.Position(pos)) + } + + if f.parent != nil { + fmt.Fprintf(buf, "# Parent: %s\n", f.parent.Name()) + } + + if f.Recover != nil { + fmt.Fprintf(buf, "# Recover: %s\n", f.Recover) + } + + from := f.pkg() + + if f.FreeVars != nil { + buf.WriteString("# Free variables:\n") + for i, fv := range f.FreeVars { + fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, fv.Name(), relType(fv.Type(), from)) + } + } + + if len(f.Locals) > 0 { + buf.WriteString("# Locals:\n") + for i, l := range f.Locals { + fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, l.Name(), relType(deref(l.Type()), from)) + } + } + writeSignature(buf, from, f.Name(), f.Signature, f.Params) + buf.WriteString(":\n") + + if f.Blocks == nil { + buf.WriteString("\t(external)\n") + } + + // NB. column calculations are confused by non-ASCII + // characters and assume 8-space tabs. + const punchcard = 80 // for old time's sake. + const tabwidth = 8 + for _, b := range f.Blocks { + if b == nil { + // Corrupt CFG. + fmt.Fprintf(buf, ".nil:\n") + continue + } + n, _ := fmt.Fprintf(buf, "%d:", b.Index) + bmsg := fmt.Sprintf("%s P:%d S:%d", b.Comment, len(b.Preds), len(b.Succs)) + fmt.Fprintf(buf, "%*s%s\n", punchcard-1-n-len(bmsg), "", bmsg) + + if false { // CFG debugging + fmt.Fprintf(buf, "\t# CFG: %s --> %s --> %s\n", b.Preds, b, b.Succs) + } + for _, instr := range b.Instrs { + buf.WriteString("\t") + switch v := instr.(type) { + case Value: + l := punchcard - tabwidth + // Left-align the instruction. + if name := v.Name(); name != "" { + n, _ := fmt.Fprintf(buf, "%s = ", name) + l -= n + } + n, _ := buf.WriteString(instr.String()) + l -= n + // Right-align the type if there's space. + if t := v.Type(); t != nil { + buf.WriteByte(' ') + ts := relType(t, from) + l -= len(ts) + len(" ") // (spaces before and after type) + if l > 0 { + fmt.Fprintf(buf, "%*s", l, "") + } + buf.WriteString(ts) + } + case nil: + // Be robust against bad transforms. 
+ buf.WriteString("<deleted>")
+ default:
+ buf.WriteString(instr.String())
+ }
+ buf.WriteString("\n")
+ }
+ }
+ fmt.Fprintf(buf, "\n")
+}
+
+// newBasicBlock adds to f a new basic block and returns it. It does
+// not automatically become the current block for subsequent calls to emit.
+// comment is an optional string for more readable debugging output.
+//
+func (f *Function) newBasicBlock(comment string) *BasicBlock {
+ b := &BasicBlock{
+ Index: len(f.Blocks),
+ Comment: comment,
+ parent: f,
+ }
+ b.Succs = b.succs2[:0]
+ f.Blocks = append(f.Blocks, b)
+ return b
+}
+
+// NewFunction returns a new synthetic Function instance belonging to
+// prog, with its name and signature fields set as specified.
+//
+// The caller is responsible for initializing the remaining fields of
+// the function object, e.g. Pkg, Params, Blocks.
+//
+// It is practically impossible for clients to construct well-formed
+// SSA functions/packages/programs directly, so we assume this is the
+// job of the Builder alone. NewFunction exists to provide clients a
+// little flexibility. For example, analysis tools may wish to
+// construct fake Functions for the root of the callgraph, a fake
+// "reflect" package, etc.
+//
+// TODO(adonovan): think harder about the API here.
+//
+func (prog *Program) NewFunction(name string, sig *types.Signature, provenance string) *Function {
+ return &Function{Prog: prog, name: name, Signature: sig, Synthetic: provenance}
+}
+
+type extentNode [2]token.Pos
+
+func (n extentNode) Pos() token.Pos { return n[0] }
+func (n extentNode) End() token.Pos { return n[1] }
+
+// Syntax returns an ast.Node whose Pos/End methods provide the
+// lexical extent of the function if it was defined by Go source code
+// (f.Synthetic==""), or nil otherwise.
+//
+// If f was built with debug information (see Package.SetDebugRef),
+// the result is the *ast.FuncDecl or *ast.FuncLit that declared the
+// function. Otherwise, it is an opaque Node providing only position
+// information; this avoids pinning the AST in memory.
+//
+func (f *Function) Syntax() ast.Node { return f.syntax }
diff --git a/vendor/honnef.co/go/tools/ssa/identical.go b/vendor/honnef.co/go/tools/ssa/identical.go
new file mode 100644
index 000000000..53cbee107
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/identical.go
@@ -0,0 +1,7 @@
+// +build go1.8
+
+package ssa
+
+import "go/types"
+
+var structTypesIdentical = types.IdenticalIgnoreTags
diff --git a/vendor/honnef.co/go/tools/ssa/identical_17.go b/vendor/honnef.co/go/tools/ssa/identical_17.go
new file mode 100644
index 000000000..da89d3339
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/identical_17.go
@@ -0,0 +1,7 @@
+// +build !go1.8
+
+package ssa
+
+import "go/types"
+
+var structTypesIdentical = types.Identical
diff --git a/vendor/honnef.co/go/tools/ssa/lift.go b/vendor/honnef.co/go/tools/ssa/lift.go
new file mode 100644
index 000000000..531358fa3
--- /dev/null
+++ b/vendor/honnef.co/go/tools/ssa/lift.go
@@ -0,0 +1,657 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This file defines the lifting pass which tries to "lift" Alloc
+// cells (new/local variables) into SSA registers, replacing loads
+// with the dominating stored value, eliminating loads and stores, and
+// inserting φ-nodes as needed.
+
+// Cited papers and resources:
+//
+// Ron Cytron et al. 1991. Efficiently computing SSA form...
+// http://doi.acm.org/10.1145/115372.115320 +// +// Cooper, Harvey, Kennedy. 2001. A Simple, Fast Dominance Algorithm. +// Software Practice and Experience 2001, 4:1-10. +// http://www.hipersoft.rice.edu/grads/publications/dom14.pdf +// +// Daniel Berlin, llvmdev mailing list, 2012. +// http://lists.cs.uiuc.edu/pipermail/llvmdev/2012-January/046638.html +// (Be sure to expand the whole thread.) + +// TODO(adonovan): opt: there are many optimizations worth evaluating, and +// the conventional wisdom for SSA construction is that a simple +// algorithm well engineered often beats those of better asymptotic +// complexity on all but the most egregious inputs. +// +// Danny Berlin suggests that the Cooper et al. algorithm for +// computing the dominance frontier is superior to Cytron et al. +// Furthermore he recommends that rather than computing the DF for the +// whole function then renaming all alloc cells, it may be cheaper to +// compute the DF for each alloc cell separately and throw it away. +// +// Consider exploiting liveness information to avoid creating dead +// φ-nodes which we then immediately remove. +// +// Also see many other "TODO: opt" suggestions in the code. + +import ( + "fmt" + "go/token" + "go/types" + "math/big" + "os" +) + +// If true, show diagnostic information at each step of lifting. +// Very verbose. +const debugLifting = false + +// domFrontier maps each block to the set of blocks in its dominance +// frontier. The outer slice is conceptually a map keyed by +// Block.Index. The inner slice is conceptually a set, possibly +// containing duplicates. +// +// TODO(adonovan): opt: measure impact of dups; consider a packed bit +// representation, e.g. big.Int, and bitwise parallel operations for +// the union step in the Children loop. +// +// domFrontier's methods mutate the slice's elements but not its +// length, so their receivers needn't be pointers. +// +type domFrontier [][]*BasicBlock + +func (df domFrontier) add(u, v *BasicBlock) { + p := &df[u.Index] + *p = append(*p, v) +} + +// build builds the dominance frontier df for the dominator (sub)tree +// rooted at u, using the Cytron et al. algorithm. +// +// TODO(adonovan): opt: consider Berlin approach, computing pruned SSA +// by pruning the entire IDF computation, rather than merely pruning +// the DF -> IDF step. +func (df domFrontier) build(u *BasicBlock) { + // Encounter each node u in postorder of dom tree. + for _, child := range u.dom.children { + df.build(child) + } + for _, vb := range u.Succs { + if v := vb.dom; v.idom != u { + df.add(u, vb) + } + } + for _, w := range u.dom.children { + for _, vb := range df[w.Index] { + // TODO(adonovan): opt: use word-parallel bitwise union. + if v := vb.dom; v.idom != u { + df.add(u, vb) + } + } + } +} + +func buildDomFrontier(fn *Function) domFrontier { + df := make(domFrontier, len(fn.Blocks)) + df.build(fn.Blocks[0]) + if fn.Recover != nil { + df.build(fn.Recover) + } + return df +} + +func removeInstr(refs []Instruction, instr Instruction) []Instruction { + i := 0 + for _, ref := range refs { + if ref == instr { + continue + } + refs[i] = ref + i++ + } + for j := i; j != len(refs); j++ { + refs[j] = nil // aid GC + } + return refs[:i] +} + +// lift replaces local and new Allocs accessed only with +// load/store by SSA registers, inserting φ-nodes where necessary. +// The result is a program in classical pruned SSA form. +// +// Preconditions: +// - fn has no dead blocks (blockopt has run). +// - Def/use info (Operands and Referrers) is up-to-date. 
+// - The dominator tree is up-to-date. +// +func lift(fn *Function) { + // TODO(adonovan): opt: lots of little optimizations may be + // worthwhile here, especially if they cause us to avoid + // buildDomFrontier. For example: + // + // - Alloc never loaded? Eliminate. + // - Alloc never stored? Replace all loads with a zero constant. + // - Alloc stored once? Replace loads with dominating store; + // don't forget that an Alloc is itself an effective store + // of zero. + // - Alloc used only within a single block? + // Use degenerate algorithm avoiding φ-nodes. + // - Consider synergy with scalar replacement of aggregates (SRA). + // e.g. *(&x.f) where x is an Alloc. + // Perhaps we'd get better results if we generated this as x.f + // i.e. Field(x, .f) instead of Load(FieldIndex(x, .f)). + // Unclear. + // + // But we will start with the simplest correct code. + df := buildDomFrontier(fn) + + if debugLifting { + title := false + for i, blocks := range df { + if blocks != nil { + if !title { + fmt.Fprintf(os.Stderr, "Dominance frontier of %s:\n", fn) + title = true + } + fmt.Fprintf(os.Stderr, "\t%s: %s\n", fn.Blocks[i], blocks) + } + } + } + + newPhis := make(newPhiMap) + + // During this pass we will replace some BasicBlock.Instrs + // (allocs, loads and stores) with nil, keeping a count in + // BasicBlock.gaps. At the end we will reset Instrs to the + // concatenation of all non-dead newPhis and non-nil Instrs + // for the block, reusing the original array if space permits. + + // While we're here, we also eliminate 'rundefers' + // instructions in functions that contain no 'defer' + // instructions. + usesDefer := false + + // A counter used to generate ~unique ids for Phi nodes, as an + // aid to debugging. We use large numbers to make them highly + // visible. All nodes are renumbered later. + fresh := 1000 + + // Determine which allocs we can lift and number them densely. + // The renaming phase uses this numbering for compact maps. + numAllocs := 0 + for _, b := range fn.Blocks { + b.gaps = 0 + b.rundefers = 0 + for _, instr := range b.Instrs { + switch instr := instr.(type) { + case *Alloc: + index := -1 + if liftAlloc(df, instr, newPhis, &fresh) { + index = numAllocs + numAllocs++ + } + instr.index = index + case *Defer: + usesDefer = true + case *RunDefers: + b.rundefers++ + } + } + } + + // renaming maps an alloc (keyed by index) to its replacement + // value. Initially the renaming contains nil, signifying the + // zero constant of the appropriate type; we construct the + // Const lazily at most once on each path through the domtree. + // TODO(adonovan): opt: cache per-function not per subtree. + renaming := make([]Value, numAllocs) + + // Renaming. + rename(fn.Blocks[0], renaming, newPhis) + + // Eliminate dead φ-nodes. + removeDeadPhis(fn.Blocks, newPhis) + + // Prepend remaining live φ-nodes to each block. + for _, b := range fn.Blocks { + nps := newPhis[b] + j := len(nps) + + rundefersToKill := b.rundefers + if usesDefer { + rundefersToKill = 0 + } + + if j+b.gaps+rundefersToKill == 0 { + continue // fast path: no new phis or gaps + } + + // Compact nps + non-nil Instrs into a new slice. + // TODO(adonovan): opt: compact in situ (rightwards) + // if Instrs has sufficient space or slack. 
+ dst := make([]Instruction, len(b.Instrs)+j-b.gaps-rundefersToKill) + for i, np := range nps { + dst[i] = np.phi + } + for _, instr := range b.Instrs { + if instr == nil { + continue + } + if !usesDefer { + if _, ok := instr.(*RunDefers); ok { + continue + } + } + dst[j] = instr + j++ + } + b.Instrs = dst + } + + // Remove any fn.Locals that were lifted. + j := 0 + for _, l := range fn.Locals { + if l.index < 0 { + fn.Locals[j] = l + j++ + } + } + // Nil out fn.Locals[j:] to aid GC. + for i := j; i < len(fn.Locals); i++ { + fn.Locals[i] = nil + } + fn.Locals = fn.Locals[:j] +} + +// removeDeadPhis removes φ-nodes not transitively needed by a +// non-Phi, non-DebugRef instruction. +func removeDeadPhis(blocks []*BasicBlock, newPhis newPhiMap) { + // First pass: find the set of "live" φ-nodes: those reachable + // from some non-Phi instruction. + // + // We compute reachability in reverse, starting from each φ, + // rather than forwards, starting from each live non-Phi + // instruction, because this way visits much less of the + // Value graph. + livePhis := make(map[*Phi]bool) + for _, npList := range newPhis { + for _, np := range npList { + phi := np.phi + if !livePhis[phi] && phiHasDirectReferrer(phi) { + markLivePhi(livePhis, phi) + } + } + } + + // Existing φ-nodes due to && and || operators + // are all considered live (see Go issue 19622). + for _, b := range blocks { + for _, phi := range b.phis() { + markLivePhi(livePhis, phi.(*Phi)) + } + } + + // Second pass: eliminate unused phis from newPhis. + for block, npList := range newPhis { + j := 0 + for _, np := range npList { + if livePhis[np.phi] { + npList[j] = np + j++ + } else { + // discard it, first removing it from referrers + for _, val := range np.phi.Edges { + if refs := val.Referrers(); refs != nil { + *refs = removeInstr(*refs, np.phi) + } + } + np.phi.block = nil + } + } + newPhis[block] = npList[:j] + } +} + +// markLivePhi marks phi, and all φ-nodes transitively reachable via +// its Operands, live. +func markLivePhi(livePhis map[*Phi]bool, phi *Phi) { + livePhis[phi] = true + for _, rand := range phi.Operands(nil) { + if q, ok := (*rand).(*Phi); ok { + if !livePhis[q] { + markLivePhi(livePhis, q) + } + } + } +} + +// phiHasDirectReferrer reports whether phi is directly referred to by +// a non-Phi instruction. Such instructions are the +// roots of the liveness traversal. +func phiHasDirectReferrer(phi *Phi) bool { + for _, instr := range *phi.Referrers() { + if _, ok := instr.(*Phi); !ok { + return true + } + } + return false +} + +type BlockSet struct{ big.Int } // (inherit methods from Int) + +// add adds b to the set and returns true if the set changed. +func (s *BlockSet) Add(b *BasicBlock) bool { + i := b.Index + if s.Bit(i) != 0 { + return false + } + s.SetBit(&s.Int, i, 1) + return true +} + +func (s *BlockSet) Has(b *BasicBlock) bool { + return s.Bit(b.Index) == 1 +} + +// take removes an arbitrary element from a set s and +// returns its index, or returns -1 if empty. +func (s *BlockSet) Take() int { + l := s.BitLen() + for i := 0; i < l; i++ { + if s.Bit(i) == 1 { + s.SetBit(&s.Int, i, 0) + return i + } + } + return -1 +} + +// newPhi is a pair of a newly introduced φ-node and the lifted Alloc +// it replaces. +type newPhi struct { + phi *Phi + alloc *Alloc +} + +// newPhiMap records for each basic block, the set of newPhis that +// must be prepended to the block. 
+type newPhiMap map[*BasicBlock][]newPhi + +// liftAlloc determines whether alloc can be lifted into registers, +// and if so, it populates newPhis with all the φ-nodes it may require +// and returns true. +// +// fresh is a source of fresh ids for phi nodes. +// +func liftAlloc(df domFrontier, alloc *Alloc, newPhis newPhiMap, fresh *int) bool { + // Don't lift aggregates into registers, because we don't have + // a way to express their zero-constants. + switch deref(alloc.Type()).Underlying().(type) { + case *types.Array, *types.Struct: + return false + } + + // Don't lift named return values in functions that defer + // calls that may recover from panic. + if fn := alloc.Parent(); fn.Recover != nil { + for _, nr := range fn.namedResults { + if nr == alloc { + return false + } + } + } + + // Compute defblocks, the set of blocks containing a + // definition of the alloc cell. + var defblocks BlockSet + for _, instr := range *alloc.Referrers() { + // Bail out if we discover the alloc is not liftable; + // the only operations permitted to use the alloc are + // loads/stores into the cell, and DebugRef. + switch instr := instr.(type) { + case *Store: + if instr.Val == alloc { + return false // address used as value + } + if instr.Addr != alloc { + panic("Alloc.Referrers is inconsistent") + } + defblocks.Add(instr.Block()) + case *UnOp: + if instr.Op != token.MUL { + return false // not a load + } + if instr.X != alloc { + panic("Alloc.Referrers is inconsistent") + } + case *DebugRef: + // ok + default: + return false // some other instruction + } + } + // The Alloc itself counts as a (zero) definition of the cell. + defblocks.Add(alloc.Block()) + + if debugLifting { + fmt.Fprintln(os.Stderr, "\tlifting ", alloc, alloc.Name()) + } + + fn := alloc.Parent() + + // Φ-insertion. + // + // What follows is the body of the main loop of the insert-φ + // function described by Cytron et al, but instead of using + // counter tricks, we just reset the 'hasAlready' and 'work' + // sets each iteration. These are bitmaps so it's pretty cheap. + // + // TODO(adonovan): opt: recycle slice storage for W, + // hasAlready, defBlocks across liftAlloc calls. + var hasAlready BlockSet + + // Initialize W and work to defblocks. + var work BlockSet = defblocks // blocks seen + var W BlockSet // blocks to do + W.Set(&defblocks.Int) + + // Traverse iterated dominance frontier, inserting φ-nodes. + for i := W.Take(); i != -1; i = W.Take() { + u := fn.Blocks[i] + for _, v := range df[u.Index] { + if hasAlready.Add(v) { + // Create φ-node. + // It will be prepended to v.Instrs later, if needed. + phi := &Phi{ + Edges: make([]Value, len(v.Preds)), + Comment: alloc.Comment, + } + // This is merely a debugging aid: + phi.setNum(*fresh) + *fresh++ + + phi.pos = alloc.Pos() + phi.setType(deref(alloc.Type())) + phi.block = v + if debugLifting { + fmt.Fprintf(os.Stderr, "\tplace %s = %s at block %s\n", phi.Name(), phi, v) + } + newPhis[v] = append(newPhis[v], newPhi{phi, alloc}) + + if work.Add(v) { + W.Add(v) + } + } + } + } + + return true +} + +// replaceAll replaces all intraprocedural uses of x with y, +// updating x.Referrers and y.Referrers. +// Precondition: x.Referrers() != nil, i.e. x must be local to some function. 
+// +func replaceAll(x, y Value) { + var rands []*Value + pxrefs := x.Referrers() + pyrefs := y.Referrers() + for _, instr := range *pxrefs { + rands = instr.Operands(rands[:0]) // recycle storage + for _, rand := range rands { + if *rand != nil { + if *rand == x { + *rand = y + } + } + } + if pyrefs != nil { + *pyrefs = append(*pyrefs, instr) // dups ok + } + } + *pxrefs = nil // x is now unreferenced +} + +// renamed returns the value to which alloc is being renamed, +// constructing it lazily if it's the implicit zero initialization. +// +func renamed(renaming []Value, alloc *Alloc) Value { + v := renaming[alloc.index] + if v == nil { + v = zeroConst(deref(alloc.Type())) + renaming[alloc.index] = v + } + return v +} + +// rename implements the (Cytron et al) SSA renaming algorithm, a +// preorder traversal of the dominator tree replacing all loads of +// Alloc cells with the value stored to that cell by the dominating +// store instruction. For lifting, we need only consider loads, +// stores and φ-nodes. +// +// renaming is a map from *Alloc (keyed by index number) to its +// dominating stored value; newPhis[x] is the set of new φ-nodes to be +// prepended to block x. +// +func rename(u *BasicBlock, renaming []Value, newPhis newPhiMap) { + // Each φ-node becomes the new name for its associated Alloc. + for _, np := range newPhis[u] { + phi := np.phi + alloc := np.alloc + renaming[alloc.index] = phi + } + + // Rename loads and stores of allocs. + for i, instr := range u.Instrs { + switch instr := instr.(type) { + case *Alloc: + if instr.index >= 0 { // store of zero to Alloc cell + // Replace dominated loads by the zero value. + renaming[instr.index] = nil + if debugLifting { + fmt.Fprintf(os.Stderr, "\tkill alloc %s\n", instr) + } + // Delete the Alloc. + u.Instrs[i] = nil + u.gaps++ + } + + case *Store: + if alloc, ok := instr.Addr.(*Alloc); ok && alloc.index >= 0 { // store to Alloc cell + // Replace dominated loads by the stored value. + renaming[alloc.index] = instr.Val + if debugLifting { + fmt.Fprintf(os.Stderr, "\tkill store %s; new value: %s\n", + instr, instr.Val.Name()) + } + // Remove the store from the referrer list of the stored value. + if refs := instr.Val.Referrers(); refs != nil { + *refs = removeInstr(*refs, instr) + } + // Delete the Store. + u.Instrs[i] = nil + u.gaps++ + } + + case *UnOp: + if instr.Op == token.MUL { + if alloc, ok := instr.X.(*Alloc); ok && alloc.index >= 0 { // load of Alloc cell + newval := renamed(renaming, alloc) + if debugLifting { + fmt.Fprintf(os.Stderr, "\tupdate load %s = %s with %s\n", + instr.Name(), instr, newval.Name()) + } + // Replace all references to + // the loaded value by the + // dominating stored value. + replaceAll(instr, newval) + // Delete the Load. + u.Instrs[i] = nil + u.gaps++ + } + } + + case *DebugRef: + if alloc, ok := instr.X.(*Alloc); ok && alloc.index >= 0 { // ref of Alloc cell + if instr.IsAddr { + instr.X = renamed(renaming, alloc) + instr.IsAddr = false + + // Add DebugRef to instr.X's referrers. + if refs := instr.X.Referrers(); refs != nil { + *refs = append(*refs, instr) + } + } else { + // A source expression denotes the address + // of an Alloc that was optimized away. + instr.X = nil + + // Delete the DebugRef. + u.Instrs[i] = nil + u.gaps++ + } + } + } + } + + // For each φ-node in a CFG successor, rename the edge. 
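+	// The edge to fill is the one at position v.predIndex(u), i.e. u's slot
+	// among v's predecessors; the value flowing along that edge also gains
+	// the φ-node as a referrer.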
+ for _, v := range u.Succs { + phis := newPhis[v] + if len(phis) == 0 { + continue + } + i := v.predIndex(u) + for _, np := range phis { + phi := np.phi + alloc := np.alloc + newval := renamed(renaming, alloc) + if debugLifting { + fmt.Fprintf(os.Stderr, "\tsetphi %s edge %s -> %s (#%d) (alloc=%s) := %s\n", + phi.Name(), u, v, i, alloc.Name(), newval.Name()) + } + phi.Edges[i] = newval + if prefs := newval.Referrers(); prefs != nil { + *prefs = append(*prefs, phi) + } + } + } + + // Continue depth-first recursion over domtree, pushing a + // fresh copy of the renaming map for each subtree. + for i, v := range u.dom.children { + r := renaming + if i < len(u.dom.children)-1 { + // On all but the final iteration, we must make + // a copy to avoid destructive update. + r = make([]Value, len(renaming)) + copy(r, renaming) + } + rename(v, r, newPhis) + } + +} diff --git a/vendor/honnef.co/go/tools/ssa/lvalue.go b/vendor/honnef.co/go/tools/ssa/lvalue.go new file mode 100644 index 000000000..eb5d71e18 --- /dev/null +++ b/vendor/honnef.co/go/tools/ssa/lvalue.go @@ -0,0 +1,123 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// lvalues are the union of addressable expressions and map-index +// expressions. + +import ( + "go/ast" + "go/token" + "go/types" +) + +// An lvalue represents an assignable location that may appear on the +// left-hand side of an assignment. This is a generalization of a +// pointer to permit updates to elements of maps. +// +type lvalue interface { + store(fn *Function, v Value) // stores v into the location + load(fn *Function) Value // loads the contents of the location + address(fn *Function) Value // address of the location + typ() types.Type // returns the type of the location +} + +// An address is an lvalue represented by a true pointer. +type address struct { + addr Value + pos token.Pos // source position + expr ast.Expr // source syntax of the value (not address) [debug mode] +} + +func (a *address) load(fn *Function) Value { + load := emitLoad(fn, a.addr) + load.pos = a.pos + return load +} + +func (a *address) store(fn *Function, v Value) { + store := emitStore(fn, a.addr, v, a.pos) + if a.expr != nil { + // store.Val is v, converted for assignability. + emitDebugRef(fn, a.expr, store.Val, false) + } +} + +func (a *address) address(fn *Function) Value { + if a.expr != nil { + emitDebugRef(fn, a.expr, a.addr, true) + } + return a.addr +} + +func (a *address) typ() types.Type { + return deref(a.addr.Type()) +} + +// An element is an lvalue represented by m[k], the location of an +// element of a map or string. These locations are not addressable +// since pointers cannot be formed from them, but they do support +// load(), and in the case of maps, store(). 
+// +type element struct { + m, k Value // map or string + t types.Type // map element type or string byte type + pos token.Pos // source position of colon ({k:v}) or lbrack (m[k]=v) +} + +func (e *element) load(fn *Function) Value { + l := &Lookup{ + X: e.m, + Index: e.k, + } + l.setPos(e.pos) + l.setType(e.t) + return fn.emit(l) +} + +func (e *element) store(fn *Function, v Value) { + up := &MapUpdate{ + Map: e.m, + Key: e.k, + Value: emitConv(fn, v, e.t), + } + up.pos = e.pos + fn.emit(up) +} + +func (e *element) address(fn *Function) Value { + panic("map/string elements are not addressable") +} + +func (e *element) typ() types.Type { + return e.t +} + +// A blank is a dummy variable whose name is "_". +// It is not reified: loads are illegal and stores are ignored. +// +type blank struct{} + +func (bl blank) load(fn *Function) Value { + panic("blank.load is illegal") +} + +func (bl blank) store(fn *Function, v Value) { + s := &BlankStore{ + Val: v, + } + fn.emit(s) +} + +func (bl blank) address(fn *Function) Value { + panic("blank var is not addressable") +} + +func (bl blank) typ() types.Type { + // This should be the type of the blank Ident; the typechecker + // doesn't provide this yet, but fortunately, we don't need it + // yet either. + panic("blank.typ is unimplemented") +} diff --git a/vendor/honnef.co/go/tools/ssa/methods.go b/vendor/honnef.co/go/tools/ssa/methods.go new file mode 100644 index 000000000..9cf383916 --- /dev/null +++ b/vendor/honnef.co/go/tools/ssa/methods.go @@ -0,0 +1,239 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file defines utilities for population of method sets. + +import ( + "fmt" + "go/types" +) + +// MethodValue returns the Function implementing method sel, building +// wrapper methods on demand. It returns nil if sel denotes an +// abstract (interface) method. +// +// Precondition: sel.Kind() == MethodVal. +// +// Thread-safe. +// +// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) +// +func (prog *Program) MethodValue(sel *types.Selection) *Function { + if sel.Kind() != types.MethodVal { + panic(fmt.Sprintf("MethodValue(%s) kind != MethodVal", sel)) + } + T := sel.Recv() + if isInterface(T) { + return nil // abstract method + } + if prog.mode&LogSource != 0 { + defer logStack("MethodValue %s %v", T, sel)() + } + + prog.methodsMu.Lock() + defer prog.methodsMu.Unlock() + + return prog.addMethod(prog.createMethodSet(T), sel) +} + +// LookupMethod returns the implementation of the method of type T +// identified by (pkg, name). It returns nil if the method exists but +// is abstract, and panics if T has no such method. +// +func (prog *Program) LookupMethod(T types.Type, pkg *types.Package, name string) *Function { + sel := prog.MethodSets.MethodSet(T).Lookup(pkg, name) + if sel == nil { + panic(fmt.Sprintf("%s has no method %s", T, types.Id(pkg, name))) + } + return prog.MethodValue(sel) +} + +// methodSet contains the (concrete) methods of a non-interface type. +type methodSet struct { + mapping map[string]*Function // populated lazily + complete bool // mapping contains all methods +} + +// Precondition: !isInterface(T). 
+// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) +func (prog *Program) createMethodSet(T types.Type) *methodSet { + mset, ok := prog.methodSets.At(T).(*methodSet) + if !ok { + mset = &methodSet{mapping: make(map[string]*Function)} + prog.methodSets.Set(T, mset) + } + return mset +} + +// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) +func (prog *Program) addMethod(mset *methodSet, sel *types.Selection) *Function { + if sel.Kind() == types.MethodExpr { + panic(sel) + } + id := sel.Obj().Id() + fn := mset.mapping[id] + if fn == nil { + obj := sel.Obj().(*types.Func) + + needsPromotion := len(sel.Index()) > 1 + needsIndirection := !isPointer(recvType(obj)) && isPointer(sel.Recv()) + if needsPromotion || needsIndirection { + fn = makeWrapper(prog, sel) + } else { + fn = prog.declaredFunc(obj) + } + if fn.Signature.Recv() == nil { + panic(fn) // missing receiver + } + mset.mapping[id] = fn + } + return fn +} + +// RuntimeTypes returns a new unordered slice containing all +// concrete types in the program for which a complete (non-empty) +// method set is required at run-time. +// +// Thread-safe. +// +// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) +// +func (prog *Program) RuntimeTypes() []types.Type { + prog.methodsMu.Lock() + defer prog.methodsMu.Unlock() + + var res []types.Type + prog.methodSets.Iterate(func(T types.Type, v interface{}) { + if v.(*methodSet).complete { + res = append(res, T) + } + }) + return res +} + +// declaredFunc returns the concrete function/method denoted by obj. +// Panic ensues if there is none. +// +func (prog *Program) declaredFunc(obj *types.Func) *Function { + if v := prog.packageLevelValue(obj); v != nil { + return v.(*Function) + } + panic("no concrete method: " + obj.String()) +} + +// needMethodsOf ensures that runtime type information (including the +// complete method set) is available for the specified type T and all +// its subcomponents. +// +// needMethodsOf must be called for at least every type that is an +// operand of some MakeInterface instruction, and for the type of +// every exported package member. +// +// Precondition: T is not a method signature (*Signature with Recv()!=nil). +// +// Thread-safe. (Called via emitConv from multiple builder goroutines.) +// +// TODO(adonovan): make this faster. It accounts for 20% of SSA build time. +// +// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) +// +func (prog *Program) needMethodsOf(T types.Type) { + prog.methodsMu.Lock() + prog.needMethods(T, false) + prog.methodsMu.Unlock() +} + +// Precondition: T is not a method signature (*Signature with Recv()!=nil). +// Recursive case: skip => don't create methods for T. +// +// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) +// +func (prog *Program) needMethods(T types.Type, skip bool) { + // Each package maintains its own set of types it has visited. + if prevSkip, ok := prog.runtimeTypes.At(T).(bool); ok { + // needMethods(T) was previously called + if !prevSkip || skip { + return // already seen, with same or false 'skip' value + } + } + prog.runtimeTypes.Set(T, skip) + + tmset := prog.MethodSets.MethodSet(T) + + if !skip && !isInterface(T) && tmset.Len() > 0 { + // Create methods of T. + mset := prog.createMethodSet(T) + if !mset.complete { + mset.complete = true + n := tmset.Len() + for i := 0; i < n; i++ { + prog.addMethod(mset, tmset.At(i)) + } + } + } + + // Recursion over signatures of each method. 
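+	// Even when T's own methods are skipped, the parameter and result
+	// tuples of its methods may mention further types whose run-time
+	// method sets are needed.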
+ for i := 0; i < tmset.Len(); i++ { + sig := tmset.At(i).Type().(*types.Signature) + prog.needMethods(sig.Params(), false) + prog.needMethods(sig.Results(), false) + } + + switch t := T.(type) { + case *types.Basic: + // nop + + case *types.Interface: + // nop---handled by recursion over method set. + + case *types.Pointer: + prog.needMethods(t.Elem(), false) + + case *types.Slice: + prog.needMethods(t.Elem(), false) + + case *types.Chan: + prog.needMethods(t.Elem(), false) + + case *types.Map: + prog.needMethods(t.Key(), false) + prog.needMethods(t.Elem(), false) + + case *types.Signature: + if t.Recv() != nil { + panic(fmt.Sprintf("Signature %s has Recv %s", t, t.Recv())) + } + prog.needMethods(t.Params(), false) + prog.needMethods(t.Results(), false) + + case *types.Named: + // A pointer-to-named type can be derived from a named + // type via reflection. It may have methods too. + prog.needMethods(types.NewPointer(T), false) + + // Consider 'type T struct{S}' where S has methods. + // Reflection provides no way to get from T to struct{S}, + // only to S, so the method set of struct{S} is unwanted, + // so set 'skip' flag during recursion. + prog.needMethods(t.Underlying(), true) + + case *types.Array: + prog.needMethods(t.Elem(), false) + + case *types.Struct: + for i, n := 0, t.NumFields(); i < n; i++ { + prog.needMethods(t.Field(i).Type(), false) + } + + case *types.Tuple: + for i, n := 0, t.Len(); i < n; i++ { + prog.needMethods(t.At(i).Type(), false) + } + + default: + panic(T) + } +} diff --git a/vendor/honnef.co/go/tools/ssa/mode.go b/vendor/honnef.co/go/tools/ssa/mode.go new file mode 100644 index 000000000..d2a269893 --- /dev/null +++ b/vendor/honnef.co/go/tools/ssa/mode.go @@ -0,0 +1,100 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file defines the BuilderMode type and its command-line flag. + +import ( + "bytes" + "fmt" +) + +// BuilderMode is a bitmask of options for diagnostics and checking. +// +// *BuilderMode satisfies the flag.Value interface. Example: +// +// var mode = ssa.BuilderMode(0) +// func init() { flag.Var(&mode, "build", ssa.BuilderModeDoc) } +// +type BuilderMode uint + +const ( + PrintPackages BuilderMode = 1 << iota // Print package inventory to stdout + PrintFunctions // Print function SSA code to stdout + LogSource // Log source locations as SSA builder progresses + SanityCheckFunctions // Perform sanity checking of function bodies + NaiveForm // Build naïve SSA form: don't replace local loads/stores with registers + BuildSerially // Build packages serially, not in parallel. + GlobalDebug // Enable debug info for all packages + BareInits // Build init functions without guards or calls to dependent inits +) + +const BuilderModeDoc = `Options controlling the SSA builder. +The value is a sequence of zero or more of these letters: +C perform sanity [C]hecking of the SSA form. +D include [D]ebug info for every function. +P print [P]ackage inventory. +F print [F]unction SSA code. +S log [S]ource locations as SSA builder progresses. +L build distinct packages seria[L]ly instead of in parallel. +N build [N]aive SSA form: don't replace local loads/stores with registers. +I build bare [I]nit functions: no init guards or calls to dependent inits. 
+` + +func (m BuilderMode) String() string { + var buf bytes.Buffer + if m&GlobalDebug != 0 { + buf.WriteByte('D') + } + if m&PrintPackages != 0 { + buf.WriteByte('P') + } + if m&PrintFunctions != 0 { + buf.WriteByte('F') + } + if m&LogSource != 0 { + buf.WriteByte('S') + } + if m&SanityCheckFunctions != 0 { + buf.WriteByte('C') + } + if m&NaiveForm != 0 { + buf.WriteByte('N') + } + if m&BuildSerially != 0 { + buf.WriteByte('L') + } + return buf.String() +} + +// Set parses the flag characters in s and updates *m. +func (m *BuilderMode) Set(s string) error { + var mode BuilderMode + for _, c := range s { + switch c { + case 'D': + mode |= GlobalDebug + case 'P': + mode |= PrintPackages + case 'F': + mode |= PrintFunctions + case 'S': + mode |= LogSource | BuildSerially + case 'C': + mode |= SanityCheckFunctions + case 'N': + mode |= NaiveForm + case 'L': + mode |= BuildSerially + default: + return fmt.Errorf("unknown BuilderMode option: %q", c) + } + } + *m = mode + return nil +} + +// Get returns m. +func (m BuilderMode) Get() interface{} { return m } diff --git a/vendor/honnef.co/go/tools/ssa/print.go b/vendor/honnef.co/go/tools/ssa/print.go new file mode 100644 index 000000000..6fd277277 --- /dev/null +++ b/vendor/honnef.co/go/tools/ssa/print.go @@ -0,0 +1,435 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file implements the String() methods for all Value and +// Instruction types. + +import ( + "bytes" + "fmt" + "go/types" + "io" + "reflect" + "sort" + + "golang.org/x/tools/go/types/typeutil" +) + +// relName returns the name of v relative to i. +// In most cases, this is identical to v.Name(), but references to +// Functions (including methods) and Globals use RelString and +// all types are displayed with relType, so that only cross-package +// references are package-qualified. +// +func relName(v Value, i Instruction) string { + var from *types.Package + if i != nil { + from = i.Parent().pkg() + } + switch v := v.(type) { + case Member: // *Function or *Global + return v.RelString(from) + case *Const: + return v.RelString(from) + } + return v.Name() +} + +func relType(t types.Type, from *types.Package) string { + return types.TypeString(t, types.RelativeTo(from)) +} + +func relString(m Member, from *types.Package) string { + // NB: not all globals have an Object (e.g. init$guard), + // so use Package().Object not Object.Package(). + if pkg := m.Package().Pkg; pkg != nil && pkg != from { + return fmt.Sprintf("%s.%s", pkg.Path(), m.Name()) + } + return m.Name() +} + +// Value.String() +// +// This method is provided only for debugging. +// It never appears in disassembly, which uses Value.Name(). 
+ +func (v *Parameter) String() string { + from := v.Parent().pkg() + return fmt.Sprintf("parameter %s : %s", v.Name(), relType(v.Type(), from)) +} + +func (v *FreeVar) String() string { + from := v.Parent().pkg() + return fmt.Sprintf("freevar %s : %s", v.Name(), relType(v.Type(), from)) +} + +func (v *Builtin) String() string { + return fmt.Sprintf("builtin %s", v.Name()) +} + +// Instruction.String() + +func (v *Alloc) String() string { + op := "local" + if v.Heap { + op = "new" + } + from := v.Parent().pkg() + return fmt.Sprintf("%s %s (%s)", op, relType(deref(v.Type()), from), v.Comment) +} + +func (v *Phi) String() string { + var b bytes.Buffer + b.WriteString("phi [") + for i, edge := range v.Edges { + if i > 0 { + b.WriteString(", ") + } + // Be robust against malformed CFG. + if v.block == nil { + b.WriteString("??") + continue + } + block := -1 + if i < len(v.block.Preds) { + block = v.block.Preds[i].Index + } + fmt.Fprintf(&b, "%d: ", block) + edgeVal := "" // be robust + if edge != nil { + edgeVal = relName(edge, v) + } + b.WriteString(edgeVal) + } + b.WriteString("]") + if v.Comment != "" { + b.WriteString(" #") + b.WriteString(v.Comment) + } + return b.String() +} + +func printCall(v *CallCommon, prefix string, instr Instruction) string { + var b bytes.Buffer + b.WriteString(prefix) + if !v.IsInvoke() { + b.WriteString(relName(v.Value, instr)) + } else { + fmt.Fprintf(&b, "invoke %s.%s", relName(v.Value, instr), v.Method.Name()) + } + b.WriteString("(") + for i, arg := range v.Args { + if i > 0 { + b.WriteString(", ") + } + b.WriteString(relName(arg, instr)) + } + if v.Signature().Variadic() { + b.WriteString("...") + } + b.WriteString(")") + return b.String() +} + +func (c *CallCommon) String() string { + return printCall(c, "", nil) +} + +func (v *Call) String() string { + return printCall(&v.Call, "", v) +} + +func (v *BinOp) String() string { + return fmt.Sprintf("%s %s %s", relName(v.X, v), v.Op.String(), relName(v.Y, v)) +} + +func (v *UnOp) String() string { + return fmt.Sprintf("%s%s%s", v.Op, relName(v.X, v), commaOk(v.CommaOk)) +} + +func printConv(prefix string, v, x Value) string { + from := v.Parent().pkg() + return fmt.Sprintf("%s %s <- %s (%s)", + prefix, + relType(v.Type(), from), + relType(x.Type(), from), + relName(x, v.(Instruction))) +} + +func (v *ChangeType) String() string { return printConv("changetype", v, v.X) } +func (v *Convert) String() string { return printConv("convert", v, v.X) } +func (v *ChangeInterface) String() string { return printConv("change interface", v, v.X) } +func (v *MakeInterface) String() string { return printConv("make", v, v.X) } + +func (v *MakeClosure) String() string { + var b bytes.Buffer + fmt.Fprintf(&b, "make closure %s", relName(v.Fn, v)) + if v.Bindings != nil { + b.WriteString(" [") + for i, c := range v.Bindings { + if i > 0 { + b.WriteString(", ") + } + b.WriteString(relName(c, v)) + } + b.WriteString("]") + } + return b.String() +} + +func (v *MakeSlice) String() string { + from := v.Parent().pkg() + return fmt.Sprintf("make %s %s %s", + relType(v.Type(), from), + relName(v.Len, v), + relName(v.Cap, v)) +} + +func (v *Slice) String() string { + var b bytes.Buffer + b.WriteString("slice ") + b.WriteString(relName(v.X, v)) + b.WriteString("[") + if v.Low != nil { + b.WriteString(relName(v.Low, v)) + } + b.WriteString(":") + if v.High != nil { + b.WriteString(relName(v.High, v)) + } + if v.Max != nil { + b.WriteString(":") + b.WriteString(relName(v.Max, v)) + } + b.WriteString("]") + return b.String() +} + +func (v 
*MakeMap) String() string { + res := "" + if v.Reserve != nil { + res = relName(v.Reserve, v) + } + from := v.Parent().pkg() + return fmt.Sprintf("make %s %s", relType(v.Type(), from), res) +} + +func (v *MakeChan) String() string { + from := v.Parent().pkg() + return fmt.Sprintf("make %s %s", relType(v.Type(), from), relName(v.Size, v)) +} + +func (v *FieldAddr) String() string { + st := deref(v.X.Type()).Underlying().(*types.Struct) + // Be robust against a bad index. + name := "?" + if 0 <= v.Field && v.Field < st.NumFields() { + name = st.Field(v.Field).Name() + } + return fmt.Sprintf("&%s.%s [#%d]", relName(v.X, v), name, v.Field) +} + +func (v *Field) String() string { + st := v.X.Type().Underlying().(*types.Struct) + // Be robust against a bad index. + name := "?" + if 0 <= v.Field && v.Field < st.NumFields() { + name = st.Field(v.Field).Name() + } + return fmt.Sprintf("%s.%s [#%d]", relName(v.X, v), name, v.Field) +} + +func (v *IndexAddr) String() string { + return fmt.Sprintf("&%s[%s]", relName(v.X, v), relName(v.Index, v)) +} + +func (v *Index) String() string { + return fmt.Sprintf("%s[%s]", relName(v.X, v), relName(v.Index, v)) +} + +func (v *Lookup) String() string { + return fmt.Sprintf("%s[%s]%s", relName(v.X, v), relName(v.Index, v), commaOk(v.CommaOk)) +} + +func (v *Range) String() string { + return "range " + relName(v.X, v) +} + +func (v *Next) String() string { + return "next " + relName(v.Iter, v) +} + +func (v *TypeAssert) String() string { + from := v.Parent().pkg() + return fmt.Sprintf("typeassert%s %s.(%s)", commaOk(v.CommaOk), relName(v.X, v), relType(v.AssertedType, from)) +} + +func (v *Extract) String() string { + return fmt.Sprintf("extract %s #%d", relName(v.Tuple, v), v.Index) +} + +func (s *Jump) String() string { + // Be robust against malformed CFG. + block := -1 + if s.block != nil && len(s.block.Succs) == 1 { + block = s.block.Succs[0].Index + } + return fmt.Sprintf("jump %d", block) +} + +func (s *If) String() string { + // Be robust against malformed CFG. 
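+	// A well-formed If block has exactly two successors (then, else);
+	// -1 is printed for blocks whose CFG edges are not yet in place.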
+ tblock, fblock := -1, -1 + if s.block != nil && len(s.block.Succs) == 2 { + tblock = s.block.Succs[0].Index + fblock = s.block.Succs[1].Index + } + return fmt.Sprintf("if %s goto %d else %d", relName(s.Cond, s), tblock, fblock) +} + +func (s *Go) String() string { + return printCall(&s.Call, "go ", s) +} + +func (s *Panic) String() string { + return "panic " + relName(s.X, s) +} + +func (s *Return) String() string { + var b bytes.Buffer + b.WriteString("return") + for i, r := range s.Results { + if i == 0 { + b.WriteString(" ") + } else { + b.WriteString(", ") + } + b.WriteString(relName(r, s)) + } + return b.String() +} + +func (*RunDefers) String() string { + return "rundefers" +} + +func (s *Send) String() string { + return fmt.Sprintf("send %s <- %s", relName(s.Chan, s), relName(s.X, s)) +} + +func (s *Defer) String() string { + return printCall(&s.Call, "defer ", s) +} + +func (s *Select) String() string { + var b bytes.Buffer + for i, st := range s.States { + if i > 0 { + b.WriteString(", ") + } + if st.Dir == types.RecvOnly { + b.WriteString("<-") + b.WriteString(relName(st.Chan, s)) + } else { + b.WriteString(relName(st.Chan, s)) + b.WriteString("<-") + b.WriteString(relName(st.Send, s)) + } + } + non := "" + if !s.Blocking { + non = "non" + } + return fmt.Sprintf("select %sblocking [%s]", non, b.String()) +} + +func (s *Store) String() string { + return fmt.Sprintf("*%s = %s", relName(s.Addr, s), relName(s.Val, s)) +} + +func (s *BlankStore) String() string { + return fmt.Sprintf("_ = %s", relName(s.Val, s)) +} + +func (s *MapUpdate) String() string { + return fmt.Sprintf("%s[%s] = %s", relName(s.Map, s), relName(s.Key, s), relName(s.Value, s)) +} + +func (s *DebugRef) String() string { + p := s.Parent().Prog.Fset.Position(s.Pos()) + var descr interface{} + if s.object != nil { + descr = s.object // e.g. "var x int" + } else { + descr = reflect.TypeOf(s.Expr) // e.g. "*ast.CallExpr" + } + var addr string + if s.IsAddr { + addr = "address of " + } + return fmt.Sprintf("; %s%s @ %d:%d is %s", addr, descr, p.Line, p.Column, s.X.Name()) +} + +func (p *Package) String() string { + return "package " + p.Pkg.Path() +} + +var _ io.WriterTo = (*Package)(nil) // *Package implements io.Writer + +func (p *Package) WriteTo(w io.Writer) (int64, error) { + var buf bytes.Buffer + WritePackage(&buf, p) + n, err := w.Write(buf.Bytes()) + return int64(n), err +} + +// WritePackage writes to buf a human-readable summary of p. 
+func WritePackage(buf *bytes.Buffer, p *Package) { + fmt.Fprintf(buf, "%s:\n", p) + + var names []string + maxname := 0 + for name := range p.Members { + if l := len(name); l > maxname { + maxname = l + } + names = append(names, name) + } + + from := p.Pkg + sort.Strings(names) + for _, name := range names { + switch mem := p.Members[name].(type) { + case *NamedConst: + fmt.Fprintf(buf, " const %-*s %s = %s\n", + maxname, name, mem.Name(), mem.Value.RelString(from)) + + case *Function: + fmt.Fprintf(buf, " func %-*s %s\n", + maxname, name, relType(mem.Type(), from)) + + case *Type: + fmt.Fprintf(buf, " type %-*s %s\n", + maxname, name, relType(mem.Type().Underlying(), from)) + for _, meth := range typeutil.IntuitiveMethodSet(mem.Type(), &p.Prog.MethodSets) { + fmt.Fprintf(buf, " %s\n", types.SelectionString(meth, types.RelativeTo(from))) + } + + case *Global: + fmt.Fprintf(buf, " var %-*s %s\n", + maxname, name, relType(mem.Type().(*types.Pointer).Elem(), from)) + } + } + + fmt.Fprintf(buf, "\n") +} + +func commaOk(x bool) string { + if x { + return ",ok" + } + return "" +} diff --git a/vendor/honnef.co/go/tools/ssa/sanity.go b/vendor/honnef.co/go/tools/ssa/sanity.go new file mode 100644 index 000000000..1d29b66b0 --- /dev/null +++ b/vendor/honnef.co/go/tools/ssa/sanity.go @@ -0,0 +1,535 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// An optional pass for sanity-checking invariants of the SSA representation. +// Currently it checks CFG invariants but little at the instruction level. + +import ( + "fmt" + "go/types" + "io" + "os" + "strings" +) + +type sanity struct { + reporter io.Writer + fn *Function + block *BasicBlock + instrs map[Instruction]struct{} + insane bool +} + +// sanityCheck performs integrity checking of the SSA representation +// of the function fn and returns true if it was valid. Diagnostics +// are written to reporter if non-nil, os.Stderr otherwise. Some +// diagnostics are only warnings and do not imply a negative result. +// +// Sanity-checking is intended to facilitate the debugging of code +// transformation passes. +// +func sanityCheck(fn *Function, reporter io.Writer) bool { + if reporter == nil { + reporter = os.Stderr + } + return (&sanity{reporter: reporter}).checkFunction(fn) +} + +// mustSanityCheck is like sanityCheck but panics instead of returning +// a negative result. +// +func mustSanityCheck(fn *Function, reporter io.Writer) { + if !sanityCheck(fn, reporter) { + fn.WriteTo(os.Stderr) + panic("SanityCheck failed") + } +} + +func (s *sanity) diagnostic(prefix, format string, args ...interface{}) { + fmt.Fprintf(s.reporter, "%s: function %s", prefix, s.fn) + if s.block != nil { + fmt.Fprintf(s.reporter, ", block %s", s.block) + } + io.WriteString(s.reporter, ": ") + fmt.Fprintf(s.reporter, format, args...) + io.WriteString(s.reporter, "\n") +} + +func (s *sanity) errorf(format string, args ...interface{}) { + s.insane = true + s.diagnostic("Error", format, args...) +} + +func (s *sanity) warnf(format string, args ...interface{}) { + s.diagnostic("Warning", format, args...) +} + +// findDuplicate returns an arbitrary basic block that appeared more +// than once in blocks, or nil if all were unique. 
+func findDuplicate(blocks []*BasicBlock) *BasicBlock { + if len(blocks) < 2 { + return nil + } + if blocks[0] == blocks[1] { + return blocks[0] + } + // Slow path: + m := make(map[*BasicBlock]bool) + for _, b := range blocks { + if m[b] { + return b + } + m[b] = true + } + return nil +} + +func (s *sanity) checkInstr(idx int, instr Instruction) { + switch instr := instr.(type) { + case *If, *Jump, *Return, *Panic: + s.errorf("control flow instruction not at end of block") + case *Phi: + if idx == 0 { + // It suffices to apply this check to just the first phi node. + if dup := findDuplicate(s.block.Preds); dup != nil { + s.errorf("phi node in block with duplicate predecessor %s", dup) + } + } else { + prev := s.block.Instrs[idx-1] + if _, ok := prev.(*Phi); !ok { + s.errorf("Phi instruction follows a non-Phi: %T", prev) + } + } + if ne, np := len(instr.Edges), len(s.block.Preds); ne != np { + s.errorf("phi node has %d edges but %d predecessors", ne, np) + + } else { + for i, e := range instr.Edges { + if e == nil { + s.errorf("phi node '%s' has no value for edge #%d from %s", instr.Comment, i, s.block.Preds[i]) + } + } + } + + case *Alloc: + if !instr.Heap { + found := false + for _, l := range s.fn.Locals { + if l == instr { + found = true + break + } + } + if !found { + s.errorf("local alloc %s = %s does not appear in Function.Locals", instr.Name(), instr) + } + } + + case *BinOp: + case *Call: + case *ChangeInterface: + case *ChangeType: + case *Convert: + if _, ok := instr.X.Type().Underlying().(*types.Basic); !ok { + if _, ok := instr.Type().Underlying().(*types.Basic); !ok { + s.errorf("convert %s -> %s: at least one type must be basic", instr.X.Type(), instr.Type()) + } + } + + case *Defer: + case *Extract: + case *Field: + case *FieldAddr: + case *Go: + case *Index: + case *IndexAddr: + case *Lookup: + case *MakeChan: + case *MakeClosure: + numFree := len(instr.Fn.(*Function).FreeVars) + numBind := len(instr.Bindings) + if numFree != numBind { + s.errorf("MakeClosure has %d Bindings for function %s with %d free vars", + numBind, instr.Fn, numFree) + + } + if recv := instr.Type().(*types.Signature).Recv(); recv != nil { + s.errorf("MakeClosure's type includes receiver %s", recv.Type()) + } + + case *MakeInterface: + case *MakeMap: + case *MakeSlice: + case *MapUpdate: + case *Next: + case *Range: + case *RunDefers: + case *Select: + case *Send: + case *Slice: + case *Store: + case *TypeAssert: + case *UnOp: + case *DebugRef: + case *BlankStore: + case *Sigma: + // TODO(adonovan): implement checks. + default: + panic(fmt.Sprintf("Unknown instruction type: %T", instr)) + } + + if call, ok := instr.(CallInstruction); ok { + if call.Common().Signature() == nil { + s.errorf("nil signature: %s", call) + } + } + + // Check that value-defining instructions have valid types + // and a valid referrer list. + if v, ok := instr.(Value); ok { + t := v.Type() + if t == nil { + s.errorf("no type: %s = %s", v.Name(), v) + } else if t == tRangeIter { + // not a proper type; ignore. + } else if b, ok := t.Underlying().(*types.Basic); ok && b.Info()&types.IsUntyped != 0 { + s.errorf("instruction has 'untyped' result: %s = %s : %s", v.Name(), v, t) + } + s.checkReferrerList(v) + } + + // Untyped constants are legal as instruction Operands(), + // for example: + // _ = "foo"[0] + // or: + // if wordsize==64 {...} + + // All other non-Instruction Values can be found via their + // enclosing Function or Package. 
+} + +func (s *sanity) checkFinalInstr(instr Instruction) { + switch instr := instr.(type) { + case *If: + if nsuccs := len(s.block.Succs); nsuccs != 2 { + s.errorf("If-terminated block has %d successors; expected 2", nsuccs) + return + } + if s.block.Succs[0] == s.block.Succs[1] { + s.errorf("If-instruction has same True, False target blocks: %s", s.block.Succs[0]) + return + } + + case *Jump: + if nsuccs := len(s.block.Succs); nsuccs != 1 { + s.errorf("Jump-terminated block has %d successors; expected 1", nsuccs) + return + } + + case *Return: + if nsuccs := len(s.block.Succs); nsuccs != 0 { + s.errorf("Return-terminated block has %d successors; expected none", nsuccs) + return + } + if na, nf := len(instr.Results), s.fn.Signature.Results().Len(); nf != na { + s.errorf("%d-ary return in %d-ary function", na, nf) + } + + case *Panic: + if nsuccs := len(s.block.Succs); nsuccs != 0 { + s.errorf("Panic-terminated block has %d successors; expected none", nsuccs) + return + } + + default: + s.errorf("non-control flow instruction at end of block") + } +} + +func (s *sanity) checkBlock(b *BasicBlock, index int) { + s.block = b + + if b.Index != index { + s.errorf("block has incorrect Index %d", b.Index) + } + if b.parent != s.fn { + s.errorf("block has incorrect parent %s", b.parent) + } + + // Check all blocks are reachable. + // (The entry block is always implicitly reachable, + // as is the Recover block, if any.) + if (index > 0 && b != b.parent.Recover) && len(b.Preds) == 0 { + s.warnf("unreachable block") + if b.Instrs == nil { + // Since this block is about to be pruned, + // tolerating transient problems in it + // simplifies other optimizations. + return + } + } + + // Check predecessor and successor relations are dual, + // and that all blocks in CFG belong to same function. + for _, a := range b.Preds { + found := false + for _, bb := range a.Succs { + if bb == b { + found = true + break + } + } + if !found { + s.errorf("expected successor edge in predecessor %s; found only: %s", a, a.Succs) + } + if a.parent != s.fn { + s.errorf("predecessor %s belongs to different function %s", a, a.parent) + } + } + for _, c := range b.Succs { + found := false + for _, bb := range c.Preds { + if bb == b { + found = true + break + } + } + if !found { + s.errorf("expected predecessor edge in successor %s; found only: %s", c, c.Preds) + } + if c.parent != s.fn { + s.errorf("successor %s belongs to different function %s", c, c.parent) + } + } + + // Check each instruction is sane. + n := len(b.Instrs) + if n == 0 { + s.errorf("basic block contains no instructions") + } + var rands [10]*Value // reuse storage + for j, instr := range b.Instrs { + if instr == nil { + s.errorf("nil instruction at index %d", j) + continue + } + if b2 := instr.Block(); b2 == nil { + s.errorf("nil Block() for instruction at index %d", j) + continue + } else if b2 != b { + s.errorf("wrong Block() (%s) for instruction at index %d ", b2, j) + continue + } + if j < n-1 { + s.checkInstr(j, instr) + } else { + s.checkFinalInstr(instr) + } + + // Check Instruction.Operands. + operands: + for i, op := range instr.Operands(rands[:0]) { + if op == nil { + s.errorf("nil operand pointer %d of %s", i, instr) + continue + } + val := *op + if val == nil { + continue // a nil operand is ok + } + + // Check that "untyped" types only appear on constant operands. 
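+			// Only constants may carry an "untyped" basic type; any other
+			// operand left untyped indicates a builder bug.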
+ if _, ok := (*op).(*Const); !ok { + if basic, ok := (*op).Type().(*types.Basic); ok { + if basic.Info()&types.IsUntyped != 0 { + s.errorf("operand #%d of %s is untyped: %s", i, instr, basic) + } + } + } + + // Check that Operands that are also Instructions belong to same function. + // TODO(adonovan): also check their block dominates block b. + if val, ok := val.(Instruction); ok { + if val.Block() == nil { + s.errorf("operand %d of %s is an instruction (%s) that belongs to no block", i, instr, val) + } else if val.Parent() != s.fn { + s.errorf("operand %d of %s is an instruction (%s) from function %s", i, instr, val, val.Parent()) + } + } + + // Check that each function-local operand of + // instr refers back to instr. (NB: quadratic) + switch val := val.(type) { + case *Const, *Global, *Builtin: + continue // not local + case *Function: + if val.parent == nil { + continue // only anon functions are local + } + } + + // TODO(adonovan): check val.Parent() != nil <=> val.Referrers() is defined. + + if refs := val.Referrers(); refs != nil { + for _, ref := range *refs { + if ref == instr { + continue operands + } + } + s.errorf("operand %d of %s (%s) does not refer to us", i, instr, val) + } else { + s.errorf("operand %d of %s (%s) has no referrers", i, instr, val) + } + } + } +} + +func (s *sanity) checkReferrerList(v Value) { + refs := v.Referrers() + if refs == nil { + s.errorf("%s has missing referrer list", v.Name()) + return + } + for i, ref := range *refs { + if _, ok := s.instrs[ref]; !ok { + s.errorf("%s.Referrers()[%d] = %s is not an instruction belonging to this function", v.Name(), i, ref) + } + } +} + +func (s *sanity) checkFunction(fn *Function) bool { + // TODO(adonovan): check Function invariants: + // - check params match signature + // - check transient fields are nil + // - warn if any fn.Locals do not appear among block instructions. + s.fn = fn + if fn.Prog == nil { + s.errorf("nil Prog") + } + + _ = fn.String() // must not crash + _ = fn.RelString(fn.pkg()) // must not crash + + // All functions have a package, except delegates (which are + // shared across packages, or duplicated as weak symbols in a + // separate-compilation model), and error.Error. + if fn.Pkg == nil { + if strings.HasPrefix(fn.Synthetic, "wrapper ") || + strings.HasPrefix(fn.Synthetic, "bound ") || + strings.HasPrefix(fn.Synthetic, "thunk ") || + strings.HasSuffix(fn.name, "Error") { + // ok + } else { + s.errorf("nil Pkg") + } + } + if src, syn := fn.Synthetic == "", fn.Syntax() != nil; src != syn { + s.errorf("got fromSource=%t, hasSyntax=%t; want same values", src, syn) + } + for i, l := range fn.Locals { + if l.Parent() != fn { + s.errorf("Local %s at index %d has wrong parent", l.Name(), i) + } + if l.Heap { + s.errorf("Local %s at index %d has Heap flag set", l.Name(), i) + } + } + // Build the set of valid referrers. + s.instrs = make(map[Instruction]struct{}) + for _, b := range fn.Blocks { + for _, instr := range b.Instrs { + s.instrs[instr] = struct{}{} + } + } + for i, p := range fn.Params { + if p.Parent() != fn { + s.errorf("Param %s at index %d has wrong parent", p.Name(), i) + } + // Check common suffix of Signature and Params match type. 
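+		// fn.Params may carry extra leading entries (e.g. the receiver of a
+		// method), so align by suffix: parameter i maps to signature index
+		// i - len(fn.Params) + sig.Params().Len(), and is skipped when that
+		// index is negative.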
+ if sig := fn.Signature; sig != nil { + j := i - len(fn.Params) + sig.Params().Len() // index within sig.Params + if j < 0 { + continue + } + if !types.Identical(p.Type(), sig.Params().At(j).Type()) { + s.errorf("Param %s at index %d has wrong type (%s, versus %s in Signature)", p.Name(), i, p.Type(), sig.Params().At(j).Type()) + + } + } + + s.checkReferrerList(p) + } + for i, fv := range fn.FreeVars { + if fv.Parent() != fn { + s.errorf("FreeVar %s at index %d has wrong parent", fv.Name(), i) + } + s.checkReferrerList(fv) + } + + if fn.Blocks != nil && len(fn.Blocks) == 0 { + // Function _had_ blocks (so it's not external) but + // they were "optimized" away, even the entry block. + s.errorf("Blocks slice is non-nil but empty") + } + for i, b := range fn.Blocks { + if b == nil { + s.warnf("nil *BasicBlock at f.Blocks[%d]", i) + continue + } + s.checkBlock(b, i) + } + if fn.Recover != nil && fn.Blocks[fn.Recover.Index] != fn.Recover { + s.errorf("Recover block is not in Blocks slice") + } + + s.block = nil + for i, anon := range fn.AnonFuncs { + if anon.Parent() != fn { + s.errorf("AnonFuncs[%d]=%s but %s.Parent()=%s", i, anon, anon, anon.Parent()) + } + } + s.fn = nil + return !s.insane +} + +// sanityCheckPackage checks invariants of packages upon creation. +// It does not require that the package is built. +// Unlike sanityCheck (for functions), it just panics at the first error. +func sanityCheckPackage(pkg *Package) { + if pkg.Pkg == nil { + panic(fmt.Sprintf("Package %s has no Object", pkg)) + } + _ = pkg.String() // must not crash + + for name, mem := range pkg.Members { + if name != mem.Name() { + panic(fmt.Sprintf("%s: %T.Name() = %s, want %s", + pkg.Pkg.Path(), mem, mem.Name(), name)) + } + obj := mem.Object() + if obj == nil { + // This check is sound because fields + // {Global,Function}.object have type + // types.Object. (If they were declared as + // *types.{Var,Func}, we'd have a non-empty + // interface containing a nil pointer.) + + continue // not all members have typechecker objects + } + if obj.Name() != name { + if obj.Name() == "init" && strings.HasPrefix(mem.Name(), "init#") { + // Ok. The name of a declared init function varies between + // its types.Func ("init") and its ssa.Function ("init#%d"). + } else { + panic(fmt.Sprintf("%s: %T.Object().Name() = %s, want %s", + pkg.Pkg.Path(), mem, obj.Name(), name)) + } + } + if obj.Pos() != mem.Pos() { + panic(fmt.Sprintf("%s Pos=%d obj.Pos=%d", mem, mem.Pos(), obj.Pos())) + } + } +} diff --git a/vendor/honnef.co/go/tools/ssa/source.go b/vendor/honnef.co/go/tools/ssa/source.go new file mode 100644 index 000000000..8d9cca170 --- /dev/null +++ b/vendor/honnef.co/go/tools/ssa/source.go @@ -0,0 +1,293 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file defines utilities for working with source positions +// or source-level named entities ("objects"). + +// TODO(adonovan): test that {Value,Instruction}.Pos() positions match +// the originating syntax, as specified. + +import ( + "go/ast" + "go/token" + "go/types" +) + +// EnclosingFunction returns the function that contains the syntax +// node denoted by path. +// +// Syntax associated with package-level variable specifications is +// enclosed by the package's init() function. +// +// Returns nil if not found; reasons might include: +// - the node is not enclosed by any function. 
+// - the node is within an anonymous function (FuncLit) and +// its SSA function has not been created yet +// (pkg.Build() has not yet been called). +// +func EnclosingFunction(pkg *Package, path []ast.Node) *Function { + // Start with package-level function... + fn := findEnclosingPackageLevelFunction(pkg, path) + if fn == nil { + return nil // not in any function + } + + // ...then walk down the nested anonymous functions. + n := len(path) +outer: + for i := range path { + if lit, ok := path[n-1-i].(*ast.FuncLit); ok { + for _, anon := range fn.AnonFuncs { + if anon.Pos() == lit.Type.Func { + fn = anon + continue outer + } + } + // SSA function not found: + // - package not yet built, or maybe + // - builder skipped FuncLit in dead block + // (in principle; but currently the Builder + // generates even dead FuncLits). + return nil + } + } + return fn +} + +// HasEnclosingFunction returns true if the AST node denoted by path +// is contained within the declaration of some function or +// package-level variable. +// +// Unlike EnclosingFunction, the behaviour of this function does not +// depend on whether SSA code for pkg has been built, so it can be +// used to quickly reject check inputs that will cause +// EnclosingFunction to fail, prior to SSA building. +// +func HasEnclosingFunction(pkg *Package, path []ast.Node) bool { + return findEnclosingPackageLevelFunction(pkg, path) != nil +} + +// findEnclosingPackageLevelFunction returns the Function +// corresponding to the package-level function enclosing path. +// +func findEnclosingPackageLevelFunction(pkg *Package, path []ast.Node) *Function { + if n := len(path); n >= 2 { // [... {Gen,Func}Decl File] + switch decl := path[n-2].(type) { + case *ast.GenDecl: + if decl.Tok == token.VAR && n >= 3 { + // Package-level 'var' initializer. + return pkg.init + } + + case *ast.FuncDecl: + if decl.Recv == nil && decl.Name.Name == "init" { + // Explicit init() function. + for _, b := range pkg.init.Blocks { + for _, instr := range b.Instrs { + if instr, ok := instr.(*Call); ok { + if callee, ok := instr.Call.Value.(*Function); ok && callee.Pkg == pkg && callee.Pos() == decl.Name.NamePos { + return callee + } + } + } + } + // Hack: return non-nil when SSA is not yet + // built so that HasEnclosingFunction works. + return pkg.init + } + // Declared function/method. + return findNamedFunc(pkg, decl.Name.NamePos) + } + } + return nil // not in any function +} + +// findNamedFunc returns the named function whose FuncDecl.Ident is at +// position pos. +// +func findNamedFunc(pkg *Package, pos token.Pos) *Function { + // Look at all package members and method sets of named types. + // Not very efficient. + for _, mem := range pkg.Members { + switch mem := mem.(type) { + case *Function: + if mem.Pos() == pos { + return mem + } + case *Type: + mset := pkg.Prog.MethodSets.MethodSet(types.NewPointer(mem.Type())) + for i, n := 0, mset.Len(); i < n; i++ { + // Don't call Program.Method: avoid creating wrappers. + obj := mset.At(i).Obj().(*types.Func) + if obj.Pos() == pos { + return pkg.values[obj].(*Function) + } + } + } + } + return nil +} + +// ValueForExpr returns the SSA Value that corresponds to non-constant +// expression e. +// +// It returns nil if no value was found, e.g. +// - the expression is not lexically contained within f; +// - f was not built with debug information; or +// - e is a constant expression. (For efficiency, no debug +// information is stored for constants. Use +// go/types.Info.Types[e].Value instead.) 
+// - e is a reference to nil or a built-in function. +// - the value was optimised away. +// +// If e is an addressable expression used in an lvalue context, +// value is the address denoted by e, and isAddr is true. +// +// The types of e (or &e, if isAddr) and the result are equal +// (modulo "untyped" bools resulting from comparisons). +// +// (Tip: to find the ssa.Value given a source position, use +// astutil.PathEnclosingInterval to locate the ast.Node, then +// EnclosingFunction to locate the Function, then ValueForExpr to find +// the ssa.Value.) +// +func (f *Function) ValueForExpr(e ast.Expr) (value Value, isAddr bool) { + if f.debugInfo() { // (opt) + e = unparen(e) + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + if ref, ok := instr.(*DebugRef); ok { + if ref.Expr == e { + return ref.X, ref.IsAddr + } + } + } + } + } + return +} + +// --- Lookup functions for source-level named entities (types.Objects) --- + +// Package returns the SSA Package corresponding to the specified +// type-checker package object. +// It returns nil if no such SSA package has been created. +// +func (prog *Program) Package(obj *types.Package) *Package { + return prog.packages[obj] +} + +// packageLevelValue returns the package-level value corresponding to +// the specified named object, which may be a package-level const +// (*Const), var (*Global) or func (*Function) of some package in +// prog. It returns nil if the object is not found. +// +func (prog *Program) packageLevelValue(obj types.Object) Value { + if pkg, ok := prog.packages[obj.Pkg()]; ok { + return pkg.values[obj] + } + return nil +} + +// FuncValue returns the concrete Function denoted by the source-level +// named function obj, or nil if obj denotes an interface method. +// +// TODO(adonovan): check the invariant that obj.Type() matches the +// result's Signature, both in the params/results and in the receiver. +// +func (prog *Program) FuncValue(obj *types.Func) *Function { + fn, _ := prog.packageLevelValue(obj).(*Function) + return fn +} + +// ConstValue returns the SSA Value denoted by the source-level named +// constant obj. +// +func (prog *Program) ConstValue(obj *types.Const) *Const { + // TODO(adonovan): opt: share (don't reallocate) + // Consts for const objects and constant ast.Exprs. + + // Universal constant? {true,false,nil} + if obj.Parent() == types.Universe { + return NewConst(obj.Val(), obj.Type()) + } + // Package-level named constant? + if v := prog.packageLevelValue(obj); v != nil { + return v.(*Const) + } + return NewConst(obj.Val(), obj.Type()) +} + +// VarValue returns the SSA Value that corresponds to a specific +// identifier denoting the source-level named variable obj. +// +// VarValue returns nil if a local variable was not found, perhaps +// because its package was not built, the debug information was not +// requested during SSA construction, or the value was optimized away. +// +// ref is the path to an ast.Ident (e.g. from PathEnclosingInterval), +// and that ident must resolve to obj. +// +// pkg is the package enclosing the reference. (A reference to a var +// always occurs within a function, so we need to know where to find it.) +// +// If the identifier is a field selector and its base expression is +// non-addressable, then VarValue returns the value of that field. +// For example: +// func f() struct {x int} +// f().x // VarValue(x) returns a *Field instruction of type int +// +// All other identifiers denote addressable locations (variables). 
+// For them, VarValue may return either the variable's address or its +// value, even when the expression is evaluated only for its value; the +// situation is reported by isAddr, the second component of the result. +// +// If !isAddr, the returned value is the one associated with the +// specific identifier. For example, +// var x int // VarValue(x) returns Const 0 here +// x = 1 // VarValue(x) returns Const 1 here +// +// It is not specified whether the value or the address is returned in +// any particular case, as it may depend upon optimizations performed +// during SSA code generation, such as registerization, constant +// folding, avoidance of materialization of subexpressions, etc. +// +func (prog *Program) VarValue(obj *types.Var, pkg *Package, ref []ast.Node) (value Value, isAddr bool) { + // All references to a var are local to some function, possibly init. + fn := EnclosingFunction(pkg, ref) + if fn == nil { + return // e.g. def of struct field; SSA not built? + } + + id := ref[0].(*ast.Ident) + + // Defining ident of a parameter? + if id.Pos() == obj.Pos() { + for _, param := range fn.Params { + if param.Object() == obj { + return param, false + } + } + } + + // Other ident? + for _, b := range fn.Blocks { + for _, instr := range b.Instrs { + if dr, ok := instr.(*DebugRef); ok { + if dr.Pos() == id.Pos() { + return dr.X, dr.IsAddr + } + } + } + } + + // Defining ident of package-level var? + if v := prog.packageLevelValue(obj); v != nil { + return v.(*Global), true + } + + return // e.g. debug info not requested, or var optimized away +} diff --git a/vendor/honnef.co/go/tools/ssa/ssa.go b/vendor/honnef.co/go/tools/ssa/ssa.go new file mode 100644 index 000000000..aeddd65e5 --- /dev/null +++ b/vendor/honnef.co/go/tools/ssa/ssa.go @@ -0,0 +1,1745 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This package defines a high-level intermediate representation for +// Go programs using static single-assignment (SSA) form. + +import ( + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + "sync" + + "golang.org/x/tools/go/types/typeutil" +) + +// A Program is a partial or complete Go program converted to SSA form. +type Program struct { + Fset *token.FileSet // position information for the files of this Program + imported map[string]*Package // all importable Packages, keyed by import path + packages map[*types.Package]*Package // all loaded Packages, keyed by object + mode BuilderMode // set of mode bits for SSA construction + MethodSets typeutil.MethodSetCache // cache of type-checker's method-sets + + methodsMu sync.Mutex // guards the following maps: + methodSets typeutil.Map // maps type to its concrete methodSet + runtimeTypes typeutil.Map // types for which rtypes are needed + canon typeutil.Map // type canonicalization map + bounds map[*types.Func]*Function // bounds for curried x.Method closures + thunks map[selectionKey]*Function // thunks for T.Method expressions +} + +// A Package is a single analyzed Go package containing Members for +// all package-level functions, variables, constants and types it +// declares. These may be accessed directly via Members, or via the +// type-specific accessor methods Func, Type, Var and Const. +// +// Members also contains entries for "init" (the synthetic package +// initializer) and "init#%d", the nth declared init function, +// and unspecified other things too. 
+// +type Package struct { + Prog *Program // the owning program + Pkg *types.Package // the corresponding go/types.Package + Members map[string]Member // all package members keyed by name (incl. init and init#%d) + values map[types.Object]Value // package members (incl. types and methods), keyed by object + init *Function // Func("init"); the package's init function + debug bool // include full debug info in this package + + // The following fields are set transiently, then cleared + // after building. + buildOnce sync.Once // ensures package building occurs once + ninit int32 // number of init functions + info *types.Info // package type information + files []*ast.File // package ASTs +} + +// A Member is a member of a Go package, implemented by *NamedConst, +// *Global, *Function, or *Type; they are created by package-level +// const, var, func and type declarations respectively. +// +type Member interface { + Name() string // declared name of the package member + String() string // package-qualified name of the package member + RelString(*types.Package) string // like String, but relative refs are unqualified + Object() types.Object // typechecker's object for this member, if any + Pos() token.Pos // position of member's declaration, if known + Type() types.Type // type of the package member + Token() token.Token // token.{VAR,FUNC,CONST,TYPE} + Package() *Package // the containing package +} + +// A Type is a Member of a Package representing a package-level named type. +type Type struct { + object *types.TypeName + pkg *Package +} + +// A NamedConst is a Member of a Package representing a package-level +// named constant. +// +// Pos() returns the position of the declaring ast.ValueSpec.Names[*] +// identifier. +// +// NB: a NamedConst is not a Value; it contains a constant Value, which +// it augments with the name and position of its 'const' declaration. +// +type NamedConst struct { + object *types.Const + Value *Const + pkg *Package +} + +// A Value is an SSA value that can be referenced by an instruction. +type Value interface { + // Name returns the name of this value, and determines how + // this Value appears when used as an operand of an + // Instruction. + // + // This is the same as the source name for Parameters, + // Builtins, Functions, FreeVars, Globals. + // For constants, it is a representation of the constant's value + // and type. For all other Values this is the name of the + // virtual register defined by the instruction. + // + // The name of an SSA Value is not semantically significant, + // and may not even be unique within a function. + Name() string + + // If this value is an Instruction, String returns its + // disassembled form; otherwise it returns unspecified + // human-readable information about the Value, such as its + // kind, name and type. + String() string + + // Type returns the type of this value. Many instructions + // (e.g. IndexAddr) change their behaviour depending on the + // types of their operands. + Type() types.Type + + // Parent returns the function to which this Value belongs. + // It returns nil for named Functions, Builtin, Const and Global. + Parent() *Function + + // Referrers returns the list of instructions that have this + // value as one of their operands; it may contain duplicates + // if an instruction has a repeated operand. + // + // Referrers actually returns a pointer through which the + // caller may perform mutations to the object's state. + // + // Referrers is currently only defined if Parent()!=nil, + // i.e. 
for the function-local values FreeVar, Parameter, + // Functions (iff anonymous) and all value-defining instructions. + // It returns nil for named Functions, Builtin, Const and Global. + // + // Instruction.Operands contains the inverse of this relation. + Referrers() *[]Instruction + + // Pos returns the location of the AST token most closely + // associated with the operation that gave rise to this value, + // or token.NoPos if it was not explicit in the source. + // + // For each ast.Node type, a particular token is designated as + // the closest location for the expression, e.g. the Lparen + // for an *ast.CallExpr. This permits a compact but + // approximate mapping from Values to source positions for use + // in diagnostic messages, for example. + // + // (Do not use this position to determine which Value + // corresponds to an ast.Expr; use Function.ValueForExpr + // instead. NB: it requires that the function was built with + // debug information.) + Pos() token.Pos +} + +// An Instruction is an SSA instruction that computes a new Value or +// has some effect. +// +// An Instruction that defines a value (e.g. BinOp) also implements +// the Value interface; an Instruction that only has an effect (e.g. Store) +// does not. +// +type Instruction interface { + // String returns the disassembled form of this value. + // + // Examples of Instructions that are Values: + // "x + y" (BinOp) + // "len([])" (Call) + // Note that the name of the Value is not printed. + // + // Examples of Instructions that are not Values: + // "return x" (Return) + // "*y = x" (Store) + // + // (The separation Value.Name() from Value.String() is useful + // for some analyses which distinguish the operation from the + // value it defines, e.g., 'y = local int' is both an allocation + // of memory 'local int' and a definition of a pointer y.) + String() string + + // Parent returns the function to which this instruction + // belongs. + Parent() *Function + + // Block returns the basic block to which this instruction + // belongs. + Block() *BasicBlock + + // setBlock sets the basic block to which this instruction belongs. + setBlock(*BasicBlock) + + // Operands returns the operands of this instruction: the + // set of Values it references. + // + // Specifically, it appends their addresses to rands, a + // user-provided slice, and returns the resulting slice, + // permitting avoidance of memory allocation. + // + // The operands are appended in undefined order, but the order + // is consistent for a given Instruction; the addresses are + // always non-nil but may point to a nil Value. Clients may + // store through the pointers, e.g. to effect a value + // renaming. + // + // Value.Referrers is a subset of the inverse of this + // relation. (Referrers are not tracked for all types of + // Values.) + Operands(rands []*Value) []*Value + + // Pos returns the location of the AST token most closely + // associated with the operation that gave rise to this + // instruction, or token.NoPos if it was not explicit in the + // source. + // + // For each ast.Node type, a particular token is designated as + // the closest location for the expression, e.g. the Go token + // for an *ast.GoStmt. This permits a compact but approximate + // mapping from Instructions to source positions for use in + // diagnostic messages, for example. + // + // (Do not use this position to determine which Instruction + // corresponds to an ast.Expr; see the notes for Value.Pos. 
+ // This position may be used to determine which non-Value + // Instruction corresponds to some ast.Stmts, but not all: If + // and Jump instructions have no Pos(), for example.) + Pos() token.Pos +} + +// A Node is a node in the SSA value graph. Every concrete type that +// implements Node is also either a Value, an Instruction, or both. +// +// Node contains the methods common to Value and Instruction, plus the +// Operands and Referrers methods generalized to return nil for +// non-Instructions and non-Values, respectively. +// +// Node is provided to simplify SSA graph algorithms. Clients should +// use the more specific and informative Value or Instruction +// interfaces where appropriate. +// +type Node interface { + // Common methods: + String() string + Pos() token.Pos + Parent() *Function + + // Partial methods: + Operands(rands []*Value) []*Value // nil for non-Instructions + Referrers() *[]Instruction // nil for non-Values +} + +// Function represents the parameters, results, and code of a function +// or method. +// +// If Blocks is nil, this indicates an external function for which no +// Go source code is available. In this case, FreeVars and Locals +// are nil too. Clients performing whole-program analysis must +// handle external functions specially. +// +// Blocks contains the function's control-flow graph (CFG). +// Blocks[0] is the function entry point; block order is not otherwise +// semantically significant, though it may affect the readability of +// the disassembly. +// To iterate over the blocks in dominance order, use DomPreorder(). +// +// Recover is an optional second entry point to which control resumes +// after a recovered panic. The Recover block may contain only a return +// statement, preceded by a load of the function's named return +// parameters, if any. +// +// A nested function (Parent()!=nil) that refers to one or more +// lexically enclosing local variables ("free variables") has FreeVars. +// Such functions cannot be called directly but require a +// value created by MakeClosure which, via its Bindings, supplies +// values for these parameters. +// +// If the function is a method (Signature.Recv() != nil) then the first +// element of Params is the receiver parameter. +// +// A Go package may declare many functions called "init". +// For each one, Object().Name() returns "init" but Name() returns +// "init#1", etc, in declaration order. +// +// Pos() returns the declaring ast.FuncLit.Type.Func or the position +// of the ast.FuncDecl.Name, if the function was explicit in the +// source. Synthetic wrappers, for which Synthetic != "", may share +// the same position as the function they wrap. +// Syntax.Pos() always returns the position of the declaring "func" token. +// +// Type() returns the function's Signature. 
+// +type Function struct { + name string + object types.Object // a declared *types.Func or one of its wrappers + method *types.Selection // info about provenance of synthetic methods + Signature *types.Signature + pos token.Pos + + Synthetic string // provenance of synthetic function; "" for true source functions + syntax ast.Node // *ast.Func{Decl,Lit}; replaced with simple ast.Node after build, unless debug mode + parent *Function // enclosing function if anon; nil if global + Pkg *Package // enclosing package; nil for shared funcs (wrappers and error.Error) + Prog *Program // enclosing program + Params []*Parameter // function parameters; for methods, includes receiver + FreeVars []*FreeVar // free variables whose values must be supplied by closure + Locals []*Alloc // local variables of this function + Blocks []*BasicBlock // basic blocks of the function; nil => external + Recover *BasicBlock // optional; control transfers here after recovered panic + AnonFuncs []*Function // anonymous functions directly beneath this one + referrers []Instruction // referring instructions (iff Parent() != nil) + + // The following fields are set transiently during building, + // then cleared. + currentBlock *BasicBlock // where to emit code + objects map[types.Object]Value // addresses of local variables + namedResults []*Alloc // tuple of named results + targets *targets // linked stack of branch targets + lblocks map[*ast.Object]*lblock // labelled blocks +} + +// BasicBlock represents an SSA basic block. +// +// The final element of Instrs is always an explicit transfer of +// control (If, Jump, Return, or Panic). +// +// A block may contain no Instructions only if it is unreachable, +// i.e., Preds is nil. Empty blocks are typically pruned. +// +// BasicBlocks and their Preds/Succs relation form a (possibly cyclic) +// graph independent of the SSA Value graph: the control-flow graph or +// CFG. It is illegal for multiple edges to exist between the same +// pair of blocks. +// +// Each BasicBlock is also a node in the dominator tree of the CFG. +// The tree may be navigated using Idom()/Dominees() and queried using +// Dominates(). +// +// The order of Preds and Succs is significant (to Phi and If +// instructions, respectively). +// +type BasicBlock struct { + Index int // index of this block within Parent().Blocks + Comment string // optional label; no semantic significance + parent *Function // parent function + Instrs []Instruction // instructions in order + Preds, Succs []*BasicBlock // predecessors and successors + succs2 [2]*BasicBlock // initial space for Succs + dom domInfo // dominator tree info + gaps int // number of nil Instrs (transient) + rundefers int // number of rundefers (transient) +} + +// Pure values ---------------------------------------- + +// A FreeVar represents a free variable of the function to which it +// belongs. +// +// FreeVars are used to implement anonymous functions, whose free +// variables are lexically captured in a closure formed by +// MakeClosure. The value of such a free var is an Alloc or another +// FreeVar and is considered a potentially escaping heap address, with +// pointer type. +// +// FreeVars are also used to implement bound method closures. Such a +// free var represents the receiver value and may be of any type that +// has concrete methods. +// +// Pos() returns the position of the value that was captured, which +// belongs to an enclosing function. 
+// +type FreeVar struct { + name string + typ types.Type + pos token.Pos + parent *Function + referrers []Instruction + + // Transiently needed during building. + outer Value // the Value captured from the enclosing context. +} + +// A Parameter represents an input parameter of a function. +// +type Parameter struct { + name string + object types.Object // a *types.Var; nil for non-source locals + typ types.Type + pos token.Pos + parent *Function + referrers []Instruction +} + +// A Const represents the value of a constant expression. +// +// The underlying type of a constant may be any boolean, numeric, or +// string type. In addition, a Const may represent the nil value of +// any reference type---interface, map, channel, pointer, slice, or +// function---but not "untyped nil". +// +// All source-level constant expressions are represented by a Const +// of the same type and value. +// +// Value holds the exact value of the constant, independent of its +// Type(), using the same representation as package go/constant uses for +// constants, or nil for a typed nil value. +// +// Pos() returns token.NoPos. +// +// Example printed form: +// 42:int +// "hello":untyped string +// 3+4i:MyComplex +// +type Const struct { + typ types.Type + Value constant.Value +} + +// A Global is a named Value holding the address of a package-level +// variable. +// +// Pos() returns the position of the ast.ValueSpec.Names[*] +// identifier. +// +type Global struct { + name string + object types.Object // a *types.Var; may be nil for synthetics e.g. init$guard + typ types.Type + pos token.Pos + + Pkg *Package +} + +// A Builtin represents a specific use of a built-in function, e.g. len. +// +// Builtins are immutable values. Builtins do not have addresses. +// Builtins can only appear in CallCommon.Func. +// +// Name() indicates the function: one of the built-in functions from the +// Go spec (excluding "make" and "new") or one of these ssa-defined +// intrinsics: +// +// // wrapnilchk returns ptr if non-nil, panics otherwise. +// // (For use in indirection wrappers.) +// func ssa:wrapnilchk(ptr *T, recvType, methodName string) *T +// +// Object() returns a *types.Builtin for built-ins defined by the spec, +// nil for others. +// +// Type() returns a *types.Signature representing the effective +// signature of the built-in for this call. +// +type Builtin struct { + name string + sig *types.Signature +} + +// Value-defining instructions ---------------------------------------- + +// The Alloc instruction reserves space for a variable of the given type, +// zero-initializes it, and yields its address. +// +// Alloc values are always addresses, and have pointer types, so the +// type of the allocated variable is actually +// Type().Underlying().(*types.Pointer).Elem(). +// +// If Heap is false, Alloc allocates space in the function's +// activation record (frame); we refer to an Alloc(Heap=false) as a +// "local" alloc. Each local Alloc returns the same address each time +// it is executed within the same activation; the space is +// re-initialized to zero. +// +// If Heap is true, Alloc allocates space in the heap; we +// refer to an Alloc(Heap=true) as a "new" alloc. Each new Alloc +// returns a different address each time it is executed. +// +// When Alloc is applied to a channel, map or slice type, it returns +// the address of an uninitialized (nil) reference of that kind; store +// the result of MakeSlice, MakeMap or MakeChan in that location to +// instantiate these types. 
+// +// Pos() returns the ast.CompositeLit.Lbrace for a composite literal, +// or the ast.CallExpr.Rparen for a call to new() or for a call that +// allocates a varargs slice. +// +// Example printed form: +// t0 = local int +// t1 = new int +// +type Alloc struct { + register + Comment string + Heap bool + index int // dense numbering; for lifting +} + +var _ Instruction = (*Sigma)(nil) +var _ Value = (*Sigma)(nil) + +type Sigma struct { + register + X Value + Branch bool +} + +func (p *Sigma) Value() Value { + v := p.X + for { + sigma, ok := v.(*Sigma) + if !ok { + break + } + v = sigma + } + return v +} + +func (p *Sigma) String() string { + return fmt.Sprintf("σ [%s.%t]", relName(p.X, p), p.Branch) +} + +// The Phi instruction represents an SSA φ-node, which combines values +// that differ across incoming control-flow edges and yields a new +// value. Within a block, all φ-nodes must appear before all non-φ +// nodes. +// +// Pos() returns the position of the && or || for short-circuit +// control-flow joins, or that of the *Alloc for φ-nodes inserted +// during SSA renaming. +// +// Example printed form: +// t2 = phi [0: t0, 1: t1] +// +type Phi struct { + register + Comment string // a hint as to its purpose + Edges []Value // Edges[i] is value for Block().Preds[i] +} + +// The Call instruction represents a function or method call. +// +// The Call instruction yields the function result if there is exactly +// one. Otherwise it returns a tuple, the components of which are +// accessed via Extract. +// +// See CallCommon for generic function call documentation. +// +// Pos() returns the ast.CallExpr.Lparen, if explicit in the source. +// +// Example printed form: +// t2 = println(t0, t1) +// t4 = t3() +// t7 = invoke t5.Println(...t6) +// +type Call struct { + register + Call CallCommon +} + +// The BinOp instruction yields the result of binary operation X Op Y. +// +// Pos() returns the ast.BinaryExpr.OpPos, if explicit in the source. +// +// Example printed form: +// t1 = t0 + 1:int +// +type BinOp struct { + register + // One of: + // ADD SUB MUL QUO REM + - * / % + // AND OR XOR SHL SHR AND_NOT & | ^ << >> &^ + // EQL NEQ LSS LEQ GTR GEQ == != < <= < >= + Op token.Token + X, Y Value +} + +// The UnOp instruction yields the result of Op X. +// ARROW is channel receive. +// MUL is pointer indirection (load). +// XOR is bitwise complement. +// SUB is negation. +// NOT is logical negation. +// +// If CommaOk and Op=ARROW, the result is a 2-tuple of the value above +// and a boolean indicating the success of the receive. The +// components of the tuple are accessed using Extract. +// +// Pos() returns the ast.UnaryExpr.OpPos, if explicit in the source. +// For receive operations (ARROW) implicit in ranging over a channel, +// Pos() returns the ast.RangeStmt.For. +// For implicit memory loads (STAR), Pos() returns the position of the +// most closely associated source-level construct; the details are not +// specified. +// +// Example printed form: +// t0 = *x +// t2 = <-t1,ok +// +type UnOp struct { + register + Op token.Token // One of: NOT SUB ARROW MUL XOR ! - <- * ^ + X Value + CommaOk bool +} + +// The ChangeType instruction applies to X a value-preserving type +// change to Type(). +// +// Type changes are permitted: +// - between a named type and its underlying type. +// - between two named types of the same underlying type. +// - between (possibly named) pointers to identical base types. 
+// - from a bidirectional channel to a read- or write-channel, +// optionally adding/removing a name. +// +// This operation cannot fail dynamically. +// +// Pos() returns the ast.CallExpr.Lparen, if the instruction arose +// from an explicit conversion in the source. +// +// Example printed form: +// t1 = changetype *int <- IntPtr (t0) +// +type ChangeType struct { + register + X Value +} + +// The Convert instruction yields the conversion of value X to type +// Type(). One or both of those types is basic (but possibly named). +// +// A conversion may change the value and representation of its operand. +// Conversions are permitted: +// - between real numeric types. +// - between complex numeric types. +// - between string and []byte or []rune. +// - between pointers and unsafe.Pointer. +// - between unsafe.Pointer and uintptr. +// - from (Unicode) integer to (UTF-8) string. +// A conversion may imply a type name change also. +// +// This operation cannot fail dynamically. +// +// Conversions of untyped string/number/bool constants to a specific +// representation are eliminated during SSA construction. +// +// Pos() returns the ast.CallExpr.Lparen, if the instruction arose +// from an explicit conversion in the source. +// +// Example printed form: +// t1 = convert []byte <- string (t0) +// +type Convert struct { + register + X Value +} + +// ChangeInterface constructs a value of one interface type from a +// value of another interface type known to be assignable to it. +// This operation cannot fail. +// +// Pos() returns the ast.CallExpr.Lparen if the instruction arose from +// an explicit T(e) conversion; the ast.TypeAssertExpr.Lparen if the +// instruction arose from an explicit e.(T) operation; or token.NoPos +// otherwise. +// +// Example printed form: +// t1 = change interface interface{} <- I (t0) +// +type ChangeInterface struct { + register + X Value +} + +// MakeInterface constructs an instance of an interface type from a +// value of a concrete type. +// +// Use Program.MethodSets.MethodSet(X.Type()) to find the method-set +// of X, and Program.MethodValue(m) to find the implementation of a method. +// +// To construct the zero value of an interface type T, use: +// NewConst(constant.MakeNil(), T, pos) +// +// Pos() returns the ast.CallExpr.Lparen, if the instruction arose +// from an explicit conversion in the source. +// +// Example printed form: +// t1 = make interface{} <- int (42:int) +// t2 = make Stringer <- t0 +// +type MakeInterface struct { + register + X Value +} + +// The MakeClosure instruction yields a closure value whose code is +// Fn and whose free variables' values are supplied by Bindings. +// +// Type() returns a (possibly named) *types.Signature. +// +// Pos() returns the ast.FuncLit.Type.Func for a function literal +// closure or the ast.SelectorExpr.Sel for a bound method closure. +// +// Example printed form: +// t0 = make closure anon@1.2 [x y z] +// t1 = make closure bound$(main.I).add [i] +// +type MakeClosure struct { + register + Fn Value // always a *Function + Bindings []Value // values for each free variable in Fn.FreeVars +} + +// The MakeMap instruction creates a new hash-table-based map object +// and yields a value of kind map. +// +// Type() returns a (possibly named) *types.Map. +// +// Pos() returns the ast.CallExpr.Lparen, if created by make(map), or +// the ast.CompositeLit.Lbrack if created by a literal. 
+// +// Example printed form: +// t1 = make map[string]int t0 +// t1 = make StringIntMap t0 +// +type MakeMap struct { + register + Reserve Value // initial space reservation; nil => default +} + +// The MakeChan instruction creates a new channel object and yields a +// value of kind chan. +// +// Type() returns a (possibly named) *types.Chan. +// +// Pos() returns the ast.CallExpr.Lparen for the make(chan) that +// created it. +// +// Example printed form: +// t0 = make chan int 0 +// t0 = make IntChan 0 +// +type MakeChan struct { + register + Size Value // int; size of buffer; zero => synchronous. +} + +// The MakeSlice instruction yields a slice of length Len backed by a +// newly allocated array of length Cap. +// +// Both Len and Cap must be non-nil Values of integer type. +// +// (Alloc(types.Array) followed by Slice will not suffice because +// Alloc can only create arrays of constant length.) +// +// Type() returns a (possibly named) *types.Slice. +// +// Pos() returns the ast.CallExpr.Lparen for the make([]T) that +// created it. +// +// Example printed form: +// t1 = make []string 1:int t0 +// t1 = make StringSlice 1:int t0 +// +type MakeSlice struct { + register + Len Value + Cap Value +} + +// The Slice instruction yields a slice of an existing string, slice +// or *array X between optional integer bounds Low and High. +// +// Dynamically, this instruction panics if X evaluates to a nil *array +// pointer. +// +// Type() returns string if the type of X was string, otherwise a +// *types.Slice with the same element type as X. +// +// Pos() returns the ast.SliceExpr.Lbrack if created by a x[:] slice +// operation, the ast.CompositeLit.Lbrace if created by a literal, or +// NoPos if not explicit in the source (e.g. a variadic argument slice). +// +// Example printed form: +// t1 = slice t0[1:] +// +type Slice struct { + register + X Value // slice, string, or *array + Low, High, Max Value // each may be nil +} + +// The FieldAddr instruction yields the address of Field of *struct X. +// +// The field is identified by its index within the field list of the +// struct type of X. +// +// Dynamically, this instruction panics if X evaluates to a nil +// pointer. +// +// Type() returns a (possibly named) *types.Pointer. +// +// Pos() returns the position of the ast.SelectorExpr.Sel for the +// field, if explicit in the source. +// +// Example printed form: +// t1 = &t0.name [#1] +// +type FieldAddr struct { + register + X Value // *struct + Field int // field is X.Type().Underlying().(*types.Pointer).Elem().Underlying().(*types.Struct).Field(Field) +} + +// The Field instruction yields the Field of struct X. +// +// The field is identified by its index within the field list of the +// struct type of X; by using numeric indices we avoid ambiguity of +// package-local identifiers and permit compact representations. +// +// Pos() returns the position of the ast.SelectorExpr.Sel for the +// field, if explicit in the source. +// +// Example printed form: +// t1 = t0.name [#1] +// +type Field struct { + register + X Value // struct + Field int // index into X.Type().(*types.Struct).Fields +} + +// The IndexAddr instruction yields the address of the element at +// index Index of collection X. Index is an integer expression. +// +// The elements of maps and strings are not addressable; use Lookup or +// MapUpdate instead. +// +// Dynamically, this instruction panics if X evaluates to a nil *array +// pointer. +// +// Type() returns a (possibly named) *types.Pointer. 
+//
+// Pos() returns the ast.IndexExpr.Lbrack for the index operation, if
+// explicit in the source.
+//
+// Example printed form:
+// 	t2 = &t0[t1]
+//
+type IndexAddr struct {
+	register
+	X     Value // slice or *array,
+	Index Value // numeric index
+}
+
+// The Index instruction yields element Index of array X.
+//
+// Pos() returns the ast.IndexExpr.Lbrack for the index operation, if
+// explicit in the source.
+//
+// Example printed form:
+// 	t2 = t0[t1]
+//
+type Index struct {
+	register
+	X     Value // array
+	Index Value // integer index
+}
+
+// The Lookup instruction yields element Index of collection X, a map
+// or string. Index is an integer expression if X is a string or the
+// appropriate key type if X is a map.
+//
+// If CommaOk, the result is a 2-tuple of the value above and a
+// boolean indicating the result of a map membership test for the key.
+// The components of the tuple are accessed using Extract.
+//
+// Pos() returns the ast.IndexExpr.Lbrack, if explicit in the source.
+//
+// Example printed form:
+// 	t2 = t0[t1]
+// 	t5 = t3[t4],ok
+//
+type Lookup struct {
+	register
+	X       Value // string or map
+	Index   Value // numeric or key-typed index
+	CommaOk bool  // return a value,ok pair
+}
+
+// SelectState is a helper for Select.
+// It represents one goal state and its corresponding communication.
+//
+type SelectState struct {
+	Dir       types.ChanDir // direction of case (SendOnly or RecvOnly)
+	Chan      Value         // channel to use (for send or receive)
+	Send      Value         // value to send (for send)
+	Pos       token.Pos     // position of token.ARROW
+	DebugNode ast.Node      // ast.SendStmt or ast.UnaryExpr(<-) [debug mode]
+}
+
+// The Select instruction tests whether (or blocks until) one
+// of the specified sent or received states is entered.
+//
+// Let n be the number of States for which Dir==RECV and T_i (0<=i<n)
+// be the element type of each such state's Chan.
+// Select returns an n+2-tuple
+//    (index int, recvOk bool, r_0 T_0, ... r_n-1 T_n-1)
+// The tuple's components, described below, must be accessed via the
+// Extract instruction.
+//
+// If Blocking, select waits until exactly one state holds, i.e. a
+// channel becomes ready for the designated operation of sending or
+// receiving; select chooses one among the ready states
+// pseudo-randomly, performs the send or receive operation, and sets
+// 'index' to the index of the chosen channel.
+//
+// If !Blocking, select doesn't block if no states hold; instead it
+// returns immediately with index equal to -1.
+//
+// If the chosen channel was used for a receive, the r_i component is
+// set to the received value, where i is the index of that state among
+// all n receive states; otherwise r_i has the zero value of type T_i.
+// Note that the receive index i is not the same as the state
+// index index.
+//
+// The second component of the triple, recvOk, is a boolean whose value
+// is true iff the selected operation was a receive and the receive
+// successfully yielded a value.
+//
+// Pos() returns the ast.SelectStmt.Select.
+//
+// Example printed form:
+// 	t3 = select nonblocking [<-t0, t1<-t2]
+// 	t4 = select blocking []
+//
+type Select struct {
+	register
+	States   []*SelectState
+	Blocking bool
+}
+
+// The Range instruction yields an iterator over the domain and range
+// of X, which must be a string or map.
+//
+// Elements are accessed via Next.
+//
+// Type() returns an opaque and degenerate "rangeIter" type.
+//
+// Pos() returns the ast.RangeStmt.For.
+//
+// Example printed form:
+// 	t0 = range "hello":string
+//
+type Range struct {
+	register
+	X Value // string or map
+}
+
+// The Next instruction reads and advances the (map or string)
+// iterator Iter and returns a 3-tuple value (ok, k, v). If the
+// iterator is not exhausted, ok is true and k and v are the next
+// elements of the domain and range, respectively. Otherwise ok is
+// false and k and v are undefined.
+//
+// Components of the tuple are accessed using Extract.
+//
+// The IsString field distinguishes iterators over strings from those
+// over maps, as the Type() alone is insufficient: consider
+// map[int]rune.
+//
+// Type() returns a *types.Tuple for the triple (ok, k, v).
+// The types of k and/or v may be types.Invalid.
+//
+// Example printed form:
+// 	t1 = next t0
+//
+type Next struct {
+	register
+	Iter     Value
+	IsString bool // true => string iterator; false => map iterator.
+}
+
+// The TypeAssert instruction tests whether interface value X has type
+// AssertedType.
+//
+// If !CommaOk, on success it returns v, the result of the conversion
+// (defined below); on failure it panics.
+//
+// If CommaOk: on success it returns a pair (v, true) where v is the
+// result of the conversion; on failure it returns (z, false) where z
+// is AssertedType's zero value. The components of the pair must be
+// accessed using the Extract instruction.
+//
+// If AssertedType is a concrete type, TypeAssert checks whether the
+// dynamic type in interface X is equal to it, and if so, the result
+// of the conversion is a copy of the value in the interface.
+//
+// If AssertedType is an interface, TypeAssert checks whether the
+// dynamic type of the interface is assignable to it, and if so, the
+// result of the conversion is a copy of the interface value X.
+// If AssertedType is a superinterface of X.Type(), the operation will
+// fail iff the operand is nil. (Contrast with ChangeInterface, which
+// performs no nil-check.)
+//
+// Type() reflects the actual type of the result, possibly a
+// 2-types.Tuple; AssertedType is the asserted type.
+//
+// Pos() returns the ast.CallExpr.Lparen if the instruction arose from
+// an explicit T(e) conversion; the ast.TypeAssertExpr.Lparen if the
+// instruction arose from an explicit e.(T) operation; or the
+// ast.CaseClause.Case if the instruction arose from a case of a
+// type-switch statement.
+// +// Example printed form: +// t1 = typeassert t0.(int) +// t3 = typeassert,ok t2.(T) +// +type TypeAssert struct { + register + X Value + AssertedType types.Type + CommaOk bool +} + +// The Extract instruction yields component Index of Tuple. +// +// This is used to access the results of instructions with multiple +// return values, such as Call, TypeAssert, Next, UnOp(ARROW) and +// IndexExpr(Map). +// +// Example printed form: +// t1 = extract t0 #1 +// +type Extract struct { + register + Tuple Value + Index int +} + +// Instructions executed for effect. They do not yield a value. -------------------- + +// The Jump instruction transfers control to the sole successor of its +// owning block. +// +// A Jump must be the last instruction of its containing BasicBlock. +// +// Pos() returns NoPos. +// +// Example printed form: +// jump done +// +type Jump struct { + anInstruction +} + +// The If instruction transfers control to one of the two successors +// of its owning block, depending on the boolean Cond: the first if +// true, the second if false. +// +// An If instruction must be the last instruction of its containing +// BasicBlock. +// +// Pos() returns NoPos. +// +// Example printed form: +// if t0 goto done else body +// +type If struct { + anInstruction + Cond Value +} + +// The Return instruction returns values and control back to the calling +// function. +// +// len(Results) is always equal to the number of results in the +// function's signature. +// +// If len(Results) > 1, Return returns a tuple value with the specified +// components which the caller must access using Extract instructions. +// +// There is no instruction to return a ready-made tuple like those +// returned by a "value,ok"-mode TypeAssert, Lookup or UnOp(ARROW) or +// a tail-call to a function with multiple result parameters. +// +// Return must be the last instruction of its containing BasicBlock. +// Such a block has no successors. +// +// Pos() returns the ast.ReturnStmt.Return, if explicit in the source. +// +// Example printed form: +// return +// return nil:I, 2:int +// +type Return struct { + anInstruction + Results []Value + pos token.Pos +} + +// The RunDefers instruction pops and invokes the entire stack of +// procedure calls pushed by Defer instructions in this function. +// +// It is legal to encounter multiple 'rundefers' instructions in a +// single control-flow path through a function; this is useful in +// the combined init() function, for example. +// +// Pos() returns NoPos. +// +// Example printed form: +// rundefers +// +type RunDefers struct { + anInstruction +} + +// The Panic instruction initiates a panic with value X. +// +// A Panic instruction must be the last instruction of its containing +// BasicBlock, which must have no successors. +// +// NB: 'go panic(x)' and 'defer panic(x)' do not use this instruction; +// they are treated as calls to a built-in function. +// +// Pos() returns the ast.CallExpr.Lparen if this panic was explicit +// in the source. +// +// Example printed form: +// panic t0 +// +type Panic struct { + anInstruction + X Value // an interface{} + pos token.Pos +} + +// The Go instruction creates a new goroutine and calls the specified +// function within it. +// +// See CallCommon for generic function call documentation. +// +// Pos() returns the ast.GoStmt.Go. 
+// +// Example printed form: +// go println(t0, t1) +// go t3() +// go invoke t5.Println(...t6) +// +type Go struct { + anInstruction + Call CallCommon + pos token.Pos +} + +// The Defer instruction pushes the specified call onto a stack of +// functions to be called by a RunDefers instruction or by a panic. +// +// See CallCommon for generic function call documentation. +// +// Pos() returns the ast.DeferStmt.Defer. +// +// Example printed form: +// defer println(t0, t1) +// defer t3() +// defer invoke t5.Println(...t6) +// +type Defer struct { + anInstruction + Call CallCommon + pos token.Pos +} + +// The Send instruction sends X on channel Chan. +// +// Pos() returns the ast.SendStmt.Arrow, if explicit in the source. +// +// Example printed form: +// send t0 <- t1 +// +type Send struct { + anInstruction + Chan, X Value + pos token.Pos +} + +// The Store instruction stores Val at address Addr. +// Stores can be of arbitrary types. +// +// Pos() returns the position of the source-level construct most closely +// associated with the memory store operation. +// Since implicit memory stores are numerous and varied and depend upon +// implementation choices, the details are not specified. +// +// Example printed form: +// *x = y +// +type Store struct { + anInstruction + Addr Value + Val Value + pos token.Pos +} + +// The BlankStore instruction is emitted for assignments to the blank +// identifier. +// +// BlankStore is a pseudo-instruction: it has no dynamic effect. +// +// Pos() returns NoPos. +// +// Example printed form: +// _ = t0 +// +type BlankStore struct { + anInstruction + Val Value +} + +// The MapUpdate instruction updates the association of Map[Key] to +// Value. +// +// Pos() returns the ast.KeyValueExpr.Colon or ast.IndexExpr.Lbrack, +// if explicit in the source. +// +// Example printed form: +// t0[t1] = t2 +// +type MapUpdate struct { + anInstruction + Map Value + Key Value + Value Value + pos token.Pos +} + +// A DebugRef instruction maps a source-level expression Expr to the +// SSA value X that represents the value (!IsAddr) or address (IsAddr) +// of that expression. +// +// DebugRef is a pseudo-instruction: it has no dynamic effect. +// +// Pos() returns Expr.Pos(), the start position of the source-level +// expression. This is not the same as the "designated" token as +// documented at Value.Pos(). e.g. CallExpr.Pos() does not return the +// position of the ("designated") Lparen token. +// +// If Expr is an *ast.Ident denoting a var or func, Object() returns +// the object; though this information can be obtained from the type +// checker, including it here greatly facilitates debugging. +// For non-Ident expressions, Object() returns nil. +// +// DebugRefs are generated only for functions built with debugging +// enabled; see Package.SetDebugMode() and the GlobalDebug builder +// mode flag. +// +// DebugRefs are not emitted for ast.Idents referring to constants or +// predeclared identifiers, since they are trivial and numerous. +// Nor are they emitted for ast.ParenExprs. +// +// (By representing these as instructions, rather than out-of-band, +// consistency is maintained during transformation passes by the +// ordinary SSA renaming machinery.) 
+// +// Example printed form: +// ; *ast.CallExpr @ 102:9 is t5 +// ; var x float64 @ 109:72 is x +// ; address of *ast.CompositeLit @ 216:10 is t0 +// +type DebugRef struct { + anInstruction + Expr ast.Expr // the referring expression (never *ast.ParenExpr) + object types.Object // the identity of the source var/func + IsAddr bool // Expr is addressable and X is the address it denotes + X Value // the value or address of Expr +} + +// Embeddable mix-ins and helpers for common parts of other structs. ----------- + +// register is a mix-in embedded by all SSA values that are also +// instructions, i.e. virtual registers, and provides a uniform +// implementation of most of the Value interface: Value.Name() is a +// numbered register (e.g. "t0"); the other methods are field accessors. +// +// Temporary names are automatically assigned to each register on +// completion of building a function in SSA form. +// +// Clients must not assume that the 'id' value (and the Name() derived +// from it) is unique within a function. As always in this API, +// semantics are determined only by identity; names exist only to +// facilitate debugging. +// +type register struct { + anInstruction + num int // "name" of virtual register, e.g. "t0". Not guaranteed unique. + typ types.Type // type of virtual register + pos token.Pos // position of source expression, or NoPos + referrers []Instruction +} + +// anInstruction is a mix-in embedded by all Instructions. +// It provides the implementations of the Block and setBlock methods. +type anInstruction struct { + block *BasicBlock // the basic block of this instruction +} + +// CallCommon is contained by Go, Defer and Call to hold the +// common parts of a function or method call. +// +// Each CallCommon exists in one of two modes, function call and +// interface method invocation, or "call" and "invoke" for short. +// +// 1. "call" mode: when Method is nil (!IsInvoke), a CallCommon +// represents an ordinary function call of the value in Value, +// which may be a *Builtin, a *Function or any other value of kind +// 'func'. +// +// Value may be one of: +// (a) a *Function, indicating a statically dispatched call +// to a package-level function, an anonymous function, or +// a method of a named type. +// (b) a *MakeClosure, indicating an immediately applied +// function literal with free variables. +// (c) a *Builtin, indicating a statically dispatched call +// to a built-in function. +// (d) any other value, indicating a dynamically dispatched +// function call. +// StaticCallee returns the identity of the callee in cases +// (a) and (b), nil otherwise. +// +// Args contains the arguments to the call. If Value is a method, +// Args[0] contains the receiver parameter. +// +// Example printed form: +// t2 = println(t0, t1) +// go t3() +// defer t5(...t6) +// +// 2. "invoke" mode: when Method is non-nil (IsInvoke), a CallCommon +// represents a dynamically dispatched call to an interface method. +// In this mode, Value is the interface value and Method is the +// interface's abstract method. Note: an abstract method may be +// shared by multiple interfaces due to embedding; Value.Type() +// provides the specific interface used for this call. +// +// Value is implicitly supplied to the concrete method implementation +// as the receiver parameter; in other words, Args[0] holds not the +// receiver but the first true argument. 
+// +// Example printed form: +// t1 = invoke t0.String() +// go invoke t3.Run(t2) +// defer invoke t4.Handle(...t5) +// +// For all calls to variadic functions (Signature().Variadic()), +// the last element of Args is a slice. +// +type CallCommon struct { + Value Value // receiver (invoke mode) or func value (call mode) + Method *types.Func // abstract method (invoke mode) + Args []Value // actual parameters (in static method call, includes receiver) + pos token.Pos // position of CallExpr.Lparen, iff explicit in source +} + +// IsInvoke returns true if this call has "invoke" (not "call") mode. +func (c *CallCommon) IsInvoke() bool { + return c.Method != nil +} + +func (c *CallCommon) Pos() token.Pos { return c.pos } + +// Signature returns the signature of the called function. +// +// For an "invoke"-mode call, the signature of the interface method is +// returned. +// +// In either "call" or "invoke" mode, if the callee is a method, its +// receiver is represented by sig.Recv, not sig.Params().At(0). +// +func (c *CallCommon) Signature() *types.Signature { + if c.Method != nil { + return c.Method.Type().(*types.Signature) + } + return c.Value.Type().Underlying().(*types.Signature) +} + +// StaticCallee returns the callee if this is a trivially static +// "call"-mode call to a function. +func (c *CallCommon) StaticCallee() *Function { + switch fn := c.Value.(type) { + case *Function: + return fn + case *MakeClosure: + return fn.Fn.(*Function) + } + return nil +} + +// Description returns a description of the mode of this call suitable +// for a user interface, e.g., "static method call". +func (c *CallCommon) Description() string { + switch fn := c.Value.(type) { + case *Builtin: + return "built-in function call" + case *MakeClosure: + return "static function closure call" + case *Function: + if fn.Signature.Recv() != nil { + return "static method call" + } + return "static function call" + } + if c.IsInvoke() { + return "dynamic method call" // ("invoke" mode) + } + return "dynamic function call" +} + +// The CallInstruction interface, implemented by *Go, *Defer and *Call, +// exposes the common parts of function-calling instructions, +// yet provides a way back to the Value defined by *Call alone. 
+// +type CallInstruction interface { + Instruction + Common() *CallCommon // returns the common parts of the call + Value() *Call // returns the result value of the call (*Call) or nil (*Go, *Defer) +} + +func (s *Call) Common() *CallCommon { return &s.Call } +func (s *Defer) Common() *CallCommon { return &s.Call } +func (s *Go) Common() *CallCommon { return &s.Call } + +func (s *Call) Value() *Call { return s } +func (s *Defer) Value() *Call { return nil } +func (s *Go) Value() *Call { return nil } + +func (v *Builtin) Type() types.Type { return v.sig } +func (v *Builtin) Name() string { return v.name } +func (*Builtin) Referrers() *[]Instruction { return nil } +func (v *Builtin) Pos() token.Pos { return token.NoPos } +func (v *Builtin) Object() types.Object { return types.Universe.Lookup(v.name) } +func (v *Builtin) Parent() *Function { return nil } + +func (v *FreeVar) Type() types.Type { return v.typ } +func (v *FreeVar) Name() string { return v.name } +func (v *FreeVar) Referrers() *[]Instruction { return &v.referrers } +func (v *FreeVar) Pos() token.Pos { return v.pos } +func (v *FreeVar) Parent() *Function { return v.parent } + +func (v *Global) Type() types.Type { return v.typ } +func (v *Global) Name() string { return v.name } +func (v *Global) Parent() *Function { return nil } +func (v *Global) Pos() token.Pos { return v.pos } +func (v *Global) Referrers() *[]Instruction { return nil } +func (v *Global) Token() token.Token { return token.VAR } +func (v *Global) Object() types.Object { return v.object } +func (v *Global) String() string { return v.RelString(nil) } +func (v *Global) Package() *Package { return v.Pkg } +func (v *Global) RelString(from *types.Package) string { return relString(v, from) } + +func (v *Function) Name() string { return v.name } +func (v *Function) Type() types.Type { return v.Signature } +func (v *Function) Pos() token.Pos { return v.pos } +func (v *Function) Token() token.Token { return token.FUNC } +func (v *Function) Object() types.Object { return v.object } +func (v *Function) String() string { return v.RelString(nil) } +func (v *Function) Package() *Package { return v.Pkg } +func (v *Function) Parent() *Function { return v.parent } +func (v *Function) Referrers() *[]Instruction { + if v.parent != nil { + return &v.referrers + } + return nil +} + +func (v *Parameter) Type() types.Type { return v.typ } +func (v *Parameter) Name() string { return v.name } +func (v *Parameter) Object() types.Object { return v.object } +func (v *Parameter) Referrers() *[]Instruction { return &v.referrers } +func (v *Parameter) Pos() token.Pos { return v.pos } +func (v *Parameter) Parent() *Function { return v.parent } + +func (v *Alloc) Type() types.Type { return v.typ } +func (v *Alloc) Referrers() *[]Instruction { return &v.referrers } +func (v *Alloc) Pos() token.Pos { return v.pos } + +func (v *register) Type() types.Type { return v.typ } +func (v *register) setType(typ types.Type) { v.typ = typ } +func (v *register) Name() string { return fmt.Sprintf("t%d", v.num) } +func (v *register) setNum(num int) { v.num = num } +func (v *register) Referrers() *[]Instruction { return &v.referrers } +func (v *register) Pos() token.Pos { return v.pos } +func (v *register) setPos(pos token.Pos) { v.pos = pos } + +func (v *anInstruction) Parent() *Function { return v.block.parent } +func (v *anInstruction) Block() *BasicBlock { return v.block } +func (v *anInstruction) setBlock(block *BasicBlock) { v.block = block } +func (v *anInstruction) Referrers() *[]Instruction { return nil 
} + +func (t *Type) Name() string { return t.object.Name() } +func (t *Type) Pos() token.Pos { return t.object.Pos() } +func (t *Type) Type() types.Type { return t.object.Type() } +func (t *Type) Token() token.Token { return token.TYPE } +func (t *Type) Object() types.Object { return t.object } +func (t *Type) String() string { return t.RelString(nil) } +func (t *Type) Package() *Package { return t.pkg } +func (t *Type) RelString(from *types.Package) string { return relString(t, from) } + +func (c *NamedConst) Name() string { return c.object.Name() } +func (c *NamedConst) Pos() token.Pos { return c.object.Pos() } +func (c *NamedConst) String() string { return c.RelString(nil) } +func (c *NamedConst) Type() types.Type { return c.object.Type() } +func (c *NamedConst) Token() token.Token { return token.CONST } +func (c *NamedConst) Object() types.Object { return c.object } +func (c *NamedConst) Package() *Package { return c.pkg } +func (c *NamedConst) RelString(from *types.Package) string { return relString(c, from) } + +// Func returns the package-level function of the specified name, +// or nil if not found. +// +func (p *Package) Func(name string) (f *Function) { + f, _ = p.Members[name].(*Function) + return +} + +// Var returns the package-level variable of the specified name, +// or nil if not found. +// +func (p *Package) Var(name string) (g *Global) { + g, _ = p.Members[name].(*Global) + return +} + +// Const returns the package-level constant of the specified name, +// or nil if not found. +// +func (p *Package) Const(name string) (c *NamedConst) { + c, _ = p.Members[name].(*NamedConst) + return +} + +// Type returns the package-level type of the specified name, +// or nil if not found. +// +func (p *Package) Type(name string) (t *Type) { + t, _ = p.Members[name].(*Type) + return +} + +func (v *Call) Pos() token.Pos { return v.Call.pos } +func (s *Defer) Pos() token.Pos { return s.pos } +func (s *Go) Pos() token.Pos { return s.pos } +func (s *MapUpdate) Pos() token.Pos { return s.pos } +func (s *Panic) Pos() token.Pos { return s.pos } +func (s *Return) Pos() token.Pos { return s.pos } +func (s *Send) Pos() token.Pos { return s.pos } +func (s *Store) Pos() token.Pos { return s.pos } +func (s *BlankStore) Pos() token.Pos { return token.NoPos } +func (s *If) Pos() token.Pos { return token.NoPos } +func (s *Jump) Pos() token.Pos { return token.NoPos } +func (s *RunDefers) Pos() token.Pos { return token.NoPos } +func (s *DebugRef) Pos() token.Pos { return s.Expr.Pos() } + +// Operands. 
+ +func (v *Alloc) Operands(rands []*Value) []*Value { + return rands +} + +func (v *BinOp) Operands(rands []*Value) []*Value { + return append(rands, &v.X, &v.Y) +} + +func (c *CallCommon) Operands(rands []*Value) []*Value { + rands = append(rands, &c.Value) + for i := range c.Args { + rands = append(rands, &c.Args[i]) + } + return rands +} + +func (s *Go) Operands(rands []*Value) []*Value { + return s.Call.Operands(rands) +} + +func (s *Call) Operands(rands []*Value) []*Value { + return s.Call.Operands(rands) +} + +func (s *Defer) Operands(rands []*Value) []*Value { + return s.Call.Operands(rands) +} + +func (v *ChangeInterface) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (v *ChangeType) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (v *Convert) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (s *DebugRef) Operands(rands []*Value) []*Value { + return append(rands, &s.X) +} + +func (v *Extract) Operands(rands []*Value) []*Value { + return append(rands, &v.Tuple) +} + +func (v *Field) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (v *FieldAddr) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (s *If) Operands(rands []*Value) []*Value { + return append(rands, &s.Cond) +} + +func (v *Index) Operands(rands []*Value) []*Value { + return append(rands, &v.X, &v.Index) +} + +func (v *IndexAddr) Operands(rands []*Value) []*Value { + return append(rands, &v.X, &v.Index) +} + +func (*Jump) Operands(rands []*Value) []*Value { + return rands +} + +func (v *Lookup) Operands(rands []*Value) []*Value { + return append(rands, &v.X, &v.Index) +} + +func (v *MakeChan) Operands(rands []*Value) []*Value { + return append(rands, &v.Size) +} + +func (v *MakeClosure) Operands(rands []*Value) []*Value { + rands = append(rands, &v.Fn) + for i := range v.Bindings { + rands = append(rands, &v.Bindings[i]) + } + return rands +} + +func (v *MakeInterface) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (v *MakeMap) Operands(rands []*Value) []*Value { + return append(rands, &v.Reserve) +} + +func (v *MakeSlice) Operands(rands []*Value) []*Value { + return append(rands, &v.Len, &v.Cap) +} + +func (v *MapUpdate) Operands(rands []*Value) []*Value { + return append(rands, &v.Map, &v.Key, &v.Value) +} + +func (v *Next) Operands(rands []*Value) []*Value { + return append(rands, &v.Iter) +} + +func (s *Panic) Operands(rands []*Value) []*Value { + return append(rands, &s.X) +} + +func (v *Sigma) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (v *Phi) Operands(rands []*Value) []*Value { + for i := range v.Edges { + rands = append(rands, &v.Edges[i]) + } + return rands +} + +func (v *Range) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (s *Return) Operands(rands []*Value) []*Value { + for i := range s.Results { + rands = append(rands, &s.Results[i]) + } + return rands +} + +func (*RunDefers) Operands(rands []*Value) []*Value { + return rands +} + +func (v *Select) Operands(rands []*Value) []*Value { + for i := range v.States { + rands = append(rands, &v.States[i].Chan, &v.States[i].Send) + } + return rands +} + +func (s *Send) Operands(rands []*Value) []*Value { + return append(rands, &s.Chan, &s.X) +} + +func (v *Slice) Operands(rands []*Value) []*Value { + return append(rands, &v.X, &v.Low, &v.High, &v.Max) +} + +func (s *Store) Operands(rands []*Value) []*Value { + return append(rands, &s.Addr, 
&s.Val) +} + +func (s *BlankStore) Operands(rands []*Value) []*Value { + return append(rands, &s.Val) +} + +func (v *TypeAssert) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (v *UnOp) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +// Non-Instruction Values: +func (v *Builtin) Operands(rands []*Value) []*Value { return rands } +func (v *FreeVar) Operands(rands []*Value) []*Value { return rands } +func (v *Const) Operands(rands []*Value) []*Value { return rands } +func (v *Function) Operands(rands []*Value) []*Value { return rands } +func (v *Global) Operands(rands []*Value) []*Value { return rands } +func (v *Parameter) Operands(rands []*Value) []*Value { return rands } diff --git a/vendor/honnef.co/go/tools/ssa/staticcheck.conf b/vendor/honnef.co/go/tools/ssa/staticcheck.conf new file mode 100644 index 000000000..d7b38bc35 --- /dev/null +++ b/vendor/honnef.co/go/tools/ssa/staticcheck.conf @@ -0,0 +1,3 @@ +# ssa/... is mostly imported from upstream and we don't want to +# deviate from it too much, hence disabling SA1019 +checks = ["inherit", "-SA1019"] diff --git a/vendor/honnef.co/go/tools/ssa/testmain.go b/vendor/honnef.co/go/tools/ssa/testmain.go new file mode 100644 index 000000000..8ec15ba50 --- /dev/null +++ b/vendor/honnef.co/go/tools/ssa/testmain.go @@ -0,0 +1,271 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// CreateTestMainPackage synthesizes a main package that runs all the +// tests of the supplied packages. +// It is closely coupled to $GOROOT/src/cmd/go/test.go and $GOROOT/src/testing. +// +// TODO(adonovan): throws this all away now that x/tools/go/packages +// provides access to the actual synthetic test main files. + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/types" + "log" + "os" + "strings" + "text/template" +) + +// FindTests returns the Test, Benchmark, and Example functions +// (as defined by "go test") defined in the specified package, +// and its TestMain function, if any. +// +// Deprecated: use x/tools/go/packages to access synthetic testmain packages. +func FindTests(pkg *Package) (tests, benchmarks, examples []*Function, main *Function) { + prog := pkg.Prog + + // The first two of these may be nil: if the program doesn't import "testing", + // it can't contain any tests, but it may yet contain Examples. + var testSig *types.Signature // func(*testing.T) + var benchmarkSig *types.Signature // func(*testing.B) + var exampleSig = types.NewSignature(nil, nil, nil, false) // func() + + // Obtain the types from the parameters of testing.MainStart. + if testingPkg := prog.ImportedPackage("testing"); testingPkg != nil { + mainStart := testingPkg.Func("MainStart") + params := mainStart.Signature.Params() + testSig = funcField(params.At(1).Type()) + benchmarkSig = funcField(params.At(2).Type()) + + // Does the package define this function? + // func TestMain(*testing.M) + if f := pkg.Func("TestMain"); f != nil { + sig := f.Type().(*types.Signature) + starM := mainStart.Signature.Results().At(0).Type() // *testing.M + if sig.Results().Len() == 0 && + sig.Params().Len() == 1 && + types.Identical(sig.Params().At(0).Type(), starM) { + main = f + } + } + } + + // TODO(adonovan): use a stable order, e.g. lexical. 
+ for _, mem := range pkg.Members { + if f, ok := mem.(*Function); ok && + ast.IsExported(f.Name()) && + strings.HasSuffix(prog.Fset.Position(f.Pos()).Filename, "_test.go") { + + switch { + case testSig != nil && isTestSig(f, "Test", testSig): + tests = append(tests, f) + case benchmarkSig != nil && isTestSig(f, "Benchmark", benchmarkSig): + benchmarks = append(benchmarks, f) + case isTestSig(f, "Example", exampleSig): + examples = append(examples, f) + default: + continue + } + } + } + return +} + +// Like isTest, but checks the signature too. +func isTestSig(f *Function, prefix string, sig *types.Signature) bool { + return isTest(f.Name(), prefix) && types.Identical(f.Signature, sig) +} + +// Given the type of one of the three slice parameters of testing.Main, +// returns the function type. +func funcField(slice types.Type) *types.Signature { + return slice.(*types.Slice).Elem().Underlying().(*types.Struct).Field(1).Type().(*types.Signature) +} + +// isTest tells whether name looks like a test (or benchmark, according to prefix). +// It is a Test (say) if there is a character after Test that is not a lower-case letter. +// We don't want TesticularCancer. +// Plundered from $GOROOT/src/cmd/go/test.go +func isTest(name, prefix string) bool { + if !strings.HasPrefix(name, prefix) { + return false + } + if len(name) == len(prefix) { // "Test" is ok + return true + } + return ast.IsExported(name[len(prefix):]) +} + +// CreateTestMainPackage creates and returns a synthetic "testmain" +// package for the specified package if it defines tests, benchmarks or +// executable examples, or nil otherwise. The new package is named +// "main" and provides a function named "main" that runs the tests, +// similar to the one that would be created by the 'go test' tool. +// +// Subsequent calls to prog.AllPackages include the new package. +// The package pkg must belong to the program prog. +// +// Deprecated: use x/tools/go/packages to access synthetic testmain packages. +func (prog *Program) CreateTestMainPackage(pkg *Package) *Package { + if pkg.Prog != prog { + log.Fatal("Package does not belong to Program") + } + + // Template data + var data struct { + Pkg *Package + Tests, Benchmarks, Examples []*Function + Main *Function + Go18 bool + } + data.Pkg = pkg + + // Enumerate tests. + data.Tests, data.Benchmarks, data.Examples, data.Main = FindTests(pkg) + if data.Main == nil && + data.Tests == nil && data.Benchmarks == nil && data.Examples == nil { + return nil + } + + // Synthesize source for testmain package. + path := pkg.Pkg.Path() + "$testmain" + tmpl := testmainTmpl + if testingPkg := prog.ImportedPackage("testing"); testingPkg != nil { + // In Go 1.8, testing.MainStart's first argument is an interface, not a func. + data.Go18 = types.IsInterface(testingPkg.Func("MainStart").Signature.Params().At(0).Type()) + } else { + // The program does not import "testing", but FindTests + // returned non-nil, which must mean there were Examples + // but no Test, Benchmark, or TestMain functions. + + // We'll simply call them from testmain.main; this will + // ensure they don't panic, but will not check any + // "Output:" comments. + // (We should not execute an Example that has no + // "Output:" comment, but it's impossible to tell here.) 
+ tmpl = examplesOnlyTmpl + } + var buf bytes.Buffer + if err := tmpl.Execute(&buf, data); err != nil { + log.Fatalf("internal error expanding template for %s: %v", path, err) + } + if false { // debugging + fmt.Fprintln(os.Stderr, buf.String()) + } + + // Parse and type-check the testmain package. + f, err := parser.ParseFile(prog.Fset, path+".go", &buf, parser.Mode(0)) + if err != nil { + log.Fatalf("internal error parsing %s: %v", path, err) + } + conf := types.Config{ + DisableUnusedImportCheck: true, + Importer: importer{pkg}, + } + files := []*ast.File{f} + info := &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + testmainPkg, err := conf.Check(path, prog.Fset, files, info) + if err != nil { + log.Fatalf("internal error type-checking %s: %v", path, err) + } + + // Create and build SSA code. + testmain := prog.CreatePackage(testmainPkg, files, info, false) + testmain.SetDebugMode(false) + testmain.Build() + testmain.Func("main").Synthetic = "test main function" + testmain.Func("init").Synthetic = "package initializer" + return testmain +} + +// An implementation of types.Importer for an already loaded SSA program. +type importer struct { + pkg *Package // package under test; may be non-importable +} + +func (imp importer) Import(path string) (*types.Package, error) { + if p := imp.pkg.Prog.ImportedPackage(path); p != nil { + return p.Pkg, nil + } + if path == imp.pkg.Pkg.Path() { + return imp.pkg.Pkg, nil + } + return nil, fmt.Errorf("not found") // can't happen +} + +var testmainTmpl = template.Must(template.New("testmain").Parse(` +package main + +import "io" +import "os" +import "testing" +import p {{printf "%q" .Pkg.Pkg.Path}} + +{{if .Go18}} +type deps struct{} + +func (deps) ImportPath() string { return "" } +func (deps) MatchString(pat, str string) (bool, error) { return true, nil } +func (deps) StartCPUProfile(io.Writer) error { return nil } +func (deps) StartTestLog(io.Writer) {} +func (deps) StopCPUProfile() {} +func (deps) StopTestLog() error { return nil } +func (deps) WriteHeapProfile(io.Writer) error { return nil } +func (deps) WriteProfileTo(string, io.Writer, int) error { return nil } + +var match deps +{{else}} +func match(_, _ string) (bool, error) { return true, nil } +{{end}} + +func main() { + tests := []testing.InternalTest{ +{{range .Tests}} + { {{printf "%q" .Name}}, p.{{.Name}} }, +{{end}} + } + benchmarks := []testing.InternalBenchmark{ +{{range .Benchmarks}} + { {{printf "%q" .Name}}, p.{{.Name}} }, +{{end}} + } + examples := []testing.InternalExample{ +{{range .Examples}} + {Name: {{printf "%q" .Name}}, F: p.{{.Name}}}, +{{end}} + } + m := testing.MainStart(match, tests, benchmarks, examples) +{{with .Main}} + p.{{.Name}}(m) +{{else}} + os.Exit(m.Run()) +{{end}} +} + +`)) + +var examplesOnlyTmpl = template.Must(template.New("examples").Parse(` +package main + +import p {{printf "%q" .Pkg.Pkg.Path}} + +func main() { +{{range .Examples}} + p.{{.Name}}() +{{end}} +} +`)) diff --git a/vendor/honnef.co/go/tools/ssa/util.go b/vendor/honnef.co/go/tools/ssa/util.go new file mode 100644 index 000000000..ddb118460 --- /dev/null +++ b/vendor/honnef.co/go/tools/ssa/util.go @@ -0,0 +1,119 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file defines a number of miscellaneous utility functions. + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "io" + "os" + + "golang.org/x/tools/go/ast/astutil" +) + +//// AST utilities + +func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) } + +// isBlankIdent returns true iff e is an Ident with name "_". +// They have no associated types.Object, and thus no type. +// +func isBlankIdent(e ast.Expr) bool { + id, ok := e.(*ast.Ident) + return ok && id.Name == "_" +} + +//// Type utilities. Some of these belong in go/types. + +// isPointer returns true for types whose underlying type is a pointer. +func isPointer(typ types.Type) bool { + _, ok := typ.Underlying().(*types.Pointer) + return ok +} + +func isInterface(T types.Type) bool { return types.IsInterface(T) } + +// deref returns a pointer's element type; otherwise it returns typ. +func deref(typ types.Type) types.Type { + if p, ok := typ.Underlying().(*types.Pointer); ok { + return p.Elem() + } + return typ +} + +// recvType returns the receiver type of method obj. +func recvType(obj *types.Func) types.Type { + return obj.Type().(*types.Signature).Recv().Type() +} + +// DefaultType returns the default "typed" type for an "untyped" type; +// it returns the incoming type for all other types. The default type +// for untyped nil is untyped nil. +// +// Exported to ssa/interp. +// +// TODO(adonovan): use go/types.DefaultType after 1.8. +// +func DefaultType(typ types.Type) types.Type { + if t, ok := typ.(*types.Basic); ok { + k := t.Kind() + switch k { + case types.UntypedBool: + k = types.Bool + case types.UntypedInt: + k = types.Int + case types.UntypedRune: + k = types.Rune + case types.UntypedFloat: + k = types.Float64 + case types.UntypedComplex: + k = types.Complex128 + case types.UntypedString: + k = types.String + } + typ = types.Typ[k] + } + return typ +} + +// logStack prints the formatted "start" message to stderr and +// returns a closure that prints the corresponding "end" message. +// Call using 'defer logStack(...)()' to show builder stack on panic. +// Don't forget trailing parens! +// +func logStack(format string, args ...interface{}) func() { + msg := fmt.Sprintf(format, args...) + io.WriteString(os.Stderr, msg) + io.WriteString(os.Stderr, "\n") + return func() { + io.WriteString(os.Stderr, msg) + io.WriteString(os.Stderr, " end\n") + } +} + +// newVar creates a 'var' for use in a types.Tuple. +func newVar(name string, typ types.Type) *types.Var { + return types.NewParam(token.NoPos, nil, name, typ) +} + +// anonVar creates an anonymous 'var' for use in a types.Tuple. +func anonVar(typ types.Type) *types.Var { + return newVar("", typ) +} + +var lenResults = types.NewTuple(anonVar(tInt)) + +// makeLen returns the len builtin specialized to type func(T)int. +func makeLen(T types.Type) *Builtin { + lenParams := types.NewTuple(anonVar(T)) + return &Builtin{ + name: "len", + sig: types.NewSignature(nil, lenParams, lenResults, false), + } +} diff --git a/vendor/honnef.co/go/tools/ssa/wrappers.go b/vendor/honnef.co/go/tools/ssa/wrappers.go new file mode 100644 index 000000000..a4ae71d8c --- /dev/null +++ b/vendor/honnef.co/go/tools/ssa/wrappers.go @@ -0,0 +1,290 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssa + +// This file defines synthesis of Functions that delegate to declared +// methods; they come in three kinds: +// +// (1) wrappers: methods that wrap declared methods, performing +// implicit pointer indirections and embedded field selections. +// +// (2) thunks: funcs that wrap declared methods. Like wrappers, +// thunks perform indirections and field selections. The thunk's +// first parameter is used as the receiver for the method call. +// +// (3) bounds: funcs that wrap declared methods. The bound's sole +// free variable, supplied by a closure, is used as the receiver +// for the method call. No indirections or field selections are +// performed since they can be done before the call. + +import ( + "fmt" + + "go/types" +) + +// -- wrappers ----------------------------------------------------------- + +// makeWrapper returns a synthetic method that delegates to the +// declared method denoted by meth.Obj(), first performing any +// necessary pointer indirections or field selections implied by meth. +// +// The resulting method's receiver type is meth.Recv(). +// +// This function is versatile but quite subtle! Consider the +// following axes of variation when making changes: +// - optional receiver indirection +// - optional implicit field selections +// - meth.Obj() may denote a concrete or an interface method +// - the result may be a thunk or a wrapper. +// +// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) +// +func makeWrapper(prog *Program, sel *types.Selection) *Function { + obj := sel.Obj().(*types.Func) // the declared function + sig := sel.Type().(*types.Signature) // type of this wrapper + + var recv *types.Var // wrapper's receiver or thunk's params[0] + name := obj.Name() + var description string + var start int // first regular param + if sel.Kind() == types.MethodExpr { + name += "$thunk" + description = "thunk" + recv = sig.Params().At(0) + start = 1 + } else { + description = "wrapper" + recv = sig.Recv() + } + + description = fmt.Sprintf("%s for %s", description, sel.Obj()) + if prog.mode&LogSource != 0 { + defer logStack("make %s to (%s)", description, recv.Type())() + } + fn := &Function{ + name: name, + method: sel, + object: obj, + Signature: sig, + Synthetic: description, + Prog: prog, + pos: obj.Pos(), + } + fn.startBody() + fn.addSpilledParam(recv) + createParams(fn, start) + + indices := sel.Index() + + var v Value = fn.Locals[0] // spilled receiver + if isPointer(sel.Recv()) { + v = emitLoad(fn, v) + + // For simple indirection wrappers, perform an informative nil-check: + // "value method (T).f called using nil *T pointer" + if len(indices) == 1 && !isPointer(recvType(obj)) { + var c Call + c.Call.Value = &Builtin{ + name: "ssa:wrapnilchk", + sig: types.NewSignature(nil, + types.NewTuple(anonVar(sel.Recv()), anonVar(tString), anonVar(tString)), + types.NewTuple(anonVar(sel.Recv())), false), + } + c.Call.Args = []Value{ + v, + stringConst(deref(sel.Recv()).String()), + stringConst(sel.Obj().Name()), + } + c.setType(v.Type()) + v = fn.emit(&c) + } + } + + // Invariant: v is a pointer, either + // value of *A receiver param, or + // address of A spilled receiver. + + // We use pointer arithmetic (FieldAddr possibly followed by + // Load) in preference to value extraction (Field possibly + // preceded by Load). + + v = emitImplicitSelections(fn, v, indices[:len(indices)-1]) + + // Invariant: v is a pointer, either + // value of implicit *C field, or + // address of implicit C field. 
+ + var c Call + if r := recvType(obj); !isInterface(r) { // concrete method + if !isPointer(r) { + v = emitLoad(fn, v) + } + c.Call.Value = prog.declaredFunc(obj) + c.Call.Args = append(c.Call.Args, v) + } else { + c.Call.Method = obj + c.Call.Value = emitLoad(fn, v) + } + for _, arg := range fn.Params[1:] { + c.Call.Args = append(c.Call.Args, arg) + } + emitTailCall(fn, &c) + fn.finishBody() + return fn +} + +// createParams creates parameters for wrapper method fn based on its +// Signature.Params, which do not include the receiver. +// start is the index of the first regular parameter to use. +// +func createParams(fn *Function, start int) { + tparams := fn.Signature.Params() + for i, n := start, tparams.Len(); i < n; i++ { + fn.addParamObj(tparams.At(i)) + } +} + +// -- bounds ----------------------------------------------------------- + +// makeBound returns a bound method wrapper (or "bound"), a synthetic +// function that delegates to a concrete or interface method denoted +// by obj. The resulting function has no receiver, but has one free +// variable which will be used as the method's receiver in the +// tail-call. +// +// Use MakeClosure with such a wrapper to construct a bound method +// closure. e.g.: +// +// type T int or: type T interface { meth() } +// func (t T) meth() +// var t T +// f := t.meth +// f() // calls t.meth() +// +// f is a closure of a synthetic wrapper defined as if by: +// +// f := func() { return t.meth() } +// +// Unlike makeWrapper, makeBound need perform no indirection or field +// selections because that can be done before the closure is +// constructed. +// +// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu) +// +func makeBound(prog *Program, obj *types.Func) *Function { + prog.methodsMu.Lock() + defer prog.methodsMu.Unlock() + fn, ok := prog.bounds[obj] + if !ok { + description := fmt.Sprintf("bound method wrapper for %s", obj) + if prog.mode&LogSource != 0 { + defer logStack("%s", description)() + } + fn = &Function{ + name: obj.Name() + "$bound", + object: obj, + Signature: changeRecv(obj.Type().(*types.Signature), nil), // drop receiver + Synthetic: description, + Prog: prog, + pos: obj.Pos(), + } + + fv := &FreeVar{name: "recv", typ: recvType(obj), parent: fn} + fn.FreeVars = []*FreeVar{fv} + fn.startBody() + createParams(fn, 0) + var c Call + + if !isInterface(recvType(obj)) { // concrete + c.Call.Value = prog.declaredFunc(obj) + c.Call.Args = []Value{fv} + } else { + c.Call.Value = fv + c.Call.Method = obj + } + for _, arg := range fn.Params { + c.Call.Args = append(c.Call.Args, arg) + } + emitTailCall(fn, &c) + fn.finishBody() + + prog.bounds[obj] = fn + } + return fn +} + +// -- thunks ----------------------------------------------------------- + +// makeThunk returns a thunk, a synthetic function that delegates to a +// concrete or interface method denoted by sel.Obj(). The resulting +// function has no receiver, but has an additional (first) regular +// parameter. +// +// Precondition: sel.Kind() == types.MethodExpr. +// +// type T int or: type T interface { meth() } +// func (t T) meth() +// f := T.meth +// var t T +// f(t) // calls t.meth() +// +// f is a synthetic wrapper defined as if by: +// +// f := func(t T) { return t.meth() } +// +// TODO(adonovan): opt: currently the stub is created even when used +// directly in a function call: C.f(i, 0). This is less efficient +// than inlining the stub. 
+// +// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu) +// +func makeThunk(prog *Program, sel *types.Selection) *Function { + if sel.Kind() != types.MethodExpr { + panic(sel) + } + + key := selectionKey{ + kind: sel.Kind(), + recv: sel.Recv(), + obj: sel.Obj(), + index: fmt.Sprint(sel.Index()), + indirect: sel.Indirect(), + } + + prog.methodsMu.Lock() + defer prog.methodsMu.Unlock() + + // Canonicalize key.recv to avoid constructing duplicate thunks. + canonRecv, ok := prog.canon.At(key.recv).(types.Type) + if !ok { + canonRecv = key.recv + prog.canon.Set(key.recv, canonRecv) + } + key.recv = canonRecv + + fn, ok := prog.thunks[key] + if !ok { + fn = makeWrapper(prog, sel) + if fn.Signature.Recv() != nil { + panic(fn) // unexpected receiver + } + prog.thunks[key] = fn + } + return fn +} + +func changeRecv(s *types.Signature, recv *types.Var) *types.Signature { + return types.NewSignature(recv, s.Params(), s.Results(), s.Variadic()) +} + +// selectionKey is like types.Selection but a usable map key. +type selectionKey struct { + kind types.SelectionKind + recv types.Type // canonicalized via Program.canon + obj types.Object + index string + indirect bool +} diff --git a/vendor/honnef.co/go/tools/ssa/write.go b/vendor/honnef.co/go/tools/ssa/write.go new file mode 100644 index 000000000..89761a18a --- /dev/null +++ b/vendor/honnef.co/go/tools/ssa/write.go @@ -0,0 +1,5 @@ +package ssa + +func NewJump(parent *BasicBlock) *Jump { + return &Jump{anInstruction{parent}} +} diff --git a/vendor/honnef.co/go/tools/ssautil/ssautil.go b/vendor/honnef.co/go/tools/ssautil/ssautil.go new file mode 100644 index 000000000..72c3c919d --- /dev/null +++ b/vendor/honnef.co/go/tools/ssautil/ssautil.go @@ -0,0 +1,58 @@ +package ssautil + +import ( + "honnef.co/go/tools/ssa" +) + +func Reachable(from, to *ssa.BasicBlock) bool { + if from == to { + return true + } + if from.Dominates(to) { + return true + } + + found := false + Walk(from, func(b *ssa.BasicBlock) bool { + if b == to { + found = true + return false + } + return true + }) + return found +} + +func Walk(b *ssa.BasicBlock, fn func(*ssa.BasicBlock) bool) { + seen := map[*ssa.BasicBlock]bool{} + wl := []*ssa.BasicBlock{b} + for len(wl) > 0 { + b := wl[len(wl)-1] + wl = wl[:len(wl)-1] + if seen[b] { + continue + } + seen[b] = true + if !fn(b) { + continue + } + wl = append(wl, b.Succs...) + } +} + +func Vararg(x *ssa.Slice) ([]ssa.Value, bool) { + var out []ssa.Value + slice, ok := x.X.(*ssa.Alloc) + if !ok || slice.Comment != "varargs" { + return nil, false + } + for _, ref := range *slice.Referrers() { + idx, ok := ref.(*ssa.IndexAddr) + if !ok { + continue + } + v := (*idx.Referrers())[0].(*ssa.Store).Val + out = append(out, v) + } + return out, true +} diff --git a/vendor/honnef.co/go/tools/staticcheck/CONTRIBUTING.md b/vendor/honnef.co/go/tools/staticcheck/CONTRIBUTING.md new file mode 100644 index 000000000..b12c7afc7 --- /dev/null +++ b/vendor/honnef.co/go/tools/staticcheck/CONTRIBUTING.md @@ -0,0 +1,15 @@ +# Contributing to staticcheck + +## Before filing an issue: + +### Are you having trouble building staticcheck? + +Check you have the latest version of its dependencies. Run +``` +go get -u honnef.co/go/tools/staticcheck +``` +If you still have problems, consider searching for existing issues before filing a new issue. + +## Before sending a pull request: + +Have you understood the purpose of staticcheck? Make sure to carefully read `README`. 
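The `ssautil` package vendored in the hunks above exposes three small helpers over `ssa.BasicBlock` graphs: `Reachable`, `Walk`, and `Vararg`. As a minimal, hedged sketch of how downstream checks can drive the first two — not part of the vendored files themselves, and with made-up caller names — it could look roughly like this:

```
package example

import (
	"honnef.co/go/tools/ssa"
	"honnef.co/go/tools/ssautil"
)

// reachableBlocks collects every basic block reachable from b
// (including b itself) using ssautil.Walk's stack-based traversal.
func reachableBlocks(b *ssa.BasicBlock) []*ssa.BasicBlock {
	var out []*ssa.BasicBlock
	ssautil.Walk(b, func(cur *ssa.BasicBlock) bool {
		out = append(out, cur)
		return true // keep descending into cur.Succs
	})
	return out
}

// entryReaches reports whether block `to` can be reached from fn's
// entry block, wrapping ssautil.Reachable.
func entryReaches(fn *ssa.Function, to *ssa.BasicBlock) bool {
	if len(fn.Blocks) == 0 {
		return false // function without a body (external/synthetic)
	}
	return ssautil.Reachable(fn.Blocks[0], to)
}
```

Note that `Walk` only stops descending below a block whose callback returns false; `Reachable` relies on that to cut the search short once the target block has been seen.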
diff --git a/vendor/honnef.co/go/tools/staticcheck/analysis.go b/vendor/honnef.co/go/tools/staticcheck/analysis.go new file mode 100644 index 000000000..442aebe5a --- /dev/null +++ b/vendor/honnef.co/go/tools/staticcheck/analysis.go @@ -0,0 +1,525 @@ +package staticcheck + +import ( + "flag" + + "honnef.co/go/tools/facts" + "honnef.co/go/tools/internal/passes/buildssa" + "honnef.co/go/tools/lint/lintutil" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" +) + +func newFlagSet() flag.FlagSet { + fs := flag.NewFlagSet("", flag.PanicOnError) + fs.Var(lintutil.NewVersionFlag(), "go", "Target Go version") + return *fs +} + +var Analyzers = map[string]*analysis.Analyzer{ + "SA1000": { + Name: "SA1000", + Run: callChecker(checkRegexpRules), + Doc: Docs["SA1000"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer}, + Flags: newFlagSet(), + }, + "SA1001": { + Name: "SA1001", + Run: CheckTemplate, + Doc: Docs["SA1001"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, + "SA1002": { + Name: "SA1002", + Run: callChecker(checkTimeParseRules), + Doc: Docs["SA1002"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer}, + Flags: newFlagSet(), + }, + "SA1003": { + Name: "SA1003", + Run: callChecker(checkEncodingBinaryRules), + Doc: Docs["SA1003"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer}, + Flags: newFlagSet(), + }, + "SA1004": { + Name: "SA1004", + Run: CheckTimeSleepConstant, + Doc: Docs["SA1004"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, + "SA1005": { + Name: "SA1005", + Run: CheckExec, + Doc: Docs["SA1005"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, + "SA1006": { + Name: "SA1006", + Run: CheckUnsafePrintf, + Doc: Docs["SA1006"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, + "SA1007": { + Name: "SA1007", + Run: callChecker(checkURLsRules), + Doc: Docs["SA1007"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer}, + Flags: newFlagSet(), + }, + "SA1008": { + Name: "SA1008", + Run: CheckCanonicalHeaderKey, + Doc: Docs["SA1008"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, + "SA1010": { + Name: "SA1010", + Run: callChecker(checkRegexpFindAllRules), + Doc: Docs["SA1010"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer}, + Flags: newFlagSet(), + }, + "SA1011": { + Name: "SA1011", + Run: callChecker(checkUTF8CutsetRules), + Doc: Docs["SA1011"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer}, + Flags: newFlagSet(), + }, + "SA1012": { + Name: "SA1012", + Run: CheckNilContext, + Doc: Docs["SA1012"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, + "SA1013": { + Name: "SA1013", + Run: CheckSeeker, + Doc: Docs["SA1013"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, + "SA1014": { + Name: "SA1014", + Run: callChecker(checkUnmarshalPointerRules), + Doc: Docs["SA1014"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer}, + Flags: newFlagSet(), + }, + "SA1015": { + Name: "SA1015", + Run: CheckLeakyTimeTick, + Doc: Docs["SA1015"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer}, + Flags: newFlagSet(), + }, + "SA1016": { + Name: "SA1016", 
+ Run: CheckUntrappableSignal, + Doc: Docs["SA1016"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, + "SA1017": { + Name: "SA1017", + Run: callChecker(checkUnbufferedSignalChanRules), + Doc: Docs["SA1017"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer}, + Flags: newFlagSet(), + }, + "SA1018": { + Name: "SA1018", + Run: callChecker(checkStringsReplaceZeroRules), + Doc: Docs["SA1018"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer}, + Flags: newFlagSet(), + }, + "SA1019": { + Name: "SA1019", + Run: CheckDeprecated, + Doc: Docs["SA1019"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Deprecated}, + Flags: newFlagSet(), + }, + "SA1020": { + Name: "SA1020", + Run: callChecker(checkListenAddressRules), + Doc: Docs["SA1020"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer}, + Flags: newFlagSet(), + }, + "SA1021": { + Name: "SA1021", + Run: callChecker(checkBytesEqualIPRules), + Doc: Docs["SA1021"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer}, + Flags: newFlagSet(), + }, + "SA1023": { + Name: "SA1023", + Run: CheckWriterBufferModified, + Doc: Docs["SA1023"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer}, + Flags: newFlagSet(), + }, + "SA1024": { + Name: "SA1024", + Run: callChecker(checkUniqueCutsetRules), + Doc: Docs["SA1024"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer}, + Flags: newFlagSet(), + }, + "SA1025": { + Name: "SA1025", + Run: CheckTimerResetReturnValue, + Doc: Docs["SA1025"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer}, + Flags: newFlagSet(), + }, + "SA1026": { + Name: "SA1026", + Run: callChecker(checkUnsupportedMarshal), + Doc: Docs["SA1026"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer}, + Flags: newFlagSet(), + }, + "SA1027": { + Name: "SA1027", + Run: callChecker(checkAtomicAlignment), + Doc: Docs["SA1027"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer}, + Flags: newFlagSet(), + }, + + "SA2000": { + Name: "SA2000", + Run: CheckWaitgroupAdd, + Doc: Docs["SA2000"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, + "SA2001": { + Name: "SA2001", + Run: CheckEmptyCriticalSection, + Doc: Docs["SA2001"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, + "SA2002": { + Name: "SA2002", + Run: CheckConcurrentTesting, + Doc: Docs["SA2002"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer}, + Flags: newFlagSet(), + }, + "SA2003": { + Name: "SA2003", + Run: CheckDeferLock, + Doc: Docs["SA2003"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer}, + Flags: newFlagSet(), + }, + + "SA3000": { + Name: "SA3000", + Run: CheckTestMainExit, + Doc: Docs["SA3000"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, + "SA3001": { + Name: "SA3001", + Run: CheckBenchmarkN, + Doc: Docs["SA3001"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, + + "SA4000": { + Name: "SA4000", + Run: CheckLhsRhsIdentical, + Doc: Docs["SA4000"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.TokenFile, facts.Generated}, + Flags: newFlagSet(), + }, + "SA4001": { + Name: "SA4001", + Run: CheckIneffectiveCopy, + Doc: Docs["SA4001"].String(), + Requires: 
[]*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, + "SA4002": { + Name: "SA4002", + Run: CheckDiffSizeComparison, + Doc: Docs["SA4002"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer}, + Flags: newFlagSet(), + }, + "SA4003": { + Name: "SA4003", + Run: CheckExtremeComparison, + Doc: Docs["SA4003"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, + "SA4004": { + Name: "SA4004", + Run: CheckIneffectiveLoop, + Doc: Docs["SA4004"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, + "SA4006": { + Name: "SA4006", + Run: CheckUnreadVariableValues, + Doc: Docs["SA4006"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "SA4008": { + Name: "SA4008", + Run: CheckLoopCondition, + Doc: Docs["SA4008"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer}, + Flags: newFlagSet(), + }, + "SA4009": { + Name: "SA4009", + Run: CheckArgOverwritten, + Doc: Docs["SA4009"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer}, + Flags: newFlagSet(), + }, + "SA4010": { + Name: "SA4010", + Run: CheckIneffectiveAppend, + Doc: Docs["SA4010"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer}, + Flags: newFlagSet(), + }, + "SA4011": { + Name: "SA4011", + Run: CheckScopedBreak, + Doc: Docs["SA4011"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, + "SA4012": { + Name: "SA4012", + Run: CheckNaNComparison, + Doc: Docs["SA4012"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer}, + Flags: newFlagSet(), + }, + "SA4013": { + Name: "SA4013", + Run: CheckDoubleNegation, + Doc: Docs["SA4013"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, + "SA4014": { + Name: "SA4014", + Run: CheckRepeatedIfElse, + Doc: Docs["SA4014"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, + "SA4015": { + Name: "SA4015", + Run: callChecker(checkMathIntRules), + Doc: Docs["SA4015"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer}, + Flags: newFlagSet(), + }, + "SA4016": { + Name: "SA4016", + Run: CheckSillyBitwiseOps, + Doc: Docs["SA4016"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer, facts.TokenFile}, + Flags: newFlagSet(), + }, + "SA4017": { + Name: "SA4017", + Run: CheckPureFunctions, + Doc: Docs["SA4017"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer, facts.Purity}, + Flags: newFlagSet(), + }, + "SA4018": { + Name: "SA4018", + Run: CheckSelfAssignment, + Doc: Docs["SA4018"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated, facts.TokenFile}, + Flags: newFlagSet(), + }, + "SA4019": { + Name: "SA4019", + Run: CheckDuplicateBuildConstraints, + Doc: Docs["SA4019"].String(), + Requires: []*analysis.Analyzer{facts.Generated}, + Flags: newFlagSet(), + }, + "SA4020": { + Name: "SA4020", + Run: CheckUnreachableTypeCases, + Doc: Docs["SA4020"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, + "SA4021": { + Name: "SA4021", + Run: CheckSingleArgAppend, + Doc: Docs["SA4021"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated, facts.TokenFile}, + Flags: newFlagSet(), + }, + + "SA5000": { + Name: "SA5000", + Run: CheckNilMaps, + Doc: Docs["SA5000"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer}, + Flags: newFlagSet(), + }, + "SA5001": { + 
Name: "SA5001", + Run: CheckEarlyDefer, + Doc: Docs["SA5001"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, + "SA5002": { + Name: "SA5002", + Run: CheckInfiniteEmptyLoop, + Doc: Docs["SA5002"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, + "SA5003": { + Name: "SA5003", + Run: CheckDeferInInfiniteLoop, + Doc: Docs["SA5003"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, + "SA5004": { + Name: "SA5004", + Run: CheckLoopEmptyDefault, + Doc: Docs["SA5004"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, + "SA5005": { + Name: "SA5005", + Run: CheckCyclicFinalizer, + Doc: Docs["SA5005"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer}, + Flags: newFlagSet(), + }, + "SA5007": { + Name: "SA5007", + Run: CheckInfiniteRecursion, + Doc: Docs["SA5007"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer}, + Flags: newFlagSet(), + }, + "SA5008": { + Name: "SA5008", + Run: CheckStructTags, + Doc: Docs["SA5008"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, + "SA5009": { + Name: "SA5009", + Run: callChecker(checkPrintfRules), + Doc: Docs["SA5009"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer}, + Flags: newFlagSet(), + }, + + "SA6000": { + Name: "SA6000", + Run: callChecker(checkRegexpMatchLoopRules), + Doc: Docs["SA6000"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer}, + Flags: newFlagSet(), + }, + "SA6001": { + Name: "SA6001", + Run: CheckMapBytesKey, + Doc: Docs["SA6001"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer}, + Flags: newFlagSet(), + }, + "SA6002": { + Name: "SA6002", + Run: callChecker(checkSyncPoolValueRules), + Doc: Docs["SA6002"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer}, + Flags: newFlagSet(), + }, + "SA6003": { + Name: "SA6003", + Run: CheckRangeStringRunes, + Doc: Docs["SA6003"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer}, + Flags: newFlagSet(), + }, + "SA6005": { + Name: "SA6005", + Run: CheckToLowerToUpperComparison, + Doc: Docs["SA6005"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, + + "SA9001": { + Name: "SA9001", + Run: CheckDubiousDeferInChannelRangeLoop, + Doc: Docs["SA9001"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, + "SA9002": { + Name: "SA9002", + Run: CheckNonOctalFileMode, + Doc: Docs["SA9002"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, + "SA9003": { + Name: "SA9003", + Run: CheckEmptyBranch, + Doc: Docs["SA9003"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer, facts.TokenFile, facts.Generated}, + Flags: newFlagSet(), + }, + "SA9004": { + Name: "SA9004", + Run: CheckMissingEnumTypesInDeclaration, + Doc: Docs["SA9004"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, + // Filtering generated code because it may include empty structs generated from data models. 
+ "SA9005": { + Name: "SA9005", + Run: callChecker(checkNoopMarshal), + Doc: Docs["SA9005"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer, facts.Generated, facts.TokenFile}, + Flags: newFlagSet(), + }, +} diff --git a/vendor/honnef.co/go/tools/staticcheck/buildtag.go b/vendor/honnef.co/go/tools/staticcheck/buildtag.go new file mode 100644 index 000000000..888d3e9dc --- /dev/null +++ b/vendor/honnef.co/go/tools/staticcheck/buildtag.go @@ -0,0 +1,21 @@ +package staticcheck + +import ( + "go/ast" + "strings" + + . "honnef.co/go/tools/lint/lintdsl" +) + +func buildTags(f *ast.File) [][]string { + var out [][]string + for _, line := range strings.Split(Preamble(f), "\n") { + if !strings.HasPrefix(line, "+build ") { + continue + } + line = strings.TrimSpace(strings.TrimPrefix(line, "+build ")) + fields := strings.Fields(line) + out = append(out, fields) + } + return out +} diff --git a/vendor/honnef.co/go/tools/staticcheck/doc.go b/vendor/honnef.co/go/tools/staticcheck/doc.go new file mode 100644 index 000000000..4a87d4a24 --- /dev/null +++ b/vendor/honnef.co/go/tools/staticcheck/doc.go @@ -0,0 +1,764 @@ +package staticcheck + +import "honnef.co/go/tools/lint" + +var Docs = map[string]*lint.Documentation{ + "SA1000": &lint.Documentation{ + Title: `Invalid regular expression`, + Since: "2017.1", + }, + + "SA1001": &lint.Documentation{ + Title: `Invalid template`, + Since: "2017.1", + }, + + "SA1002": &lint.Documentation{ + Title: `Invalid format in time.Parse`, + Since: "2017.1", + }, + + "SA1003": &lint.Documentation{ + Title: `Unsupported argument to functions in encoding/binary`, + Text: `The encoding/binary package can only serialize types with known sizes. +This precludes the use of the int and uint types, as their sizes +differ on different architectures. Furthermore, it doesn't support +serializing maps, channels, strings, or functions. + +Before Go 1.8, bool wasn't supported, either.`, + Since: "2017.1", + }, + + "SA1004": &lint.Documentation{ + Title: `Suspiciously small untyped constant in time.Sleep`, + Text: `The time.Sleep function takes a time.Duration as its only argument. +Durations are expressed in nanoseconds. Thus, calling time.Sleep(1) +will sleep for 1 nanosecond. This is a common source of bugs, as sleep +functions in other languages often accept seconds or milliseconds. + +The time package provides constants such as time.Second to express +large durations. These can be combined with arithmetic to express +arbitrary durations, for example '5 * time.Second' for 5 seconds. + +If you truly meant to sleep for a tiny amount of time, use +'n * time.Nanosecond' to signal to staticcheck that you did mean to sleep +for some amount of nanoseconds.`, + Since: "2017.1", + }, + + "SA1005": &lint.Documentation{ + Title: `Invalid first argument to exec.Command`, + Text: `os/exec runs programs directly (using variants of the fork and exec +system calls on Unix systems). This shouldn't be confused with running +a command in a shell. The shell will allow for features such as input +redirection, pipes, and general scripting. The shell is also +responsible for splitting the user's input into a program name and its +arguments. 
For example, the equivalent to + + ls / /tmp + +would be + + exec.Command("ls", "/", "/tmp") + +If you want to run a command in a shell, consider using something like +the following – but be aware that not all systems, particularly +Windows, will have a /bin/sh program: + + exec.Command("/bin/sh", "-c", "ls | grep Awesome")`, + Since: "2017.1", + }, + + "SA1006": &lint.Documentation{ + Title: `Printf with dynamic first argument and no further arguments`, + Text: `Using fmt.Printf with a dynamic first argument can lead to unexpected +output. The first argument is a format string, where certain character +combinations have special meaning. If, for example, a user were to +enter a string such as + + Interest rate: 5% + +and you printed it with + + fmt.Printf(s) + +it would lead to the following output: + + Interest rate: 5%!(NOVERB). + +Similarly, forming the first parameter via string concatenation with +user input should be avoided for the same reason. When printing user +input, either use a variant of fmt.Print, or use the %s Printf verb +and pass the string as an argument.`, + Since: "2017.1", + }, + + "SA1007": &lint.Documentation{ + Title: `Invalid URL in net/url.Parse`, + Since: "2017.1", + }, + + "SA1008": &lint.Documentation{ + Title: `Non-canonical key in http.Header map`, + Text: `Keys in http.Header maps are canonical, meaning they follow a specific +combination of uppercase and lowercase letters. Methods such as +http.Header.Add and http.Header.Del convert inputs into this canonical +form before manipulating the map. + +When manipulating http.Header maps directly, as opposed to using the +provided methods, care should be taken to stick to canonical form in +order to avoid inconsistencies. The following piece of code +demonstrates one such inconsistency: + + h := http.Header{} + h["etag"] = []string{"1234"} + h.Add("etag", "5678") + fmt.Println(h) + + // Output: + // map[Etag:[5678] etag:[1234]] + +The easiest way of obtaining the canonical form of a key is to use +http.CanonicalHeaderKey.`, + Since: "2017.1", + }, + + "SA1010": &lint.Documentation{ + Title: `(*regexp.Regexp).FindAll called with n == 0, which will always return zero results`, + Text: `If n >= 0, the function returns at most n matches/submatches. To +return all results, specify a negative number.`, + Since: "2017.1", + }, + + "SA1011": &lint.Documentation{ + Title: `Various methods in the strings package expect valid UTF-8, but invalid input is provided`, + Since: "2017.1", + }, + + "SA1012": &lint.Documentation{ + Title: `A nil context.Context is being passed to a function, consider using context.TODO instead`, + Since: "2017.1", + }, + + "SA1013": &lint.Documentation{ + Title: `io.Seeker.Seek is being called with the whence constant as the first argument, but it should be the second`, + Since: "2017.1", + }, + + "SA1014": &lint.Documentation{ + Title: `Non-pointer value passed to Unmarshal or Decode`, + Since: "2017.1", + }, + + "SA1015": &lint.Documentation{ + Title: `Using time.Tick in a way that will leak. Consider using time.NewTicker, and only use time.Tick in tests, commands and endless functions`, + Since: "2017.1", + }, + + "SA1016": &lint.Documentation{ + Title: `Trapping a signal that cannot be trapped`, + Text: `Not all signals can be intercepted by a process. Speficially, on +UNIX-like systems, the syscall.SIGKILL and syscall.SIGSTOP signals are +never passed to the process, but instead handled directly by the +kernel. 
It is therefore pointless to try and handle these signals.`, + Since: "2017.1", + }, + + "SA1017": &lint.Documentation{ + Title: `Channels used with os/signal.Notify should be buffered`, + Text: `The os/signal package uses non-blocking channel sends when delivering +signals. If the receiving end of the channel isn't ready and the +channel is either unbuffered or full, the signal will be dropped. To +avoid missing signals, the channel should be buffered and of the +appropriate size. For a channel used for notification of just one +signal value, a buffer of size 1 is sufficient.`, + Since: "2017.1", + }, + + "SA1018": &lint.Documentation{ + Title: `strings.Replace called with n == 0, which does nothing`, + Text: `With n == 0, zero instances will be replaced. To replace all +instances, use a negative number, or use strings.ReplaceAll.`, + Since: "2017.1", + }, + + "SA1019": &lint.Documentation{ + Title: `Using a deprecated function, variable, constant or field`, + Since: "2017.1", + }, + + "SA1020": &lint.Documentation{ + Title: `Using an invalid host:port pair with a net.Listen-related function`, + Since: "2017.1", + }, + + "SA1021": &lint.Documentation{ + Title: `Using bytes.Equal to compare two net.IP`, + Text: `A net.IP stores an IPv4 or IPv6 address as a slice of bytes. The +length of the slice for an IPv4 address, however, can be either 4 or +16 bytes long, using different ways of representing IPv4 addresses. In +order to correctly compare two net.IPs, the net.IP.Equal method should +be used, as it takes both representations into account.`, + Since: "2017.1", + }, + + "SA1023": &lint.Documentation{ + Title: `Modifying the buffer in an io.Writer implementation`, + Text: `Write must not modify the slice data, even temporarily.`, + Since: "2017.1", + }, + + "SA1024": &lint.Documentation{ + Title: `A string cutset contains duplicate characters`, + Text: `The strings.TrimLeft and strings.TrimRight functions take cutsets, not +prefixes. A cutset is treated as a set of characters to remove from a +string. For example, + + strings.TrimLeft("42133word", "1234")) + +will result in the string "word" – any characters that are 1, 2, 3 or +4 are cut from the left of the string. + +In order to remove one string from another, use strings.TrimPrefix instead.`, + Since: "2017.1", + }, + + "SA1025": &lint.Documentation{ + Title: `It is not possible to use (*time.Timer).Reset's return value correctly`, + Since: "2019.1", + }, + + "SA1026": &lint.Documentation{ + Title: `Cannot marshal channels or functions`, + Since: "2019.2", + }, + + "SA1027": &lint.Documentation{ + Title: `Atomic access to 64-bit variable must be 64-bit aligned`, + Text: `On ARM, x86-32, and 32-bit MIPS, it is the caller's responsibility to +arrange for 64-bit alignment of 64-bit words accessed atomically. The +first word in a variable or in an allocated struct, array, or slice +can be relied upon to be 64-bit aligned. 
+ +You can use the structlayout tool to inspect the alignment of fields +in a struct.`, + Since: "2019.2", + }, + + "SA2000": &lint.Documentation{ + Title: `sync.WaitGroup.Add called inside the goroutine, leading to a race condition`, + Since: "2017.1", + }, + + "SA2001": &lint.Documentation{ + Title: `Empty critical section, did you mean to defer the unlock?`, + Text: `Empty critical sections of the kind + + mu.Lock() + mu.Unlock() + +are very often a typo, and the following was intended instead: + + mu.Lock() + defer mu.Unlock() + +Do note that sometimes empty critical sections can be useful, as a +form of signaling to wait on another goroutine. Many times, there are +simpler ways of achieving the same effect. When that isn't the case, +the code should be amply commented to avoid confusion. Combining such +comments with a //lint:ignore directive can be used to suppress this +rare false positive.`, + Since: "2017.1", + }, + + "SA2002": &lint.Documentation{ + Title: `Called testing.T.FailNow or SkipNow in a goroutine, which isn't allowed`, + Since: "2017.1", + }, + + "SA2003": &lint.Documentation{ + Title: `Deferred Lock right after locking, likely meant to defer Unlock instead`, + Since: "2017.1", + }, + + "SA3000": &lint.Documentation{ + Title: `TestMain doesn't call os.Exit, hiding test failures`, + Text: `Test executables (and in turn 'go test') exit with a non-zero status +code if any tests failed. When specifying your own TestMain function, +it is your responsibility to arrange for this, by calling os.Exit with +the correct code. The correct code is returned by (*testing.M).Run, so +the usual way of implementing TestMain is to end it with +os.Exit(m.Run()).`, + Since: "2017.1", + }, + + "SA3001": &lint.Documentation{ + Title: `Assigning to b.N in benchmarks distorts the results`, + Text: `The testing package dynamically sets b.N to improve the reliability of +benchmarks and uses it in computations to determine the duration of a +single operation. Benchmark code must not alter b.N as this would +falsify results.`, + Since: "2017.1", + }, + + "SA4000": &lint.Documentation{ + Title: `Boolean expression has identical expressions on both sides`, + Since: "2017.1", + }, + + "SA4001": &lint.Documentation{ + Title: `&*x gets simplified to x, it does not copy x`, + Since: "2017.1", + }, + + "SA4002": &lint.Documentation{ + Title: `Comparing strings with known different sizes has predictable results`, + Since: "2017.1", + }, + + "SA4003": &lint.Documentation{ + Title: `Comparing unsigned values against negative values is pointless`, + Since: "2017.1", + }, + + "SA4004": &lint.Documentation{ + Title: `The loop exits unconditionally after one iteration`, + Since: "2017.1", + }, + + "SA4005": &lint.Documentation{ + Title: `Field assignment that will never be observed. Did you mean to use a pointer receiver?`, + Since: "2017.1", + }, + + "SA4006": &lint.Documentation{ + Title: `A value assigned to a variable is never read before being overwritten. Forgotten error check or dead code?`, + Since: "2017.1", + }, + + "SA4008": &lint.Documentation{ + Title: `The variable in the loop condition never changes, are you incrementing the wrong variable?`, + Since: "2017.1", + }, + + "SA4009": &lint.Documentation{ + Title: `A function argument is overwritten before its first use`, + Since: "2017.1", + }, + + "SA4010": &lint.Documentation{ + Title: `The result of append will never be observed anywhere`, + Since: "2017.1", + }, + + "SA4011": &lint.Documentation{ + Title: `Break statement with no effect. 
Did you mean to break out of an outer loop?`, + Since: "2017.1", + }, + + "SA4012": &lint.Documentation{ + Title: `Comparing a value against NaN even though no value is equal to NaN`, + Since: "2017.1", + }, + + "SA4013": &lint.Documentation{ + Title: `Negating a boolean twice (!!b) is the same as writing b. This is either redundant, or a typo.`, + Since: "2017.1", + }, + + "SA4014": &lint.Documentation{ + Title: `An if/else if chain has repeated conditions and no side-effects; if the condition didn't match the first time, it won't match the second time, either`, + Since: "2017.1", + }, + + "SA4015": &lint.Documentation{ + Title: `Calling functions like math.Ceil on floats converted from integers doesn't do anything useful`, + Since: "2017.1", + }, + + "SA4016": &lint.Documentation{ + Title: `Certain bitwise operations, such as x ^ 0, do not do anything useful`, + Since: "2017.1", + }, + + "SA4017": &lint.Documentation{ + Title: `A pure function's return value is discarded, making the call pointless`, + Since: "2017.1", + }, + + "SA4018": &lint.Documentation{ + Title: `Self-assignment of variables`, + Since: "2017.1", + }, + + "SA4019": &lint.Documentation{ + Title: `Multiple, identical build constraints in the same file`, + Since: "2017.1", + }, + + "SA4020": &lint.Documentation{ + Title: `Unreachable case clause in a type switch`, + Text: `In a type switch like the following + + type T struct{} + func (T) Read(b []byte) (int, error) { return 0, nil } + + var v interface{} = T{} + + switch v.(type) { + case io.Reader: + // ... + case T: + // unreachable + } + +the second case clause can never be reached because T implements +io.Reader and case clauses are evaluated in source order. + +Another example: + + type T struct{} + func (T) Read(b []byte) (int, error) { return 0, nil } + func (T) Close() error { return nil } + + var v interface{} = T{} + + switch v.(type) { + case io.Reader: + // ... + case io.ReadCloser: + // unreachable + } + +Even though T has a Close method and thus implements io.ReadCloser, +io.Reader will always match first. The method set of io.Reader is a +subset of io.ReadCloser. Thus it is impossible to match the second +case without matching the first case. + + +Structurally equivalent interfaces + +A special case of the previous example are structurally identical +interfaces. Given these declarations + + type T error + type V error + + func doSomething() error { + err, ok := doAnotherThing() + if ok { + return T(err) + } + + return U(err) + } + +the following type switch will have an unreachable case clause: + + switch doSomething().(type) { + case T: + // ... + case V: + // unreachable + } + +T will always match before V because they are structurally equivalent +and therefore doSomething()'s return value implements both.`, + Since: "2019.2", + }, + + "SA4021": &lint.Documentation{ + Title: `x = append(y) is equivalent to x = y`, + Since: "2019.2", + }, + + "SA5000": &lint.Documentation{ + Title: `Assignment to nil map`, + Since: "2017.1", + }, + + "SA5001": &lint.Documentation{ + Title: `Defering Close before checking for a possible error`, + Since: "2017.1", + }, + + "SA5002": &lint.Documentation{ + Title: `The empty for loop (for {}) spins and can block the scheduler`, + Since: "2017.1", + }, + + "SA5003": &lint.Documentation{ + Title: `Defers in infinite loops will never execute`, + Text: `Defers are scoped to the surrounding function, not the surrounding +block. In a function that never returns, i.e. 
one containing an +infinite loop, defers will never execute.`, + Since: "2017.1", + }, + + "SA5004": &lint.Documentation{ + Title: `for { select { ... with an empty default branch spins`, + Since: "2017.1", + }, + + "SA5005": &lint.Documentation{ + Title: `The finalizer references the finalized object, preventing garbage collection`, + Text: `A finalizer is a function associated with an object that runs when the +garbage collector is ready to collect said object, that is when the +object is no longer referenced by anything. + +If the finalizer references the object, however, it will always remain +as the final reference to that object, preventing the garbage +collector from collecting the object. The finalizer will never run, +and the object will never be collected, leading to a memory leak. That +is why the finalizer should instead use its first argument to operate +on the object. That way, the number of references can temporarily go +to zero before the object is being passed to the finalizer.`, + Since: "2017.1", + }, + + "SA5006": &lint.Documentation{ + Title: `Slice index out of bounds`, + Since: "2017.1", + }, + + "SA5007": &lint.Documentation{ + Title: `Infinite recursive call`, + Text: `A function that calls itself recursively needs to have an exit +condition. Otherwise it will recurse forever, until the system runs +out of memory. + +This issue can be caused by simple bugs such as forgetting to add an +exit condition. It can also happen "on purpose". Some languages have +tail call optimization which makes certain infinite recursive calls +safe to use. Go, however, does not implement TCO, and as such a loop +should be used instead.`, + Since: "2017.1", + }, + + "SA5008": &lint.Documentation{ + Title: `Invalid struct tag`, + Since: "2019.2", + }, + + "SA5009": &lint.Documentation{ + Title: `Invalid Printf call`, + Since: "2019.2", + }, + + "SA6000": &lint.Documentation{ + Title: `Using regexp.Match or related in a loop, should use regexp.Compile`, + Since: "2017.1", + }, + + "SA6001": &lint.Documentation{ + Title: `Missing an optimization opportunity when indexing maps by byte slices`, + + Text: `Map keys must be comparable, which precludes the use of byte slices. +This usually leads to using string keys and converting byte slices to +strings. + +Normally, a conversion of a byte slice to a string needs to copy the data and +causes allocations. The compiler, however, recognizes m[string(b)] and +uses the data of b directly, without copying it, because it knows that +the data can't change during the map lookup. This leads to the +counter-intuitive situation that + + k := string(b) + println(m[k]) + println(m[k]) + +will be less efficient than + + println(m[string(b)]) + println(m[string(b)]) + +because the first version needs to copy and allocate, while the second +one does not. + +For some history on this optimization, check out commit +f5f5a8b6209f84961687d993b93ea0d397f5d5bf in the Go repository.`, + Since: "2017.1", + }, + + "SA6002": &lint.Documentation{ + Title: `Storing non-pointer values in sync.Pool allocates memory`, + Text: `A sync.Pool is used to avoid unnecessary allocations and reduce the +amount of work the garbage collector has to do. + +When passing a value that is not a pointer to a function that accepts +an interface, the value needs to be placed on the heap, which means an +additional allocation. Slices are a common thing to put in sync.Pools, +and they're structs with 3 fields (length, capacity, and a pointer to +an array). 
In order to avoid the extra allocation, one should store a +pointer to the slice instead. + +See the comments on https://go-review.googlesource.com/c/go/+/24371 +that discuss this problem.`, + Since: "2017.1", + }, + + "SA6003": &lint.Documentation{ + Title: `Converting a string to a slice of runes before ranging over it`, + Text: `You may want to loop over the runes in a string. Instead of converting +the string to a slice of runes and looping over that, you can loop +over the string itself. That is, + + for _, r := range s {} + +and + + for _, r := range []rune(s) {} + +will yield the same values. The first version, however, will be faster +and avoid unnecessary memory allocations. + +Do note that if you are interested in the indices, ranging over a +string and over a slice of runes will yield different indices. The +first one yields byte offsets, while the second one yields indices in +the slice of runes.`, + Since: "2017.1", + }, + + "SA6005": &lint.Documentation{ + Title: `Inefficient string comparison with strings.ToLower or strings.ToUpper`, + Text: `Converting two strings to the same case and comparing them like so + + if strings.ToLower(s1) == strings.ToLower(s2) { + ... + } + +is significantly more expensive than comparing them with +strings.EqualFold(s1, s2). This is due to memory usage as well as +computational complexity. + +strings.ToLower will have to allocate memory for the new strings, as +well as convert both strings fully, even if they differ on the very +first byte. strings.EqualFold, on the other hand, compares the strings +one character at a time. It doesn't need to create two intermediate +strings and can return as soon as the first non-matching character has +been found. + +For a more in-depth explanation of this issue, see +https://blog.digitalocean.com/how-to-efficiently-compare-strings-in-go/`, + Since: "2019.2", + }, + + "SA9001": &lint.Documentation{ + Title: `Defers in range loops may not run when you expect them to`, + Since: "2017.1", + }, + + "SA9002": &lint.Documentation{ + Title: `Using a non-octal os.FileMode that looks like it was meant to be in octal.`, + Since: "2017.1", + }, + + "SA9003": &lint.Documentation{ + Title: `Empty body in an if or else branch`, + Since: "2017.1", + }, + + "SA9004": &lint.Documentation{ + Title: `Only the first constant has an explicit type`, + + Text: `In a constant declaration such as the following: + + const ( + First byte = 1 + Second = 2 + ) + +the constant Second does not have the same type as the constant First. +This construct shouldn't be confused with + + const ( + First byte = iota + Second + ) + +where First and Second do indeed have the same type. The type is only +passed on when no explicit value is assigned to the constant. + +When declaring enumerations with explicit values it is therefore +important not to write + + const ( + EnumFirst EnumType = 1 + EnumSecond = 2 + EnumThird = 3 + ) + +This discrepancy in types can cause various confusing behaviors and +bugs. + + +Wrong type in variable declarations + +The most obvious issue with such incorrect enumerations expresses +itself as a compile error: + + package pkg + + const ( + EnumFirst uint8 = 1 + EnumSecond = 2 + ) + + func fn(useFirst bool) { + x := EnumSecond + if useFirst { + x = EnumFirst + } + } + +fails to compile with + + ./const.go:11:5: cannot use EnumFirst (type uint8) as type int in assignment + + +Losing method sets + +A more subtle issue occurs with types that have methods and optional +interfaces. 
Consider the following: + + package main + + import "fmt" + + type Enum int + + func (e Enum) String() string { + return "an enum" + } + + const ( + EnumFirst Enum = 1 + EnumSecond = 2 + ) + + func main() { + fmt.Println(EnumFirst) + fmt.Println(EnumSecond) + } + +This code will output + + an enum + 2 + +as EnumSecond has no explicit type, and thus defaults to int.`, + Since: "2019.1", + }, + + "SA9005": &lint.Documentation{ + Title: `Trying to marshal a struct with no public fields nor custom marshaling`, + Text: `The encoding/json and encoding/xml packages only operate on exported +fields in structs, not unexported ones. It is usually an error to try +to (un)marshal structs that only consist of unexported fields. + +This check will not flag calls involving types that define custom +marshaling behavior, e.g. via MarshalJSON methods. It will also not +flag empty structs.`, + Since: "2019.2", + }, +} diff --git a/vendor/honnef.co/go/tools/staticcheck/knowledge.go b/vendor/honnef.co/go/tools/staticcheck/knowledge.go new file mode 100644 index 000000000..4c12b866a --- /dev/null +++ b/vendor/honnef.co/go/tools/staticcheck/knowledge.go @@ -0,0 +1,25 @@ +package staticcheck + +import ( + "reflect" + + "golang.org/x/tools/go/analysis" + "honnef.co/go/tools/internal/passes/buildssa" + "honnef.co/go/tools/ssa" + "honnef.co/go/tools/staticcheck/vrp" +) + +var valueRangesAnalyzer = &analysis.Analyzer{ + Name: "vrp", + Doc: "calculate value ranges of functions", + Run: func(pass *analysis.Pass) (interface{}, error) { + m := map[*ssa.Function]vrp.Ranges{} + for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + vr := vrp.BuildGraph(ssafn).Solve() + m[ssafn] = vr + } + return m, nil + }, + Requires: []*analysis.Analyzer{buildssa.Analyzer}, + ResultType: reflect.TypeOf(map[*ssa.Function]vrp.Ranges{}), +} diff --git a/vendor/honnef.co/go/tools/staticcheck/lint.go b/vendor/honnef.co/go/tools/staticcheck/lint.go new file mode 100644 index 000000000..1558cbf94 --- /dev/null +++ b/vendor/honnef.co/go/tools/staticcheck/lint.go @@ -0,0 +1,3360 @@ +// Package staticcheck contains a linter for Go source code. +package staticcheck // import "honnef.co/go/tools/staticcheck" + +import ( + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + htmltemplate "html/template" + "net/http" + "reflect" + "regexp" + "regexp/syntax" + "sort" + "strconv" + "strings" + texttemplate "text/template" + "unicode" + + . "honnef.co/go/tools/arg" + "honnef.co/go/tools/deprecated" + "honnef.co/go/tools/facts" + "honnef.co/go/tools/functions" + "honnef.co/go/tools/internal/passes/buildssa" + "honnef.co/go/tools/internal/sharedcheck" + "honnef.co/go/tools/lint" + . 
"honnef.co/go/tools/lint/lintdsl" + "honnef.co/go/tools/printf" + "honnef.co/go/tools/ssa" + "honnef.co/go/tools/ssautil" + "honnef.co/go/tools/staticcheck/vrp" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" +) + +func validRegexp(call *Call) { + arg := call.Args[0] + err := ValidateRegexp(arg.Value) + if err != nil { + arg.Invalid(err.Error()) + } +} + +type runeSlice []rune + +func (rs runeSlice) Len() int { return len(rs) } +func (rs runeSlice) Less(i int, j int) bool { return rs[i] < rs[j] } +func (rs runeSlice) Swap(i int, j int) { rs[i], rs[j] = rs[j], rs[i] } + +func utf8Cutset(call *Call) { + arg := call.Args[1] + if InvalidUTF8(arg.Value) { + arg.Invalid(MsgInvalidUTF8) + } +} + +func uniqueCutset(call *Call) { + arg := call.Args[1] + if !UniqueStringCutset(arg.Value) { + arg.Invalid(MsgNonUniqueCutset) + } +} + +func unmarshalPointer(name string, arg int) CallCheck { + return func(call *Call) { + if !Pointer(call.Args[arg].Value) { + call.Args[arg].Invalid(fmt.Sprintf("%s expects to unmarshal into a pointer, but the provided value is not a pointer", name)) + } + } +} + +func pointlessIntMath(call *Call) { + if ConvertedFromInt(call.Args[0].Value) { + call.Invalid(fmt.Sprintf("calling %s on a converted integer is pointless", CallName(call.Instr.Common()))) + } +} + +func checkValidHostPort(arg int) CallCheck { + return func(call *Call) { + if !ValidHostPort(call.Args[arg].Value) { + call.Args[arg].Invalid(MsgInvalidHostPort) + } + } +} + +var ( + checkRegexpRules = map[string]CallCheck{ + "regexp.MustCompile": validRegexp, + "regexp.Compile": validRegexp, + "regexp.Match": validRegexp, + "regexp.MatchReader": validRegexp, + "regexp.MatchString": validRegexp, + } + + checkTimeParseRules = map[string]CallCheck{ + "time.Parse": func(call *Call) { + arg := call.Args[Arg("time.Parse.layout")] + err := ValidateTimeLayout(arg.Value) + if err != nil { + arg.Invalid(err.Error()) + } + }, + } + + checkEncodingBinaryRules = map[string]CallCheck{ + "encoding/binary.Write": func(call *Call) { + arg := call.Args[Arg("encoding/binary.Write.data")] + if !CanBinaryMarshal(call.Pass, arg.Value) { + arg.Invalid(fmt.Sprintf("value of type %s cannot be used with binary.Write", arg.Value.Value.Type())) + } + }, + } + + checkURLsRules = map[string]CallCheck{ + "net/url.Parse": func(call *Call) { + arg := call.Args[Arg("net/url.Parse.rawurl")] + err := ValidateURL(arg.Value) + if err != nil { + arg.Invalid(err.Error()) + } + }, + } + + checkSyncPoolValueRules = map[string]CallCheck{ + "(*sync.Pool).Put": func(call *Call) { + arg := call.Args[Arg("(*sync.Pool).Put.x")] + typ := arg.Value.Value.Type() + if !IsPointerLike(typ) { + arg.Invalid("argument should be pointer-like to avoid allocations") + } + }, + } + + checkRegexpFindAllRules = map[string]CallCheck{ + "(*regexp.Regexp).FindAll": RepeatZeroTimes("a FindAll method", 1), + "(*regexp.Regexp).FindAllIndex": RepeatZeroTimes("a FindAll method", 1), + "(*regexp.Regexp).FindAllString": RepeatZeroTimes("a FindAll method", 1), + "(*regexp.Regexp).FindAllStringIndex": RepeatZeroTimes("a FindAll method", 1), + "(*regexp.Regexp).FindAllStringSubmatch": RepeatZeroTimes("a FindAll method", 1), + "(*regexp.Regexp).FindAllStringSubmatchIndex": RepeatZeroTimes("a FindAll method", 1), + "(*regexp.Regexp).FindAllSubmatch": RepeatZeroTimes("a FindAll method", 1), + "(*regexp.Regexp).FindAllSubmatchIndex": 
RepeatZeroTimes("a FindAll method", 1), + } + + checkUTF8CutsetRules = map[string]CallCheck{ + "strings.IndexAny": utf8Cutset, + "strings.LastIndexAny": utf8Cutset, + "strings.ContainsAny": utf8Cutset, + "strings.Trim": utf8Cutset, + "strings.TrimLeft": utf8Cutset, + "strings.TrimRight": utf8Cutset, + } + + checkUniqueCutsetRules = map[string]CallCheck{ + "strings.Trim": uniqueCutset, + "strings.TrimLeft": uniqueCutset, + "strings.TrimRight": uniqueCutset, + } + + checkUnmarshalPointerRules = map[string]CallCheck{ + "encoding/xml.Unmarshal": unmarshalPointer("xml.Unmarshal", 1), + "(*encoding/xml.Decoder).Decode": unmarshalPointer("Decode", 0), + "(*encoding/xml.Decoder).DecodeElement": unmarshalPointer("DecodeElement", 0), + "encoding/json.Unmarshal": unmarshalPointer("json.Unmarshal", 1), + "(*encoding/json.Decoder).Decode": unmarshalPointer("Decode", 0), + } + + checkUnbufferedSignalChanRules = map[string]CallCheck{ + "os/signal.Notify": func(call *Call) { + arg := call.Args[Arg("os/signal.Notify.c")] + if UnbufferedChannel(arg.Value) { + arg.Invalid("the channel used with signal.Notify should be buffered") + } + }, + } + + checkMathIntRules = map[string]CallCheck{ + "math.Ceil": pointlessIntMath, + "math.Floor": pointlessIntMath, + "math.IsNaN": pointlessIntMath, + "math.Trunc": pointlessIntMath, + "math.IsInf": pointlessIntMath, + } + + checkStringsReplaceZeroRules = map[string]CallCheck{ + "strings.Replace": RepeatZeroTimes("strings.Replace", 3), + "bytes.Replace": RepeatZeroTimes("bytes.Replace", 3), + } + + checkListenAddressRules = map[string]CallCheck{ + "net/http.ListenAndServe": checkValidHostPort(0), + "net/http.ListenAndServeTLS": checkValidHostPort(0), + } + + checkBytesEqualIPRules = map[string]CallCheck{ + "bytes.Equal": func(call *Call) { + if ConvertedFrom(call.Args[Arg("bytes.Equal.a")].Value, "net.IP") && + ConvertedFrom(call.Args[Arg("bytes.Equal.b")].Value, "net.IP") { + call.Invalid("use net.IP.Equal to compare net.IPs, not bytes.Equal") + } + }, + } + + checkRegexpMatchLoopRules = map[string]CallCheck{ + "regexp.Match": loopedRegexp("regexp.Match"), + "regexp.MatchReader": loopedRegexp("regexp.MatchReader"), + "regexp.MatchString": loopedRegexp("regexp.MatchString"), + } + + checkNoopMarshal = map[string]CallCheck{ + // TODO(dh): should we really flag XML? Even an empty struct + // produces a non-zero amount of data, namely its type name. + // Let's see if we encounter any false positives. + // + // Also, should we flag gob? 
+ "encoding/json.Marshal": checkNoopMarshalImpl(Arg("json.Marshal.v"), "MarshalJSON", "MarshalText"), + "encoding/xml.Marshal": checkNoopMarshalImpl(Arg("xml.Marshal.v"), "MarshalXML", "MarshalText"), + "(*encoding/json.Encoder).Encode": checkNoopMarshalImpl(Arg("(*encoding/json.Encoder).Encode.v"), "MarshalJSON", "MarshalText"), + "(*encoding/xml.Encoder).Encode": checkNoopMarshalImpl(Arg("(*encoding/xml.Encoder).Encode.v"), "MarshalXML", "MarshalText"), + + "encoding/json.Unmarshal": checkNoopMarshalImpl(Arg("json.Unmarshal.v"), "UnmarshalJSON", "UnmarshalText"), + "encoding/xml.Unmarshal": checkNoopMarshalImpl(Arg("xml.Unmarshal.v"), "UnmarshalXML", "UnmarshalText"), + "(*encoding/json.Decoder).Decode": checkNoopMarshalImpl(Arg("(*encoding/json.Decoder).Decode.v"), "UnmarshalJSON", "UnmarshalText"), + "(*encoding/xml.Decoder).Decode": checkNoopMarshalImpl(Arg("(*encoding/xml.Decoder).Decode.v"), "UnmarshalXML", "UnmarshalText"), + } + + checkUnsupportedMarshal = map[string]CallCheck{ + "encoding/json.Marshal": checkUnsupportedMarshalImpl(Arg("json.Marshal.v"), "json", "MarshalJSON", "MarshalText"), + "encoding/xml.Marshal": checkUnsupportedMarshalImpl(Arg("xml.Marshal.v"), "xml", "MarshalXML", "MarshalText"), + "(*encoding/json.Encoder).Encode": checkUnsupportedMarshalImpl(Arg("(*encoding/json.Encoder).Encode.v"), "json", "MarshalJSON", "MarshalText"), + "(*encoding/xml.Encoder).Encode": checkUnsupportedMarshalImpl(Arg("(*encoding/xml.Encoder).Encode.v"), "xml", "MarshalXML", "MarshalText"), + } + + checkAtomicAlignment = map[string]CallCheck{ + "sync/atomic.AddInt64": checkAtomicAlignmentImpl, + "sync/atomic.AddUint64": checkAtomicAlignmentImpl, + "sync/atomic.CompareAndSwapInt64": checkAtomicAlignmentImpl, + "sync/atomic.CompareAndSwapUint64": checkAtomicAlignmentImpl, + "sync/atomic.LoadInt64": checkAtomicAlignmentImpl, + "sync/atomic.LoadUint64": checkAtomicAlignmentImpl, + "sync/atomic.StoreInt64": checkAtomicAlignmentImpl, + "sync/atomic.StoreUint64": checkAtomicAlignmentImpl, + "sync/atomic.SwapInt64": checkAtomicAlignmentImpl, + "sync/atomic.SwapUint64": checkAtomicAlignmentImpl, + } + + // TODO(dh): detect printf wrappers + checkPrintfRules = map[string]CallCheck{ + "fmt.Errorf": func(call *Call) { checkPrintfCall(call, 0, 1) }, + "fmt.Printf": func(call *Call) { checkPrintfCall(call, 0, 1) }, + "fmt.Sprintf": func(call *Call) { checkPrintfCall(call, 0, 1) }, + "fmt.Fprintf": func(call *Call) { checkPrintfCall(call, 1, 2) }, + } +) + +func checkPrintfCall(call *Call, fIdx, vIdx int) { + f := call.Args[fIdx] + var args []ssa.Value + switch v := call.Args[vIdx].Value.Value.(type) { + case *ssa.Slice: + var ok bool + args, ok = ssautil.Vararg(v) + if !ok { + // We don't know what the actual arguments to the function are + return + } + case *ssa.Const: + // nil, i.e. 
no arguments + default: + // We don't know what the actual arguments to the function are + return + } + checkPrintfCallImpl(call, f.Value.Value, args) +} + +type verbFlag int + +const ( + isInt verbFlag = 1 << iota + isBool + isFP + isString + isPointer + isPseudoPointer + isSlice + isAny + noRecurse +) + +var verbs = [...]verbFlag{ + 'b': isPseudoPointer | isInt | isFP, + 'c': isInt, + 'd': isPseudoPointer | isInt, + 'e': isFP, + 'E': isFP, + 'f': isFP, + 'F': isFP, + 'g': isFP, + 'G': isFP, + 'o': isPseudoPointer | isInt, + 'p': isSlice | isPointer | noRecurse, + 'q': isInt | isString, + 's': isString, + 't': isBool, + 'T': isAny, + 'U': isInt, + 'v': isAny, + 'X': isPseudoPointer | isInt | isString, + 'x': isPseudoPointer | isInt | isString, +} + +func checkPrintfCallImpl(call *Call, f ssa.Value, args []ssa.Value) { + var msCache *typeutil.MethodSetCache + if f.Parent() != nil { + msCache = &f.Parent().Prog.MethodSets + } + + elem := func(T types.Type, verb rune) ([]types.Type, bool) { + if verbs[verb]&noRecurse != 0 { + return []types.Type{T}, false + } + switch T := T.(type) { + case *types.Slice: + if verbs[verb]&isSlice != 0 { + return []types.Type{T}, false + } + if verbs[verb]&isString != 0 && IsType(T.Elem().Underlying(), "byte") { + return []types.Type{T}, false + } + return []types.Type{T.Elem()}, true + case *types.Map: + key := T.Key() + val := T.Elem() + return []types.Type{key, val}, true + case *types.Struct: + out := make([]types.Type, 0, T.NumFields()) + for i := 0; i < T.NumFields(); i++ { + out = append(out, T.Field(i).Type()) + } + return out, true + case *types.Array: + return []types.Type{T.Elem()}, true + default: + return []types.Type{T}, false + } + } + isInfo := func(T types.Type, info types.BasicInfo) bool { + basic, ok := T.Underlying().(*types.Basic) + return ok && basic.Info()&info != 0 + } + + isStringer := func(T types.Type, ms *types.MethodSet) bool { + sel := ms.Lookup(nil, "String") + if sel == nil { + return false + } + fn, ok := sel.Obj().(*types.Func) + if !ok { + // should be unreachable + return false + } + sig := fn.Type().(*types.Signature) + if sig.Params().Len() != 0 { + return false + } + if sig.Results().Len() != 1 { + return false + } + if !IsType(sig.Results().At(0).Type(), "string") { + return false + } + return true + } + isError := func(T types.Type, ms *types.MethodSet) bool { + sel := ms.Lookup(nil, "Error") + if sel == nil { + return false + } + fn, ok := sel.Obj().(*types.Func) + if !ok { + // should be unreachable + return false + } + sig := fn.Type().(*types.Signature) + if sig.Params().Len() != 0 { + return false + } + if sig.Results().Len() != 1 { + return false + } + if !IsType(sig.Results().At(0).Type(), "string") { + return false + } + return true + } + + isFormatter := func(T types.Type, ms *types.MethodSet) bool { + sel := ms.Lookup(nil, "Format") + if sel == nil { + return false + } + fn, ok := sel.Obj().(*types.Func) + if !ok { + // should be unreachable + return false + } + sig := fn.Type().(*types.Signature) + if sig.Params().Len() != 2 { + return false + } + // TODO(dh): check the types of the arguments for more + // precision + if sig.Results().Len() != 0 { + return false + } + return true + } + + seen := map[types.Type]bool{} + var checkType func(verb rune, T types.Type, top bool) bool + checkType = func(verb rune, T types.Type, top bool) bool { + if top { + for k := range seen { + delete(seen, k) + } + } + if seen[T] { + return true + } + seen[T] = true + if int(verb) >= len(verbs) { + // Unknown verb + return true + 
} + + flags := verbs[verb] + if flags == 0 { + // Unknown verb + return true + } + + ms := msCache.MethodSet(T) + if isFormatter(T, ms) { + // the value is responsible for formatting itself + return true + } + + if flags&isString != 0 && (isStringer(T, ms) || isError(T, ms)) { + // Check for stringer early because we're about to dereference + return true + } + + T = T.Underlying() + if flags&(isPointer|isPseudoPointer) == 0 && top { + T = Dereference(T) + } + if flags&isPseudoPointer != 0 && top { + t := Dereference(T) + if _, ok := t.Underlying().(*types.Struct); ok { + T = t + } + } + + if _, ok := T.(*types.Interface); ok { + // We don't know what's in the interface + return true + } + + var info types.BasicInfo + if flags&isInt != 0 { + info |= types.IsInteger + } + if flags&isBool != 0 { + info |= types.IsBoolean + } + if flags&isFP != 0 { + info |= types.IsFloat | types.IsComplex + } + if flags&isString != 0 { + info |= types.IsString + } + + if info != 0 && isInfo(T, info) { + return true + } + + if flags&isString != 0 && (IsType(T, "[]byte") || isStringer(T, ms) || isError(T, ms)) { + return true + } + + if flags&isPointer != 0 && IsPointerLike(T) { + return true + } + if flags&isPseudoPointer != 0 { + switch U := T.Underlying().(type) { + case *types.Pointer: + if !top { + return true + } + + if _, ok := U.Elem().Underlying().(*types.Struct); !ok { + return true + } + case *types.Chan, *types.Signature: + return true + } + } + + if flags&isSlice != 0 { + if _, ok := T.(*types.Slice); ok { + return true + } + } + + if flags&isAny != 0 { + return true + } + + elems, ok := elem(T.Underlying(), verb) + if !ok { + return false + } + for _, elem := range elems { + if !checkType(verb, elem, false) { + return false + } + } + + return true + } + + k, ok := f.(*ssa.Const) + if !ok { + return + } + actions, err := printf.Parse(constant.StringVal(k.Value)) + if err != nil { + call.Invalid("couldn't parse format string") + return + } + + ptr := 1 + hasExplicit := false + + checkStar := func(verb printf.Verb, star printf.Argument) bool { + if star, ok := star.(printf.Star); ok { + idx := 0 + if star.Index == -1 { + idx = ptr + ptr++ + } else { + hasExplicit = true + idx = star.Index + ptr = star.Index + 1 + } + if idx == 0 { + call.Invalid(fmt.Sprintf("Printf format %s reads invalid arg 0; indices are 1-based", verb.Raw)) + return false + } + if idx > len(args) { + call.Invalid( + fmt.Sprintf("Printf format %s reads arg #%d, but call has only %d args", + verb.Raw, idx, len(args))) + return false + } + if arg, ok := args[idx-1].(*ssa.MakeInterface); ok { + if !isInfo(arg.X.Type(), types.IsInteger) { + call.Invalid(fmt.Sprintf("Printf format %s reads non-int arg #%d as argument of *", verb.Raw, idx)) + } + } + } + return true + } + + // We only report one problem per format string. Making a + // mistake with an index tends to invalidate all future + // implicit indices. 
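+	// Illustrative example (editorial addition, not in the vendored source): for a call like
+	//   fmt.Printf("%d %s", 42)
+	// the loop below reports that the format string reads argument #2 while the call
+	// supplies only one argument, and then stops checking further verbs.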
+ for _, action := range actions { + verb, ok := action.(printf.Verb) + if !ok { + continue + } + + if !checkStar(verb, verb.Width) || !checkStar(verb, verb.Precision) { + return + } + + off := ptr + if verb.Value != -1 { + hasExplicit = true + off = verb.Value + } + if off > len(args) { + call.Invalid( + fmt.Sprintf("Printf format %s reads arg #%d, but call has only %d args", + verb.Raw, off, len(args))) + return + } else if verb.Value == 0 && verb.Letter != '%' { + call.Invalid(fmt.Sprintf("Printf format %s reads invalid arg 0; indices are 1-based", verb.Raw)) + return + } else if off != 0 { + arg, ok := args[off-1].(*ssa.MakeInterface) + if ok { + if !checkType(verb.Letter, arg.X.Type(), true) { + call.Invalid(fmt.Sprintf("Printf format %s has arg #%d of wrong type %s", + verb.Raw, ptr, args[ptr-1].(*ssa.MakeInterface).X.Type())) + return + } + } + } + + switch verb.Value { + case -1: + // Consume next argument + ptr++ + case 0: + // Don't consume any arguments + default: + ptr = verb.Value + 1 + } + } + + if !hasExplicit && ptr <= len(args) { + call.Invalid(fmt.Sprintf("Printf call needs %d args but has %d args", ptr-1, len(args))) + } +} + +func checkAtomicAlignmentImpl(call *Call) { + sizes := call.Pass.TypesSizes + if sizes.Sizeof(types.Typ[types.Uintptr]) != 4 { + // Not running on a 32-bit platform + return + } + v, ok := call.Args[0].Value.Value.(*ssa.FieldAddr) + if !ok { + // TODO(dh): also check indexing into arrays and slices + return + } + T := v.X.Type().Underlying().(*types.Pointer).Elem().Underlying().(*types.Struct) + fields := make([]*types.Var, 0, T.NumFields()) + for i := 0; i < T.NumFields() && i <= v.Field; i++ { + fields = append(fields, T.Field(i)) + } + + off := sizes.Offsetsof(fields)[v.Field] + if off%8 != 0 { + msg := fmt.Sprintf("address of non 64-bit aligned field %s passed to %s", + T.Field(v.Field).Name(), + CallName(call.Instr.Common())) + call.Invalid(msg) + } +} + +func checkNoopMarshalImpl(argN int, meths ...string) CallCheck { + return func(call *Call) { + if IsGenerated(call.Pass, call.Instr.Pos()) { + return + } + arg := call.Args[argN] + T := arg.Value.Value.Type() + Ts, ok := Dereference(T).Underlying().(*types.Struct) + if !ok { + return + } + if Ts.NumFields() == 0 { + return + } + fields := FlattenFields(Ts) + for _, field := range fields { + if field.Var.Exported() { + return + } + } + // OPT(dh): we could use a method set cache here + ms := call.Instr.Parent().Prog.MethodSets.MethodSet(T) + // TODO(dh): we're not checking the signature, which can cause false negatives. + // This isn't a huge problem, however, since vet complains about incorrect signatures. + for _, meth := range meths { + if ms.Lookup(nil, meth) != nil { + return + } + } + arg.Invalid("struct doesn't have any exported fields, nor custom marshaling") + } +} + +func checkUnsupportedMarshalImpl(argN int, tag string, meths ...string) CallCheck { + // TODO(dh): flag slices and maps of unsupported types + return func(call *Call) { + msCache := &call.Instr.Parent().Prog.MethodSets + + arg := call.Args[argN] + T := arg.Value.Value.Type() + Ts, ok := Dereference(T).Underlying().(*types.Struct) + if !ok { + return + } + ms := msCache.MethodSet(T) + // TODO(dh): we're not checking the signature, which can cause false negatives. + // This isn't a huge problem, however, since vet complains about incorrect signatures. 
+ for _, meth := range meths { + if ms.Lookup(nil, meth) != nil { + return + } + } + fields := FlattenFields(Ts) + for _, field := range fields { + if !(field.Var.Exported()) { + continue + } + if reflect.StructTag(field.Tag).Get(tag) == "-" { + continue + } + ms := msCache.MethodSet(field.Var.Type()) + // TODO(dh): we're not checking the signature, which can cause false negatives. + // This isn't a huge problem, however, since vet complains about incorrect signatures. + for _, meth := range meths { + if ms.Lookup(nil, meth) != nil { + return + } + } + switch field.Var.Type().Underlying().(type) { + case *types.Chan, *types.Signature: + arg.Invalid(fmt.Sprintf("trying to marshal chan or func value, field %s", fieldPath(T, field.Path))) + } + } + } +} + +func fieldPath(start types.Type, indices []int) string { + p := start.String() + for _, idx := range indices { + field := Dereference(start).Underlying().(*types.Struct).Field(idx) + start = field.Type() + p += "." + field.Name() + } + return p +} + +func isInLoop(b *ssa.BasicBlock) bool { + sets := functions.FindLoops(b.Parent()) + for _, set := range sets { + if set.Has(b) { + return true + } + } + return false +} + +func CheckUntrappableSignal(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + call := node.(*ast.CallExpr) + if !IsCallToAnyAST(pass, call, + "os/signal.Ignore", "os/signal.Notify", "os/signal.Reset") { + return + } + for _, arg := range call.Args { + if conv, ok := arg.(*ast.CallExpr); ok && isName(pass, conv.Fun, "os.Signal") { + arg = conv.Args[0] + } + + if isName(pass, arg, "os.Kill") || isName(pass, arg, "syscall.SIGKILL") { + ReportNodef(pass, arg, "%s cannot be trapped (did you mean syscall.SIGTERM?)", Render(pass, arg)) + } + if isName(pass, arg, "syscall.SIGSTOP") { + ReportNodef(pass, arg, "%s signal cannot be trapped", Render(pass, arg)) + } + } + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.CallExpr)(nil)}, fn) + return nil, nil +} + +func CheckTemplate(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + call := node.(*ast.CallExpr) + var kind string + if IsCallToAST(pass, call, "(*text/template.Template).Parse") { + kind = "text" + } else if IsCallToAST(pass, call, "(*html/template.Template).Parse") { + kind = "html" + } else { + return + } + sel := call.Fun.(*ast.SelectorExpr) + if !IsCallToAST(pass, sel.X, "text/template.New") && + !IsCallToAST(pass, sel.X, "html/template.New") { + // TODO(dh): this is a cheap workaround for templates with + // different delims. 
A better solution with less false + // negatives would use data flow analysis to see where the + // template comes from and where it has been + return + } + s, ok := ExprToString(pass, call.Args[Arg("(*text/template.Template).Parse.text")]) + if !ok { + return + } + var err error + switch kind { + case "text": + _, err = texttemplate.New("").Parse(s) + case "html": + _, err = htmltemplate.New("").Parse(s) + } + if err != nil { + // TODO(dominikh): whitelist other parse errors, if any + if strings.Contains(err.Error(), "unexpected") { + ReportNodef(pass, call.Args[Arg("(*text/template.Template).Parse.text")], "%s", err) + } + } + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.CallExpr)(nil)}, fn) + return nil, nil +} + +func CheckTimeSleepConstant(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + call := node.(*ast.CallExpr) + if !IsCallToAST(pass, call, "time.Sleep") { + return + } + lit, ok := call.Args[Arg("time.Sleep.d")].(*ast.BasicLit) + if !ok { + return + } + n, err := strconv.Atoi(lit.Value) + if err != nil { + return + } + if n == 0 || n > 120 { + // time.Sleep(0) is a seldom used pattern in concurrency + // tests. >120 might be intentional. 120 was chosen + // because the user could've meant 2 minutes. + return + } + recommendation := "time.Sleep(time.Nanosecond)" + if n != 1 { + recommendation = fmt.Sprintf("time.Sleep(%d * time.Nanosecond)", n) + } + ReportNodef(pass, call.Args[Arg("time.Sleep.d")], + "sleeping for %d nanoseconds is probably a bug. Be explicit if it isn't: %s", n, recommendation) + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.CallExpr)(nil)}, fn) + return nil, nil +} + +func CheckWaitgroupAdd(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + g := node.(*ast.GoStmt) + fun, ok := g.Call.Fun.(*ast.FuncLit) + if !ok { + return + } + if len(fun.Body.List) == 0 { + return + } + stmt, ok := fun.Body.List[0].(*ast.ExprStmt) + if !ok { + return + } + if IsCallToAST(pass, stmt.X, "(*sync.WaitGroup).Add") { + ReportNodef(pass, stmt, "should call %s before starting the goroutine to avoid a race", + Render(pass, stmt)) + } + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.GoStmt)(nil)}, fn) + return nil, nil +} + +func CheckInfiniteEmptyLoop(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + loop := node.(*ast.ForStmt) + if len(loop.Body.List) != 0 || loop.Post != nil { + return + } + + if loop.Init != nil { + // TODO(dh): this isn't strictly necessary, it just makes + // the check easier. + return + } + // An empty loop is bad news in two cases: 1) The loop has no + // condition. In that case, it's just a loop that spins + // forever and as fast as it can, keeping a core busy. 2) The + // loop condition only consists of variable or field reads and + // operators on those. The only way those could change their + // value is with unsynchronised access, which constitutes a + // data race. + // + // If the condition contains any function calls, its behaviour + // is dynamic and the loop might terminate. Similarly for + // channel receives. + + if loop.Cond != nil { + if hasSideEffects(loop.Cond) { + return + } + if ident, ok := loop.Cond.(*ast.Ident); ok { + if k, ok := pass.TypesInfo.ObjectOf(ident).(*types.Const); ok { + if !constant.BoolVal(k.Val()) { + // don't flag `for false {}` loops. They're a debug aid. 
+ return + } + } + } + ReportNodef(pass, loop, "loop condition never changes or has a race condition") + } + ReportNodef(pass, loop, "this loop will spin, using 100%% CPU") + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.ForStmt)(nil)}, fn) + return nil, nil +} + +func CheckDeferInInfiniteLoop(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + mightExit := false + var defers []ast.Stmt + loop := node.(*ast.ForStmt) + if loop.Cond != nil { + return + } + fn2 := func(node ast.Node) bool { + switch stmt := node.(type) { + case *ast.ReturnStmt: + mightExit = true + return false + case *ast.BranchStmt: + // TODO(dominikh): if this sees a break in a switch or + // select, it doesn't check if it breaks the loop or + // just the select/switch. This causes some false + // negatives. + if stmt.Tok == token.BREAK { + mightExit = true + return false + } + case *ast.DeferStmt: + defers = append(defers, stmt) + case *ast.FuncLit: + // Don't look into function bodies + return false + } + return true + } + ast.Inspect(loop.Body, fn2) + if mightExit { + return + } + for _, stmt := range defers { + ReportNodef(pass, stmt, "defers in this infinite loop will never run") + } + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.ForStmt)(nil)}, fn) + return nil, nil +} + +func CheckDubiousDeferInChannelRangeLoop(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + loop := node.(*ast.RangeStmt) + typ := pass.TypesInfo.TypeOf(loop.X) + _, ok := typ.Underlying().(*types.Chan) + if !ok { + return + } + fn2 := func(node ast.Node) bool { + switch stmt := node.(type) { + case *ast.DeferStmt: + ReportNodef(pass, stmt, "defers in this range loop won't run unless the channel gets closed") + case *ast.FuncLit: + // Don't look into function bodies + return false + } + return true + } + ast.Inspect(loop.Body, fn2) + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.RangeStmt)(nil)}, fn) + return nil, nil +} + +func CheckTestMainExit(pass *analysis.Pass) (interface{}, error) { + var ( + fnmain ast.Node + callsExit bool + callsRun bool + arg types.Object + ) + fn := func(node ast.Node, push bool) bool { + if !push { + if fnmain != nil && node == fnmain { + if !callsExit && callsRun { + ReportNodef(pass, fnmain, "TestMain should call os.Exit to set exit code") + } + fnmain = nil + callsExit = false + callsRun = false + arg = nil + } + return true + } + + switch node := node.(type) { + case *ast.FuncDecl: + if fnmain != nil { + return true + } + if !isTestMain(pass, node) { + return false + } + fnmain = node + arg = pass.TypesInfo.ObjectOf(node.Type.Params.List[0].Names[0]) + return true + case *ast.CallExpr: + if IsCallToAST(pass, node, "os.Exit") { + callsExit = true + return false + } + sel, ok := node.Fun.(*ast.SelectorExpr) + if !ok { + return true + } + ident, ok := sel.X.(*ast.Ident) + if !ok { + return true + } + if arg != pass.TypesInfo.ObjectOf(ident) { + return true + } + if sel.Sel.Name == "Run" { + callsRun = true + return false + } + return true + default: + // unreachable + return true + } + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Nodes([]ast.Node{(*ast.FuncDecl)(nil), (*ast.CallExpr)(nil)}, fn) + return nil, nil +} + +func isTestMain(pass *analysis.Pass, decl *ast.FuncDecl) bool { + if decl.Name.Name != "TestMain" { + return false + } + if len(decl.Type.Params.List) != 1 { + return false + } + arg := decl.Type.Params.List[0] + if 
len(arg.Names) != 1 { + return false + } + return IsOfType(pass, arg.Type, "*testing.M") +} + +func CheckExec(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + call := node.(*ast.CallExpr) + if !IsCallToAST(pass, call, "os/exec.Command") { + return + } + val, ok := ExprToString(pass, call.Args[Arg("os/exec.Command.name")]) + if !ok { + return + } + if !strings.Contains(val, " ") || strings.Contains(val, `\`) || strings.Contains(val, "/") { + return + } + ReportNodef(pass, call.Args[Arg("os/exec.Command.name")], + "first argument to exec.Command looks like a shell command, but a program name or path are expected") + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.CallExpr)(nil)}, fn) + return nil, nil +} + +func CheckLoopEmptyDefault(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + loop := node.(*ast.ForStmt) + if len(loop.Body.List) != 1 || loop.Cond != nil || loop.Init != nil { + return + } + sel, ok := loop.Body.List[0].(*ast.SelectStmt) + if !ok { + return + } + for _, c := range sel.Body.List { + if comm, ok := c.(*ast.CommClause); ok && comm.Comm == nil && len(comm.Body) == 0 { + ReportNodef(pass, comm, "should not have an empty default case in a for+select loop. The loop will spin.") + } + } + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.ForStmt)(nil)}, fn) + return nil, nil +} + +func CheckLhsRhsIdentical(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + op := node.(*ast.BinaryExpr) + switch op.Op { + case token.EQL, token.NEQ: + if basic, ok := pass.TypesInfo.TypeOf(op.X).Underlying().(*types.Basic); ok { + if kind := basic.Kind(); kind == types.Float32 || kind == types.Float64 { + // f == f and f != f might be used to check for NaN + return + } + } + case token.SUB, token.QUO, token.AND, token.REM, token.OR, token.XOR, token.AND_NOT, + token.LAND, token.LOR, token.LSS, token.GTR, token.LEQ, token.GEQ: + default: + // For some ops, such as + and *, it can make sense to + // have identical operands + return + } + + if Render(pass, op.X) != Render(pass, op.Y) { + return + } + l1, ok1 := op.X.(*ast.BasicLit) + l2, ok2 := op.Y.(*ast.BasicLit) + if ok1 && ok2 && l1.Kind == token.INT && l2.Kind == l1.Kind && l1.Value == "0" && l2.Value == l1.Value && IsGenerated(pass, l1.Pos()) { + // cgo generates the following function call: + // _cgoCheckPointer(_cgoBase0, 0 == 0) – it uses 0 == 0 + // instead of true in case the user shadowed the + // identifier. Ideally we'd restrict this exception to + // calls of _cgoCheckPointer, but it's not worth the + // hassle of keeping track of the stack. + // are very rare to begin with, and we're mostly checking + // for them to catch typos such as 1 == 1 where the user + // meant to type i == 1. The odds of a false negative for + // 0 == 0 are slim. 
+ return + } + ReportNodef(pass, op, "identical expressions on the left and right side of the '%s' operator", op.Op) + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.BinaryExpr)(nil)}, fn) + return nil, nil +} + +func CheckScopedBreak(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + var body *ast.BlockStmt + switch node := node.(type) { + case *ast.ForStmt: + body = node.Body + case *ast.RangeStmt: + body = node.Body + default: + panic(fmt.Sprintf("unreachable: %T", node)) + } + for _, stmt := range body.List { + var blocks [][]ast.Stmt + switch stmt := stmt.(type) { + case *ast.SwitchStmt: + for _, c := range stmt.Body.List { + blocks = append(blocks, c.(*ast.CaseClause).Body) + } + case *ast.SelectStmt: + for _, c := range stmt.Body.List { + blocks = append(blocks, c.(*ast.CommClause).Body) + } + default: + continue + } + + for _, body := range blocks { + if len(body) == 0 { + continue + } + lasts := []ast.Stmt{body[len(body)-1]} + // TODO(dh): unfold all levels of nested block + // statements, not just a single level if statement + if ifs, ok := lasts[0].(*ast.IfStmt); ok { + if len(ifs.Body.List) == 0 { + continue + } + lasts[0] = ifs.Body.List[len(ifs.Body.List)-1] + + if block, ok := ifs.Else.(*ast.BlockStmt); ok { + if len(block.List) != 0 { + lasts = append(lasts, block.List[len(block.List)-1]) + } + } + } + for _, last := range lasts { + branch, ok := last.(*ast.BranchStmt) + if !ok || branch.Tok != token.BREAK || branch.Label != nil { + continue + } + ReportNodef(pass, branch, "ineffective break statement. Did you mean to break out of the outer loop?") + } + } + } + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.ForStmt)(nil), (*ast.RangeStmt)(nil)}, fn) + return nil, nil +} + +func CheckUnsafePrintf(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + call := node.(*ast.CallExpr) + var arg int + if IsCallToAnyAST(pass, call, "fmt.Printf", "fmt.Sprintf", "log.Printf") { + arg = Arg("fmt.Printf.format") + } else if IsCallToAnyAST(pass, call, "fmt.Fprintf") { + arg = Arg("fmt.Fprintf.format") + } else { + return + } + if len(call.Args) != arg+1 { + return + } + switch call.Args[arg].(type) { + case *ast.CallExpr, *ast.Ident: + default: + return + } + ReportNodef(pass, call.Args[arg], + "printf-style function with dynamic format string and no further arguments should use print-style function instead") + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.CallExpr)(nil)}, fn) + return nil, nil +} + +func CheckEarlyDefer(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + block := node.(*ast.BlockStmt) + if len(block.List) < 2 { + return + } + for i, stmt := range block.List { + if i == len(block.List)-1 { + break + } + assign, ok := stmt.(*ast.AssignStmt) + if !ok { + continue + } + if len(assign.Rhs) != 1 { + continue + } + if len(assign.Lhs) < 2 { + continue + } + if lhs, ok := assign.Lhs[len(assign.Lhs)-1].(*ast.Ident); ok && lhs.Name == "_" { + continue + } + call, ok := assign.Rhs[0].(*ast.CallExpr) + if !ok { + continue + } + sig, ok := pass.TypesInfo.TypeOf(call.Fun).(*types.Signature) + if !ok { + continue + } + if sig.Results().Len() < 2 { + continue + } + last := sig.Results().At(sig.Results().Len() - 1) + // FIXME(dh): check that it's error from universe, not + // another type of the same name + if last.Type().String() != "error" { + continue + } + lhs, ok := assign.Lhs[0].(*ast.Ident) + if 
!ok { + continue + } + def, ok := block.List[i+1].(*ast.DeferStmt) + if !ok { + continue + } + sel, ok := def.Call.Fun.(*ast.SelectorExpr) + if !ok { + continue + } + ident, ok := selectorX(sel).(*ast.Ident) + if !ok { + continue + } + if ident.Obj != lhs.Obj { + continue + } + if sel.Sel.Name != "Close" { + continue + } + ReportNodef(pass, def, "should check returned error before deferring %s", Render(pass, def.Call)) + } + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.BlockStmt)(nil)}, fn) + return nil, nil +} + +func selectorX(sel *ast.SelectorExpr) ast.Node { + switch x := sel.X.(type) { + case *ast.SelectorExpr: + return selectorX(x) + default: + return x + } +} + +func CheckEmptyCriticalSection(pass *analysis.Pass) (interface{}, error) { + // Initially it might seem like this check would be easier to + // implement in SSA. After all, we're only checking for two + // consecutive method calls. In reality, however, there may be any + // number of other instructions between the lock and unlock, while + // still constituting an empty critical section. For example, + // given `m.x().Lock(); m.x().Unlock()`, there will be a call to + // x(). In the AST-based approach, this has a tiny potential for a + // false positive (the second call to x might be doing work that + // is protected by the mutex). In an SSA-based approach, however, + // it would miss a lot of real bugs. + + mutexParams := func(s ast.Stmt) (x ast.Expr, funcName string, ok bool) { + expr, ok := s.(*ast.ExprStmt) + if !ok { + return nil, "", false + } + call, ok := expr.X.(*ast.CallExpr) + if !ok { + return nil, "", false + } + sel, ok := call.Fun.(*ast.SelectorExpr) + if !ok { + return nil, "", false + } + + fn, ok := pass.TypesInfo.ObjectOf(sel.Sel).(*types.Func) + if !ok { + return nil, "", false + } + sig := fn.Type().(*types.Signature) + if sig.Params().Len() != 0 || sig.Results().Len() != 0 { + return nil, "", false + } + + return sel.X, fn.Name(), true + } + + fn := func(node ast.Node) { + block := node.(*ast.BlockStmt) + if len(block.List) < 2 { + return + } + for i := range block.List[:len(block.List)-1] { + sel1, method1, ok1 := mutexParams(block.List[i]) + sel2, method2, ok2 := mutexParams(block.List[i+1]) + + if !ok1 || !ok2 || Render(pass, sel1) != Render(pass, sel2) { + continue + } + if (method1 == "Lock" && method2 == "Unlock") || + (method1 == "RLock" && method2 == "RUnlock") { + ReportNodef(pass, block.List[i+1], "empty critical section") + } + } + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.BlockStmt)(nil)}, fn) + return nil, nil +} + +// cgo produces code like fn(&*_Cvar_kSomeCallbacks) which we don't +// want to flag. +var cgoIdent = regexp.MustCompile(`^_C(func|var)_.+$`) + +func CheckIneffectiveCopy(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + if unary, ok := node.(*ast.UnaryExpr); ok { + if star, ok := unary.X.(*ast.StarExpr); ok && unary.Op == token.AND { + ident, ok := star.X.(*ast.Ident) + if !ok || !cgoIdent.MatchString(ident.Name) { + ReportNodef(pass, unary, "&*x will be simplified to x. It will not copy x.") + } + } + } + + if star, ok := node.(*ast.StarExpr); ok { + if unary, ok := star.X.(*ast.UnaryExpr); ok && unary.Op == token.AND { + ReportNodef(pass, star, "*&x will be simplified to x. 
It will not copy x.") + } + } + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.UnaryExpr)(nil), (*ast.StarExpr)(nil)}, fn) + return nil, nil +} + +func CheckDiffSizeComparison(pass *analysis.Pass) (interface{}, error) { + ranges := pass.ResultOf[valueRangesAnalyzer].(map[*ssa.Function]vrp.Ranges) + for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + for _, b := range ssafn.Blocks { + for _, ins := range b.Instrs { + binop, ok := ins.(*ssa.BinOp) + if !ok { + continue + } + if binop.Op != token.EQL && binop.Op != token.NEQ { + continue + } + _, ok1 := binop.X.(*ssa.Slice) + _, ok2 := binop.Y.(*ssa.Slice) + if !ok1 && !ok2 { + continue + } + r := ranges[ssafn] + r1, ok1 := r.Get(binop.X).(vrp.StringInterval) + r2, ok2 := r.Get(binop.Y).(vrp.StringInterval) + if !ok1 || !ok2 { + continue + } + if r1.Length.Intersection(r2.Length).Empty() { + pass.Reportf(binop.Pos(), "comparing strings of different sizes for equality will always return false") + } + } + } + } + return nil, nil +} + +func CheckCanonicalHeaderKey(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node, push bool) bool { + if !push { + return false + } + assign, ok := node.(*ast.AssignStmt) + if ok { + // TODO(dh): This risks missing some Header reads, for + // example in `h1["foo"] = h2["foo"]` – these edge + // cases are probably rare enough to ignore for now. + for _, expr := range assign.Lhs { + op, ok := expr.(*ast.IndexExpr) + if !ok { + continue + } + if IsOfType(pass, op.X, "net/http.Header") { + return false + } + } + return true + } + op, ok := node.(*ast.IndexExpr) + if !ok { + return true + } + if !IsOfType(pass, op.X, "net/http.Header") { + return true + } + s, ok := ExprToString(pass, op.Index) + if !ok { + return true + } + if s == http.CanonicalHeaderKey(s) { + return true + } + ReportNodef(pass, op, "keys in http.Header are canonicalized, %q is not canonical; fix the constant or use http.CanonicalHeaderKey", s) + return true + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Nodes([]ast.Node{(*ast.AssignStmt)(nil), (*ast.IndexExpr)(nil)}, fn) + return nil, nil +} + +func CheckBenchmarkN(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + assign := node.(*ast.AssignStmt) + if len(assign.Lhs) != 1 || len(assign.Rhs) != 1 { + return + } + sel, ok := assign.Lhs[0].(*ast.SelectorExpr) + if !ok { + return + } + if sel.Sel.Name != "N" { + return + } + if !IsOfType(pass, sel.X, "*testing.B") { + return + } + ReportNodef(pass, assign, "should not assign to %s", Render(pass, sel)) + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.AssignStmt)(nil)}, fn) + return nil, nil +} + +func CheckUnreadVariableValues(pass *analysis.Pass) (interface{}, error) { + for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + if IsExample(ssafn) { + continue + } + node := ssafn.Syntax() + if node == nil { + continue + } + if gen, ok := Generator(pass, node.Pos()); ok && gen == facts.Goyacc { + // Don't flag unused values in code generated by goyacc. + // There may be hundreds of those due to the way the state + // machine is constructed. 
+ continue + } + + switchTags := map[ssa.Value]struct{}{} + ast.Inspect(node, func(node ast.Node) bool { + s, ok := node.(*ast.SwitchStmt) + if !ok { + return true + } + v, _ := ssafn.ValueForExpr(s.Tag) + switchTags[v] = struct{}{} + return true + }) + + hasUse := func(v ssa.Value) bool { + if _, ok := switchTags[v]; ok { + return true + } + refs := v.Referrers() + if refs == nil { + // TODO investigate why refs can be nil + return true + } + return len(FilterDebug(*refs)) > 0 + } + + ast.Inspect(node, func(node ast.Node) bool { + assign, ok := node.(*ast.AssignStmt) + if !ok { + return true + } + if len(assign.Lhs) > 1 && len(assign.Rhs) == 1 { + // Either a function call with multiple return values, + // or a comma-ok assignment + + val, _ := ssafn.ValueForExpr(assign.Rhs[0]) + if val == nil { + return true + } + refs := val.Referrers() + if refs == nil { + return true + } + for _, ref := range *refs { + ex, ok := ref.(*ssa.Extract) + if !ok { + continue + } + if !hasUse(ex) { + lhs := assign.Lhs[ex.Index] + if ident, ok := lhs.(*ast.Ident); !ok || ok && ident.Name == "_" { + continue + } + ReportNodef(pass, lhs, "this value of %s is never used", lhs) + } + } + return true + } + for i, lhs := range assign.Lhs { + rhs := assign.Rhs[i] + if ident, ok := lhs.(*ast.Ident); !ok || ok && ident.Name == "_" { + continue + } + val, _ := ssafn.ValueForExpr(rhs) + if val == nil { + continue + } + + if !hasUse(val) { + ReportNodef(pass, lhs, "this value of %s is never used", lhs) + } + } + return true + }) + } + return nil, nil +} + +func CheckPredeterminedBooleanExprs(pass *analysis.Pass) (interface{}, error) { + for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + for _, block := range ssafn.Blocks { + for _, ins := range block.Instrs { + ssabinop, ok := ins.(*ssa.BinOp) + if !ok { + continue + } + switch ssabinop.Op { + case token.GTR, token.LSS, token.EQL, token.NEQ, token.LEQ, token.GEQ: + default: + continue + } + + xs, ok1 := consts(ssabinop.X, nil, nil) + ys, ok2 := consts(ssabinop.Y, nil, nil) + if !ok1 || !ok2 || len(xs) == 0 || len(ys) == 0 { + continue + } + + trues := 0 + for _, x := range xs { + for _, y := range ys { + if x.Value == nil { + if y.Value == nil { + trues++ + } + continue + } + if constant.Compare(x.Value, ssabinop.Op, y.Value) { + trues++ + } + } + } + b := trues != 0 + if trues == 0 || trues == len(xs)*len(ys) { + pass.Reportf(ssabinop.Pos(), "binary expression is always %t for all possible values (%s %s %s)", + b, xs, ssabinop.Op, ys) + } + } + } + } + return nil, nil +} + +func CheckNilMaps(pass *analysis.Pass) (interface{}, error) { + for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + for _, block := range ssafn.Blocks { + for _, ins := range block.Instrs { + mu, ok := ins.(*ssa.MapUpdate) + if !ok { + continue + } + c, ok := mu.Map.(*ssa.Const) + if !ok { + continue + } + if c.Value != nil { + continue + } + pass.Reportf(mu.Pos(), "assignment to nil map") + } + } + } + return nil, nil +} + +func CheckExtremeComparison(pass *analysis.Pass) (interface{}, error) { + isobj := func(expr ast.Expr, name string) bool { + sel, ok := expr.(*ast.SelectorExpr) + if !ok { + return false + } + return IsObject(pass.TypesInfo.ObjectOf(sel.Sel), name) + } + + fn := func(node ast.Node) { + expr := node.(*ast.BinaryExpr) + tx := pass.TypesInfo.TypeOf(expr.X) + basic, ok := tx.Underlying().(*types.Basic) + if !ok { + return + } + + var max string + var min string + + switch basic.Kind() { + case types.Uint8: + max = 
"math.MaxUint8" + case types.Uint16: + max = "math.MaxUint16" + case types.Uint32: + max = "math.MaxUint32" + case types.Uint64: + max = "math.MaxUint64" + case types.Uint: + max = "math.MaxUint64" + + case types.Int8: + min = "math.MinInt8" + max = "math.MaxInt8" + case types.Int16: + min = "math.MinInt16" + max = "math.MaxInt16" + case types.Int32: + min = "math.MinInt32" + max = "math.MaxInt32" + case types.Int64: + min = "math.MinInt64" + max = "math.MaxInt64" + case types.Int: + min = "math.MinInt64" + max = "math.MaxInt64" + } + + if (expr.Op == token.GTR || expr.Op == token.GEQ) && isobj(expr.Y, max) || + (expr.Op == token.LSS || expr.Op == token.LEQ) && isobj(expr.X, max) { + ReportNodef(pass, expr, "no value of type %s is greater than %s", basic, max) + } + if expr.Op == token.LEQ && isobj(expr.Y, max) || + expr.Op == token.GEQ && isobj(expr.X, max) { + ReportNodef(pass, expr, "every value of type %s is <= %s", basic, max) + } + + if (basic.Info() & types.IsUnsigned) != 0 { + if (expr.Op == token.LSS || expr.Op == token.LEQ) && IsIntLiteral(expr.Y, "0") || + (expr.Op == token.GTR || expr.Op == token.GEQ) && IsIntLiteral(expr.X, "0") { + ReportNodef(pass, expr, "no value of type %s is less than 0", basic) + } + if expr.Op == token.GEQ && IsIntLiteral(expr.Y, "0") || + expr.Op == token.LEQ && IsIntLiteral(expr.X, "0") { + ReportNodef(pass, expr, "every value of type %s is >= 0", basic) + } + } else { + if (expr.Op == token.LSS || expr.Op == token.LEQ) && isobj(expr.Y, min) || + (expr.Op == token.GTR || expr.Op == token.GEQ) && isobj(expr.X, min) { + ReportNodef(pass, expr, "no value of type %s is less than %s", basic, min) + } + if expr.Op == token.GEQ && isobj(expr.Y, min) || + expr.Op == token.LEQ && isobj(expr.X, min) { + ReportNodef(pass, expr, "every value of type %s is >= %s", basic, min) + } + } + + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.BinaryExpr)(nil)}, fn) + return nil, nil +} + +func consts(val ssa.Value, out []*ssa.Const, visitedPhis map[string]bool) ([]*ssa.Const, bool) { + if visitedPhis == nil { + visitedPhis = map[string]bool{} + } + var ok bool + switch val := val.(type) { + case *ssa.Phi: + if visitedPhis[val.Name()] { + break + } + visitedPhis[val.Name()] = true + vals := val.Operands(nil) + for _, phival := range vals { + out, ok = consts(*phival, out, visitedPhis) + if !ok { + return nil, false + } + } + case *ssa.Const: + out = append(out, val) + case *ssa.Convert: + out, ok = consts(val.X, out, visitedPhis) + if !ok { + return nil, false + } + default: + return nil, false + } + if len(out) < 2 { + return out, true + } + uniq := []*ssa.Const{out[0]} + for _, val := range out[1:] { + if val.Value == uniq[len(uniq)-1].Value { + continue + } + uniq = append(uniq, val) + } + return uniq, true +} + +func CheckLoopCondition(pass *analysis.Pass) (interface{}, error) { + for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + fn := func(node ast.Node) bool { + loop, ok := node.(*ast.ForStmt) + if !ok { + return true + } + if loop.Init == nil || loop.Cond == nil || loop.Post == nil { + return true + } + init, ok := loop.Init.(*ast.AssignStmt) + if !ok || len(init.Lhs) != 1 || len(init.Rhs) != 1 { + return true + } + cond, ok := loop.Cond.(*ast.BinaryExpr) + if !ok { + return true + } + x, ok := cond.X.(*ast.Ident) + if !ok { + return true + } + lhs, ok := init.Lhs[0].(*ast.Ident) + if !ok { + return true + } + if x.Obj != lhs.Obj { + return true + } + if _, ok := loop.Post.(*ast.IncDecStmt); !ok 
{ + return true + } + + v, isAddr := ssafn.ValueForExpr(cond.X) + if v == nil || isAddr { + return true + } + switch v := v.(type) { + case *ssa.Phi: + ops := v.Operands(nil) + if len(ops) != 2 { + return true + } + _, ok := (*ops[0]).(*ssa.Const) + if !ok { + return true + } + sigma, ok := (*ops[1]).(*ssa.Sigma) + if !ok { + return true + } + if sigma.X != v { + return true + } + case *ssa.UnOp: + return true + } + ReportNodef(pass, cond, "variable in loop condition never changes") + + return true + } + Inspect(ssafn.Syntax(), fn) + } + return nil, nil +} + +func CheckArgOverwritten(pass *analysis.Pass) (interface{}, error) { + for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + fn := func(node ast.Node) bool { + var typ *ast.FuncType + var body *ast.BlockStmt + switch fn := node.(type) { + case *ast.FuncDecl: + typ = fn.Type + body = fn.Body + case *ast.FuncLit: + typ = fn.Type + body = fn.Body + } + if body == nil { + return true + } + if len(typ.Params.List) == 0 { + return true + } + for _, field := range typ.Params.List { + for _, arg := range field.Names { + obj := pass.TypesInfo.ObjectOf(arg) + var ssaobj *ssa.Parameter + for _, param := range ssafn.Params { + if param.Object() == obj { + ssaobj = param + break + } + } + if ssaobj == nil { + continue + } + refs := ssaobj.Referrers() + if refs == nil { + continue + } + if len(FilterDebug(*refs)) != 0 { + continue + } + + assigned := false + ast.Inspect(body, func(node ast.Node) bool { + assign, ok := node.(*ast.AssignStmt) + if !ok { + return true + } + for _, lhs := range assign.Lhs { + ident, ok := lhs.(*ast.Ident) + if !ok { + continue + } + if pass.TypesInfo.ObjectOf(ident) == obj { + assigned = true + return false + } + } + return true + }) + if assigned { + ReportNodef(pass, arg, "argument %s is overwritten before first use", arg) + } + } + } + return true + } + Inspect(ssafn.Syntax(), fn) + } + return nil, nil +} + +func CheckIneffectiveLoop(pass *analysis.Pass) (interface{}, error) { + // This check detects some, but not all unconditional loop exits. + // We give up in the following cases: + // + // - a goto anywhere in the loop. The goto might skip over our + // return, and we don't check that it doesn't. + // + // - any nested, unlabelled continue, even if it is in another + // loop or closure. + fn := func(node ast.Node) { + var body *ast.BlockStmt + switch fn := node.(type) { + case *ast.FuncDecl: + body = fn.Body + case *ast.FuncLit: + body = fn.Body + default: + panic(fmt.Sprintf("unreachable: %T", node)) + } + if body == nil { + return + } + labels := map[*ast.Object]ast.Stmt{} + ast.Inspect(body, func(node ast.Node) bool { + label, ok := node.(*ast.LabeledStmt) + if !ok { + return true + } + labels[label.Label.Obj] = label.Stmt + return true + }) + + ast.Inspect(body, func(node ast.Node) bool { + var loop ast.Node + var body *ast.BlockStmt + switch node := node.(type) { + case *ast.ForStmt: + body = node.Body + loop = node + case *ast.RangeStmt: + typ := pass.TypesInfo.TypeOf(node.X) + if _, ok := typ.Underlying().(*types.Map); ok { + // looping once over a map is a valid pattern for + // getting an arbitrary element. + return true + } + body = node.Body + loop = node + default: + return true + } + if len(body.List) < 2 { + // avoid flagging the somewhat common pattern of using + // a range loop to get the first element in a slice, + // or the first rune in a string. 
+ return true + } + var unconditionalExit ast.Node + hasBranching := false + for _, stmt := range body.List { + switch stmt := stmt.(type) { + case *ast.BranchStmt: + switch stmt.Tok { + case token.BREAK: + if stmt.Label == nil || labels[stmt.Label.Obj] == loop { + unconditionalExit = stmt + } + case token.CONTINUE: + if stmt.Label == nil || labels[stmt.Label.Obj] == loop { + unconditionalExit = nil + return false + } + } + case *ast.ReturnStmt: + unconditionalExit = stmt + case *ast.IfStmt, *ast.ForStmt, *ast.RangeStmt, *ast.SwitchStmt, *ast.SelectStmt: + hasBranching = true + } + } + if unconditionalExit == nil || !hasBranching { + return false + } + ast.Inspect(body, func(node ast.Node) bool { + if branch, ok := node.(*ast.BranchStmt); ok { + + switch branch.Tok { + case token.GOTO: + unconditionalExit = nil + return false + case token.CONTINUE: + if branch.Label != nil && labels[branch.Label.Obj] != loop { + return true + } + unconditionalExit = nil + return false + } + } + return true + }) + if unconditionalExit != nil { + ReportNodef(pass, unconditionalExit, "the surrounding loop is unconditionally terminated") + } + return true + }) + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.FuncDecl)(nil), (*ast.FuncLit)(nil)}, fn) + return nil, nil +} + +func CheckNilContext(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + call := node.(*ast.CallExpr) + if len(call.Args) == 0 { + return + } + if typ, ok := pass.TypesInfo.TypeOf(call.Args[0]).(*types.Basic); !ok || typ.Kind() != types.UntypedNil { + return + } + sig, ok := pass.TypesInfo.TypeOf(call.Fun).(*types.Signature) + if !ok { + return + } + if sig.Params().Len() == 0 { + return + } + if !IsType(sig.Params().At(0).Type(), "context.Context") { + return + } + ReportNodef(pass, call.Args[0], + "do not pass a nil Context, even if a function permits it; pass context.TODO if you are unsure about which Context to use") + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.CallExpr)(nil)}, fn) + return nil, nil +} + +func CheckSeeker(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + call := node.(*ast.CallExpr) + sel, ok := call.Fun.(*ast.SelectorExpr) + if !ok { + return + } + if sel.Sel.Name != "Seek" { + return + } + if len(call.Args) != 2 { + return + } + arg0, ok := call.Args[Arg("(io.Seeker).Seek.offset")].(*ast.SelectorExpr) + if !ok { + return + } + switch arg0.Sel.Name { + case "SeekStart", "SeekCurrent", "SeekEnd": + default: + return + } + pkg, ok := arg0.X.(*ast.Ident) + if !ok { + return + } + if pkg.Name != "io" { + return + } + ReportNodef(pass, call, "the first argument of io.Seeker is the offset, but an io.Seek* constant is being used instead") + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.CallExpr)(nil)}, fn) + return nil, nil +} + +func CheckIneffectiveAppend(pass *analysis.Pass) (interface{}, error) { + isAppend := func(ins ssa.Value) bool { + call, ok := ins.(*ssa.Call) + if !ok { + return false + } + if call.Call.IsInvoke() { + return false + } + if builtin, ok := call.Call.Value.(*ssa.Builtin); !ok || builtin.Name() != "append" { + return false + } + return true + } + + for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + for _, block := range ssafn.Blocks { + for _, ins := range block.Instrs { + val, ok := ins.(ssa.Value) + if !ok || !isAppend(val) { + continue + } + + isUsed := false + visited := map[ssa.Instruction]bool{} + 
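+				// Descriptive note added editorially (not in the upstream file): walkRefs,
+				// declared below, follows referrers through Phi and Sigma values; a result
+				// whose only uses are further append calls is still treated as unused, which
+				// is what triggers the "result of append is never used" report.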
var walkRefs func(refs []ssa.Instruction) + walkRefs = func(refs []ssa.Instruction) { + loop: + for _, ref := range refs { + if visited[ref] { + continue + } + visited[ref] = true + if _, ok := ref.(*ssa.DebugRef); ok { + continue + } + switch ref := ref.(type) { + case *ssa.Phi: + walkRefs(*ref.Referrers()) + case *ssa.Sigma: + walkRefs(*ref.Referrers()) + case ssa.Value: + if !isAppend(ref) { + isUsed = true + } else { + walkRefs(*ref.Referrers()) + } + case ssa.Instruction: + isUsed = true + break loop + } + } + } + refs := val.Referrers() + if refs == nil { + continue + } + walkRefs(*refs) + if !isUsed { + pass.Reportf(ins.Pos(), "this result of append is never used, except maybe in other appends") + } + } + } + } + return nil, nil +} + +func CheckConcurrentTesting(pass *analysis.Pass) (interface{}, error) { + for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + for _, block := range ssafn.Blocks { + for _, ins := range block.Instrs { + gostmt, ok := ins.(*ssa.Go) + if !ok { + continue + } + var fn *ssa.Function + switch val := gostmt.Call.Value.(type) { + case *ssa.Function: + fn = val + case *ssa.MakeClosure: + fn = val.Fn.(*ssa.Function) + default: + continue + } + if fn.Blocks == nil { + continue + } + for _, block := range fn.Blocks { + for _, ins := range block.Instrs { + call, ok := ins.(*ssa.Call) + if !ok { + continue + } + if call.Call.IsInvoke() { + continue + } + callee := call.Call.StaticCallee() + if callee == nil { + continue + } + recv := callee.Signature.Recv() + if recv == nil { + continue + } + if !IsType(recv.Type(), "*testing.common") { + continue + } + fn, ok := call.Call.StaticCallee().Object().(*types.Func) + if !ok { + continue + } + name := fn.Name() + switch name { + case "FailNow", "Fatal", "Fatalf", "SkipNow", "Skip", "Skipf": + default: + continue + } + pass.Reportf(gostmt.Pos(), "the goroutine calls T.%s, which must be called in the same goroutine as the test", name) + } + } + } + } + } + return nil, nil +} + +func eachCall(ssafn *ssa.Function, fn func(caller *ssa.Function, site ssa.CallInstruction, callee *ssa.Function)) { + for _, b := range ssafn.Blocks { + for _, instr := range b.Instrs { + if site, ok := instr.(ssa.CallInstruction); ok { + if g := site.Common().StaticCallee(); g != nil { + fn(ssafn, site, g) + } + } + } + } +} + +func CheckCyclicFinalizer(pass *analysis.Pass) (interface{}, error) { + fn := func(caller *ssa.Function, site ssa.CallInstruction, callee *ssa.Function) { + if callee.RelString(nil) != "runtime.SetFinalizer" { + return + } + arg0 := site.Common().Args[Arg("runtime.SetFinalizer.obj")] + if iface, ok := arg0.(*ssa.MakeInterface); ok { + arg0 = iface.X + } + unop, ok := arg0.(*ssa.UnOp) + if !ok { + return + } + v, ok := unop.X.(*ssa.Alloc) + if !ok { + return + } + arg1 := site.Common().Args[Arg("runtime.SetFinalizer.finalizer")] + if iface, ok := arg1.(*ssa.MakeInterface); ok { + arg1 = iface.X + } + mc, ok := arg1.(*ssa.MakeClosure) + if !ok { + return + } + for _, b := range mc.Bindings { + if b == v { + pos := lint.DisplayPosition(pass.Fset, mc.Fn.Pos()) + pass.Reportf(site.Pos(), "the finalizer closes over the object, preventing the finalizer from ever running (at %s)", pos) + } + } + } + for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + eachCall(ssafn, fn) + } + return nil, nil +} + +/* +func CheckSliceOutOfBounds(pass *analysis.Pass) (interface{}, error) { + for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + for _, block := 
range ssafn.Blocks { + for _, ins := range block.Instrs { + ia, ok := ins.(*ssa.IndexAddr) + if !ok { + continue + } + if _, ok := ia.X.Type().Underlying().(*types.Slice); !ok { + continue + } + sr, ok1 := c.funcDescs.Get(ssafn).Ranges[ia.X].(vrp.SliceInterval) + idxr, ok2 := c.funcDescs.Get(ssafn).Ranges[ia.Index].(vrp.IntInterval) + if !ok1 || !ok2 || !sr.IsKnown() || !idxr.IsKnown() || sr.Length.Empty() || idxr.Empty() { + continue + } + if idxr.Lower.Cmp(sr.Length.Upper) >= 0 { + ReportNodef(pass, ia, "index out of bounds") + } + } + } + } + return nil, nil +} +*/ + +func CheckDeferLock(pass *analysis.Pass) (interface{}, error) { + for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + for _, block := range ssafn.Blocks { + instrs := FilterDebug(block.Instrs) + if len(instrs) < 2 { + continue + } + for i, ins := range instrs[:len(instrs)-1] { + call, ok := ins.(*ssa.Call) + if !ok { + continue + } + if !IsCallTo(call.Common(), "(*sync.Mutex).Lock") && !IsCallTo(call.Common(), "(*sync.RWMutex).RLock") { + continue + } + nins, ok := instrs[i+1].(*ssa.Defer) + if !ok { + continue + } + if !IsCallTo(&nins.Call, "(*sync.Mutex).Lock") && !IsCallTo(&nins.Call, "(*sync.RWMutex).RLock") { + continue + } + if call.Common().Args[0] != nins.Call.Args[0] { + continue + } + name := shortCallName(call.Common()) + alt := "" + switch name { + case "Lock": + alt = "Unlock" + case "RLock": + alt = "RUnlock" + } + pass.Reportf(nins.Pos(), "deferring %s right after having locked already; did you mean to defer %s?", name, alt) + } + } + } + return nil, nil +} + +func CheckNaNComparison(pass *analysis.Pass) (interface{}, error) { + isNaN := func(v ssa.Value) bool { + call, ok := v.(*ssa.Call) + if !ok { + return false + } + return IsCallTo(call.Common(), "math.NaN") + } + for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + for _, block := range ssafn.Blocks { + for _, ins := range block.Instrs { + ins, ok := ins.(*ssa.BinOp) + if !ok { + continue + } + if isNaN(ins.X) || isNaN(ins.Y) { + pass.Reportf(ins.Pos(), "no value is equal to NaN, not even NaN itself") + } + } + } + } + return nil, nil +} + +func CheckInfiniteRecursion(pass *analysis.Pass) (interface{}, error) { + for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + eachCall(ssafn, func(caller *ssa.Function, site ssa.CallInstruction, callee *ssa.Function) { + if callee != ssafn { + return + } + if _, ok := site.(*ssa.Go); ok { + // Recursively spawning goroutines doesn't consume + // stack space infinitely, so don't flag it. + return + } + + block := site.Block() + canReturn := false + for _, b := range ssafn.Blocks { + if block.Dominates(b) { + continue + } + if len(b.Instrs) == 0 { + continue + } + if _, ok := b.Instrs[len(b.Instrs)-1].(*ssa.Return); ok { + canReturn = true + break + } + } + if canReturn { + return + } + pass.Reportf(site.Pos(), "infinite recursive call") + }) + } + return nil, nil +} + +func objectName(obj types.Object) string { + if obj == nil { + return "" + } + var name string + if obj.Pkg() != nil && obj.Pkg().Scope().Lookup(obj.Name()) == obj { + s := obj.Pkg().Path() + if s != "" { + name += s + "." 
+ } + } + name += obj.Name() + return name +} + +func isName(pass *analysis.Pass, expr ast.Expr, name string) bool { + var obj types.Object + switch expr := expr.(type) { + case *ast.Ident: + obj = pass.TypesInfo.ObjectOf(expr) + case *ast.SelectorExpr: + obj = pass.TypesInfo.ObjectOf(expr.Sel) + } + return objectName(obj) == name +} + +func CheckLeakyTimeTick(pass *analysis.Pass) (interface{}, error) { + for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + if IsInMain(pass, ssafn) || IsInTest(pass, ssafn) { + continue + } + for _, block := range ssafn.Blocks { + for _, ins := range block.Instrs { + call, ok := ins.(*ssa.Call) + if !ok || !IsCallTo(call.Common(), "time.Tick") { + continue + } + if !functions.Terminates(call.Parent()) { + continue + } + pass.Reportf(call.Pos(), "using time.Tick leaks the underlying ticker, consider using it only in endless functions, tests and the main package, and use time.NewTicker here") + } + } + } + return nil, nil +} + +func CheckDoubleNegation(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + unary1 := node.(*ast.UnaryExpr) + unary2, ok := unary1.X.(*ast.UnaryExpr) + if !ok { + return + } + if unary1.Op != token.NOT || unary2.Op != token.NOT { + return + } + ReportNodef(pass, unary1, "negating a boolean twice has no effect; is this a typo?") + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.UnaryExpr)(nil)}, fn) + return nil, nil +} + +func hasSideEffects(node ast.Node) bool { + dynamic := false + ast.Inspect(node, func(node ast.Node) bool { + switch node := node.(type) { + case *ast.CallExpr: + dynamic = true + return false + case *ast.UnaryExpr: + if node.Op == token.ARROW { + dynamic = true + return false + } + } + return true + }) + return dynamic +} + +func CheckRepeatedIfElse(pass *analysis.Pass) (interface{}, error) { + seen := map[ast.Node]bool{} + + var collectConds func(ifstmt *ast.IfStmt, inits []ast.Stmt, conds []ast.Expr) ([]ast.Stmt, []ast.Expr) + collectConds = func(ifstmt *ast.IfStmt, inits []ast.Stmt, conds []ast.Expr) ([]ast.Stmt, []ast.Expr) { + seen[ifstmt] = true + if ifstmt.Init != nil { + inits = append(inits, ifstmt.Init) + } + conds = append(conds, ifstmt.Cond) + if elsestmt, ok := ifstmt.Else.(*ast.IfStmt); ok { + return collectConds(elsestmt, inits, conds) + } + return inits, conds + } + fn := func(node ast.Node) { + ifstmt := node.(*ast.IfStmt) + if seen[ifstmt] { + return + } + inits, conds := collectConds(ifstmt, nil, nil) + if len(inits) > 0 { + return + } + for _, cond := range conds { + if hasSideEffects(cond) { + return + } + } + counts := map[string]int{} + for _, cond := range conds { + s := Render(pass, cond) + counts[s]++ + if counts[s] == 2 { + ReportNodef(pass, cond, "this condition occurs multiple times in this if/else if chain") + } + } + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.IfStmt)(nil)}, fn) + return nil, nil +} + +func CheckSillyBitwiseOps(pass *analysis.Pass) (interface{}, error) { + for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + for _, block := range ssafn.Blocks { + for _, ins := range block.Instrs { + ins, ok := ins.(*ssa.BinOp) + if !ok { + continue + } + + if c, ok := ins.Y.(*ssa.Const); !ok || c.Value == nil || c.Value.Kind() != constant.Int || c.Uint64() != 0 { + continue + } + switch ins.Op { + case token.AND, token.OR, token.XOR: + default: + // we do not flag shifts because too often, x<<0 is part + // of a pattern, 
x<<0, x<<8, x<<16, ... + continue + } + path, _ := astutil.PathEnclosingInterval(File(pass, ins), ins.Pos(), ins.Pos()) + if len(path) == 0 { + continue + } + if node, ok := path[0].(*ast.BinaryExpr); !ok || !IsZero(node.Y) { + continue + } + + switch ins.Op { + case token.AND: + pass.Reportf(ins.Pos(), "x & 0 always equals 0") + case token.OR, token.XOR: + pass.Reportf(ins.Pos(), "x %s 0 always equals x", ins.Op) + } + } + } + } + return nil, nil +} + +func CheckNonOctalFileMode(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + call := node.(*ast.CallExpr) + sig, ok := pass.TypesInfo.TypeOf(call.Fun).(*types.Signature) + if !ok { + return + } + n := sig.Params().Len() + var args []int + for i := 0; i < n; i++ { + typ := sig.Params().At(i).Type() + if IsType(typ, "os.FileMode") { + args = append(args, i) + } + } + for _, i := range args { + lit, ok := call.Args[i].(*ast.BasicLit) + if !ok { + continue + } + if len(lit.Value) == 3 && + lit.Value[0] != '0' && + lit.Value[0] >= '0' && lit.Value[0] <= '7' && + lit.Value[1] >= '0' && lit.Value[1] <= '7' && + lit.Value[2] >= '0' && lit.Value[2] <= '7' { + + v, err := strconv.ParseInt(lit.Value, 10, 64) + if err != nil { + continue + } + ReportNodef(pass, call.Args[i], "file mode '%s' evaluates to %#o; did you mean '0%s'?", lit.Value, v, lit.Value) + } + } + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.CallExpr)(nil)}, fn) + return nil, nil +} + +func CheckPureFunctions(pass *analysis.Pass) (interface{}, error) { + pure := pass.ResultOf[facts.Purity].(facts.PurityResult) + +fnLoop: + for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + if IsInTest(pass, ssafn) { + params := ssafn.Signature.Params() + for i := 0; i < params.Len(); i++ { + param := params.At(i) + if IsType(param.Type(), "*testing.B") { + // Ignore discarded pure functions in code related + // to benchmarks. Instead of matching BenchmarkFoo + // functions, we match any function accepting a + // *testing.B. Benchmarks sometimes call generic + // functions for doing the actual work, and + // checking for the parameter is a lot easier and + // faster than analyzing call trees. + continue fnLoop + } + } + } + + for _, b := range ssafn.Blocks { + for _, ins := range b.Instrs { + ins, ok := ins.(*ssa.Call) + if !ok { + continue + } + refs := ins.Referrers() + if refs == nil || len(FilterDebug(*refs)) > 0 { + continue + } + callee := ins.Common().StaticCallee() + if callee == nil { + continue + } + if callee.Object() == nil { + // TODO(dh): support anonymous functions + continue + } + if _, ok := pure[callee.Object().(*types.Func)]; ok { + pass.Reportf(ins.Pos(), "%s is a pure function but its return value is ignored", callee.Name()) + continue + } + } + } + } + return nil, nil +} + +func CheckDeprecated(pass *analysis.Pass) (interface{}, error) { + deprs := pass.ResultOf[facts.Deprecated].(facts.DeprecatedResult) + + // Selectors can appear outside of function literals, e.g. when + // declaring package level variables. 
+ + var tfn types.Object + stack := 0 + fn := func(node ast.Node, push bool) bool { + if !push { + stack-- + return false + } + stack++ + if stack == 1 { + tfn = nil + } + if fn, ok := node.(*ast.FuncDecl); ok { + tfn = pass.TypesInfo.ObjectOf(fn.Name) + } + sel, ok := node.(*ast.SelectorExpr) + if !ok { + return true + } + + obj := pass.TypesInfo.ObjectOf(sel.Sel) + if obj.Pkg() == nil { + return true + } + if pass.Pkg == obj.Pkg() || obj.Pkg().Path()+"_test" == pass.Pkg.Path() { + // Don't flag stuff in our own package + return true + } + if depr, ok := deprs.Objects[obj]; ok { + // Look for the first available alternative, not the first + // version something was deprecated in. If a function was + // deprecated in Go 1.6, an alternative has been available + // already in 1.0, and we're targeting 1.2, it still + // makes sense to use the alternative from 1.0, to be + // future-proof. + minVersion := deprecated.Stdlib[SelectorName(pass, sel)].AlternativeAvailableSince + if !IsGoVersion(pass, minVersion) { + return true + } + + if tfn != nil { + if _, ok := deprs.Objects[tfn]; ok { + // functions that are deprecated may use deprecated + // symbols + return true + } + } + ReportNodef(pass, sel, "%s is deprecated: %s", Render(pass, sel), depr.Msg) + return true + } + return true + } + + imps := map[string]*types.Package{} + for _, imp := range pass.Pkg.Imports() { + imps[imp.Path()] = imp + } + fn2 := func(node ast.Node) { + spec := node.(*ast.ImportSpec) + p := spec.Path.Value + path := p[1 : len(p)-1] + imp := imps[path] + if depr, ok := deprs.Packages[imp]; ok { + ReportNodef(pass, spec, "Package %s is deprecated: %s", path, depr.Msg) + } + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Nodes(nil, fn) + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.ImportSpec)(nil)}, fn2) + return nil, nil +} + +func callChecker(rules map[string]CallCheck) func(pass *analysis.Pass) (interface{}, error) { + return func(pass *analysis.Pass) (interface{}, error) { + return checkCalls(pass, rules) + } +} + +func checkCalls(pass *analysis.Pass, rules map[string]CallCheck) (interface{}, error) { + ranges := pass.ResultOf[valueRangesAnalyzer].(map[*ssa.Function]vrp.Ranges) + fn := func(caller *ssa.Function, site ssa.CallInstruction, callee *ssa.Function) { + obj, ok := callee.Object().(*types.Func) + if !ok { + return + } + + r, ok := rules[lint.FuncName(obj)] + if !ok { + return + } + var args []*Argument + ssaargs := site.Common().Args + if callee.Signature.Recv() != nil { + ssaargs = ssaargs[1:] + } + for _, arg := range ssaargs { + if iarg, ok := arg.(*ssa.MakeInterface); ok { + arg = iarg.X + } + vr := ranges[site.Parent()][arg] + args = append(args, &Argument{Value: Value{arg, vr}}) + } + call := &Call{ + Pass: pass, + Instr: site, + Args: args, + Parent: site.Parent(), + } + r(call) + for idx, arg := range call.Args { + _ = idx + for _, e := range arg.invalids { + // path, _ := astutil.PathEnclosingInterval(f.File, edge.Site.Pos(), edge.Site.Pos()) + // if len(path) < 2 { + // continue + // } + // astcall, ok := path[0].(*ast.CallExpr) + // if !ok { + // continue + // } + // pass.Reportf(astcall.Args[idx], "%s", e) + + pass.Reportf(site.Pos(), "%s", e) + } + } + for _, e := range call.invalids { + pass.Reportf(call.Instr.Common().Pos(), "%s", e) + } + } + for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + eachCall(ssafn, fn) + } + return nil, nil +} + +func shortCallName(call *ssa.CallCommon) string { + if call.IsInvoke() { + 
return "" + } + switch v := call.Value.(type) { + case *ssa.Function: + fn, ok := v.Object().(*types.Func) + if !ok { + return "" + } + return fn.Name() + case *ssa.Builtin: + return v.Name() + } + return "" +} + +func CheckWriterBufferModified(pass *analysis.Pass) (interface{}, error) { + // TODO(dh): this might be a good candidate for taint analysis. + // Taint the argument as MUST_NOT_MODIFY, then propagate that + // through functions like bytes.Split + + for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + sig := ssafn.Signature + if ssafn.Name() != "Write" || sig.Recv() == nil || sig.Params().Len() != 1 || sig.Results().Len() != 2 { + continue + } + tArg, ok := sig.Params().At(0).Type().(*types.Slice) + if !ok { + continue + } + if basic, ok := tArg.Elem().(*types.Basic); !ok || basic.Kind() != types.Byte { + continue + } + if basic, ok := sig.Results().At(0).Type().(*types.Basic); !ok || basic.Kind() != types.Int { + continue + } + if named, ok := sig.Results().At(1).Type().(*types.Named); !ok || !IsType(named, "error") { + continue + } + + for _, block := range ssafn.Blocks { + for _, ins := range block.Instrs { + switch ins := ins.(type) { + case *ssa.Store: + addr, ok := ins.Addr.(*ssa.IndexAddr) + if !ok { + continue + } + if addr.X != ssafn.Params[1] { + continue + } + pass.Reportf(ins.Pos(), "io.Writer.Write must not modify the provided buffer, not even temporarily") + case *ssa.Call: + if !IsCallTo(ins.Common(), "append") { + continue + } + if ins.Common().Args[0] != ssafn.Params[1] { + continue + } + pass.Reportf(ins.Pos(), "io.Writer.Write must not modify the provided buffer, not even temporarily") + } + } + } + } + return nil, nil +} + +func loopedRegexp(name string) CallCheck { + return func(call *Call) { + if len(extractConsts(call.Args[0].Value.Value)) == 0 { + return + } + if !isInLoop(call.Instr.Block()) { + return + } + call.Invalid(fmt.Sprintf("calling %s in a loop has poor performance, consider using regexp.Compile", name)) + } +} + +func CheckEmptyBranch(pass *analysis.Pass) (interface{}, error) { + for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + if ssafn.Syntax() == nil { + continue + } + if IsExample(ssafn) { + continue + } + fn := func(node ast.Node) bool { + ifstmt, ok := node.(*ast.IfStmt) + if !ok { + return true + } + if ifstmt.Else != nil { + b, ok := ifstmt.Else.(*ast.BlockStmt) + if !ok || len(b.List) != 0 { + return true + } + ReportfFG(pass, ifstmt.Else.Pos(), "empty branch") + } + if len(ifstmt.Body.List) != 0 { + return true + } + ReportfFG(pass, ifstmt.Pos(), "empty branch") + return true + } + Inspect(ssafn.Syntax(), fn) + } + return nil, nil +} + +func CheckMapBytesKey(pass *analysis.Pass) (interface{}, error) { + for _, fn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + for _, b := range fn.Blocks { + insLoop: + for _, ins := range b.Instrs { + // find []byte -> string conversions + conv, ok := ins.(*ssa.Convert) + if !ok || conv.Type() != types.Universe.Lookup("string").Type() { + continue + } + if s, ok := conv.X.Type().(*types.Slice); !ok || s.Elem() != types.Universe.Lookup("byte").Type() { + continue + } + refs := conv.Referrers() + // need at least two (DebugRef) references: the + // conversion and the *ast.Ident + if refs == nil || len(*refs) < 2 { + continue + } + ident := false + // skip first reference, that's the conversion itself + for _, ref := range (*refs)[1:] { + switch ref := ref.(type) { + case *ssa.DebugRef: + if _, ok := 
ref.Expr.(*ast.Ident); !ok { + // the string seems to be used somewhere + // unexpected; the default branch should + // catch this already, but be safe + continue insLoop + } else { + ident = true + } + case *ssa.Lookup: + default: + // the string is used somewhere else than a + // map lookup + continue insLoop + } + } + + // the result of the conversion wasn't assigned to an + // identifier + if !ident { + continue + } + pass.Reportf(conv.Pos(), "m[string(key)] would be more efficient than k := string(key); m[k]") + } + } + } + return nil, nil +} + +func CheckRangeStringRunes(pass *analysis.Pass) (interface{}, error) { + return sharedcheck.CheckRangeStringRunes(pass) +} + +func CheckSelfAssignment(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + assign := node.(*ast.AssignStmt) + if assign.Tok != token.ASSIGN || len(assign.Lhs) != len(assign.Rhs) { + return + } + for i, stmt := range assign.Lhs { + rlh := Render(pass, stmt) + rrh := Render(pass, assign.Rhs[i]) + if rlh == rrh { + ReportfFG(pass, assign.Pos(), "self-assignment of %s to %s", rrh, rlh) + } + } + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.AssignStmt)(nil)}, fn) + return nil, nil +} + +func buildTagsIdentical(s1, s2 []string) bool { + if len(s1) != len(s2) { + return false + } + s1s := make([]string, len(s1)) + copy(s1s, s1) + sort.Strings(s1s) + s2s := make([]string, len(s2)) + copy(s2s, s2) + sort.Strings(s2s) + for i, s := range s1s { + if s != s2s[i] { + return false + } + } + return true +} + +func CheckDuplicateBuildConstraints(pass *analysis.Pass) (interface{}, error) { + for _, f := range pass.Files { + constraints := buildTags(f) + for i, constraint1 := range constraints { + for j, constraint2 := range constraints { + if i >= j { + continue + } + if buildTagsIdentical(constraint1, constraint2) { + ReportfFG(pass, f.Pos(), "identical build constraints %q and %q", + strings.Join(constraint1, " "), + strings.Join(constraint2, " ")) + } + } + } + } + return nil, nil +} + +func CheckSillyRegexp(pass *analysis.Pass) (interface{}, error) { + // We could use the rule checking engine for this, but the + // arguments aren't really invalid. 
+ for _, fn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + for _, b := range fn.Blocks { + for _, ins := range b.Instrs { + call, ok := ins.(*ssa.Call) + if !ok { + continue + } + switch CallName(call.Common()) { + case "regexp.MustCompile", "regexp.Compile", "regexp.Match", "regexp.MatchReader", "regexp.MatchString": + default: + continue + } + c, ok := call.Common().Args[0].(*ssa.Const) + if !ok { + continue + } + s := constant.StringVal(c.Value) + re, err := syntax.Parse(s, 0) + if err != nil { + continue + } + if re.Op != syntax.OpLiteral && re.Op != syntax.OpEmptyMatch { + continue + } + pass.Reportf(call.Pos(), "regular expression does not contain any meta characters") + } + } + } + return nil, nil +} + +func CheckMissingEnumTypesInDeclaration(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + decl := node.(*ast.GenDecl) + if !decl.Lparen.IsValid() { + return + } + if decl.Tok != token.CONST { + return + } + + groups := GroupSpecs(pass.Fset, decl.Specs) + groupLoop: + for _, group := range groups { + if len(group) < 2 { + continue + } + if group[0].(*ast.ValueSpec).Type == nil { + // first constant doesn't have a type + continue groupLoop + } + for i, spec := range group { + spec := spec.(*ast.ValueSpec) + if len(spec.Names) != 1 || len(spec.Values) != 1 { + continue groupLoop + } + switch v := spec.Values[0].(type) { + case *ast.BasicLit: + case *ast.UnaryExpr: + if _, ok := v.X.(*ast.BasicLit); !ok { + continue groupLoop + } + default: + // if it's not a literal it might be typed, such as + // time.Microsecond = 1000 * Nanosecond + continue groupLoop + } + if i == 0 { + continue + } + if spec.Type != nil { + continue groupLoop + } + } + ReportNodef(pass, group[0], "only the first constant in this group has an explicit type") + } + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.GenDecl)(nil)}, fn) + return nil, nil +} + +func CheckTimerResetReturnValue(pass *analysis.Pass) (interface{}, error) { + for _, fn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + for _, block := range fn.Blocks { + for _, ins := range block.Instrs { + call, ok := ins.(*ssa.Call) + if !ok { + continue + } + if !IsCallTo(call.Common(), "(*time.Timer).Reset") { + continue + } + refs := call.Referrers() + if refs == nil { + continue + } + for _, ref := range FilterDebug(*refs) { + ifstmt, ok := ref.(*ssa.If) + if !ok { + continue + } + + found := false + for _, succ := range ifstmt.Block().Succs { + if len(succ.Preds) != 1 { + // Merge point, not a branch in the + // syntactical sense. + + // FIXME(dh): this is broken for if + // statements a la "if x || y" + continue + } + ssautil.Walk(succ, func(b *ssa.BasicBlock) bool { + if !succ.Dominates(b) { + // We've reached the end of the branch + return false + } + for _, ins := range b.Instrs { + // TODO(dh): we should check that + // we're receiving from the channel of + // a time.Timer to further reduce + // false positives. 
Not a key + // priority, considering the rarity of + // Reset and the tiny likeliness of a + // false positive + if ins, ok := ins.(*ssa.UnOp); ok && ins.Op == token.ARROW && IsType(ins.X.Type(), "<-chan time.Time") { + found = true + return false + } + } + return true + }) + } + + if found { + pass.Reportf(call.Pos(), "it is not possible to use Reset's return value correctly, as there is a race condition between draining the channel and the new timer expiring") + } + } + } + } + } + return nil, nil +} + +func CheckToLowerToUpperComparison(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + binExpr := node.(*ast.BinaryExpr) + + var negative bool + switch binExpr.Op { + case token.EQL: + negative = false + case token.NEQ: + negative = true + default: + return + } + + const ( + lo = "strings.ToLower" + up = "strings.ToUpper" + ) + + var call string + if IsCallToAST(pass, binExpr.X, lo) && IsCallToAST(pass, binExpr.Y, lo) { + call = lo + } else if IsCallToAST(pass, binExpr.X, up) && IsCallToAST(pass, binExpr.Y, up) { + call = up + } else { + return + } + + bang := "" + if negative { + bang = "!" + } + + ReportNodef(pass, binExpr, "should use %sstrings.EqualFold(a, b) instead of %s(a) %s %s(b)", bang, call, binExpr.Op, call) + } + + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.BinaryExpr)(nil)}, fn) + return nil, nil +} + +func CheckUnreachableTypeCases(pass *analysis.Pass) (interface{}, error) { + // Check if T subsumes V in a type switch. T subsumes V if T is an interface and T's method set is a subset of V's method set. + subsumes := func(T, V types.Type) bool { + tIface, ok := T.Underlying().(*types.Interface) + if !ok { + return false + } + + return types.Implements(V, tIface) + } + + subsumesAny := func(Ts, Vs []types.Type) (types.Type, types.Type, bool) { + for _, T := range Ts { + for _, V := range Vs { + if subsumes(T, V) { + return T, V, true + } + } + } + + return nil, nil, false + } + + fn := func(node ast.Node) { + tsStmt := node.(*ast.TypeSwitchStmt) + + type ccAndTypes struct { + cc *ast.CaseClause + types []types.Type + } + + // All asserted types in the order of case clauses. + ccs := make([]ccAndTypes, 0, len(tsStmt.Body.List)) + for _, stmt := range tsStmt.Body.List { + cc, _ := stmt.(*ast.CaseClause) + + // Exclude the 'default' case. + if len(cc.List) == 0 { + continue + } + + Ts := make([]types.Type, len(cc.List)) + for i, expr := range cc.List { + Ts[i] = pass.TypesInfo.TypeOf(expr) + } + + ccs = append(ccs, ccAndTypes{cc: cc, types: Ts}) + } + + if len(ccs) <= 1 { + // Zero or one case clauses, nothing to check. + return + } + + // Check if case clauses following cc have types that are subsumed by cc. 
+ for i, cc := range ccs[:len(ccs)-1] { + for _, next := range ccs[i+1:] { + if T, V, yes := subsumesAny(cc.types, next.types); yes { + ReportNodef(pass, next.cc, "unreachable case clause: %s will always match before %s", T.String(), V.String()) + } + } + } + } + + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.TypeSwitchStmt)(nil)}, fn) + return nil, nil +} + +func CheckSingleArgAppend(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + if !IsCallToAST(pass, node, "append") { + return + } + call := node.(*ast.CallExpr) + if len(call.Args) != 1 { + return + } + ReportfFG(pass, call.Pos(), "x = append(y) is equivalent to x = y") + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.CallExpr)(nil)}, fn) + return nil, nil +} + +func CheckStructTags(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + for _, field := range node.(*ast.StructType).Fields.List { + if field.Tag == nil { + continue + } + tags, err := parseStructTag(field.Tag.Value[1 : len(field.Tag.Value)-1]) + if err != nil { + ReportNodef(pass, field.Tag, "unparseable struct tag: %s", err) + continue + } + for k, v := range tags { + if len(v) > 1 { + ReportNodef(pass, field.Tag, "duplicate struct tag %q", k) + continue + } + + switch k { + case "json": + checkJSONTag(pass, field, v[0]) + case "xml": + checkXMLTag(pass, field, v[0]) + } + } + } + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.StructType)(nil)}, fn) + return nil, nil +} + +func checkJSONTag(pass *analysis.Pass, field *ast.Field, tag string) { + //lint:ignore SA9003 TODO(dh): should we flag empty tags? + if len(tag) == 0 { + } + fields := strings.Split(tag, ",") + for _, r := range fields[0] { + if !unicode.IsLetter(r) && !unicode.IsDigit(r) && !strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", r) { + ReportNodef(pass, field.Tag, "invalid JSON field name %q", fields[0]) + } + } + var co, cs, ci int + for _, s := range fields[1:] { + switch s { + case "omitempty": + co++ + case "": + // allow stuff like "-," + case "string": + cs++ + // only for string, floating point, integer and bool + T := Dereference(pass.TypesInfo.TypeOf(field.Type).Underlying()).Underlying() + basic, ok := T.(*types.Basic) + if !ok || (basic.Info()&(types.IsBoolean|types.IsInteger|types.IsFloat|types.IsString)) == 0 { + ReportNodef(pass, field.Tag, "the JSON string option only applies to fields of type string, floating point, integer or bool, or pointers to those") + } + case "inline": + ci++ + default: + ReportNodef(pass, field.Tag, "unknown JSON option %q", s) + } + } + if co > 1 { + ReportNodef(pass, field.Tag, `duplicate JSON option "omitempty"`) + } + if cs > 1 { + ReportNodef(pass, field.Tag, `duplicate JSON option "string"`) + } + if ci > 1 { + ReportNodef(pass, field.Tag, `duplicate JSON option "inline"`) + } +} + +func checkXMLTag(pass *analysis.Pass, field *ast.Field, tag string) { + //lint:ignore SA9003 TODO(dh): should we flag empty tags? 
+ if len(tag) == 0 { + } + fields := strings.Split(tag, ",") + counts := map[string]int{} + var exclusives []string + for _, s := range fields[1:] { + switch s { + case "attr", "chardata", "cdata", "innerxml", "comment": + counts[s]++ + if counts[s] == 1 { + exclusives = append(exclusives, s) + } + case "omitempty", "any": + counts[s]++ + case "": + default: + ReportNodef(pass, field.Tag, "unknown XML option %q", s) + } + } + for k, v := range counts { + if v > 1 { + ReportNodef(pass, field.Tag, "duplicate XML option %q", k) + } + } + if len(exclusives) > 1 { + ReportNodef(pass, field.Tag, "XML options %s are mutually exclusive", strings.Join(exclusives, " and ")) + } +} diff --git a/vendor/honnef.co/go/tools/staticcheck/rules.go b/vendor/honnef.co/go/tools/staticcheck/rules.go new file mode 100644 index 000000000..0152cac1a --- /dev/null +++ b/vendor/honnef.co/go/tools/staticcheck/rules.go @@ -0,0 +1,321 @@ +package staticcheck + +import ( + "fmt" + "go/constant" + "go/types" + "net" + "net/url" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" + + "golang.org/x/tools/go/analysis" + . "honnef.co/go/tools/lint/lintdsl" + "honnef.co/go/tools/ssa" + "honnef.co/go/tools/staticcheck/vrp" +) + +const ( + MsgInvalidHostPort = "invalid port or service name in host:port pair" + MsgInvalidUTF8 = "argument is not a valid UTF-8 encoded string" + MsgNonUniqueCutset = "cutset contains duplicate characters" +) + +type Call struct { + Pass *analysis.Pass + Instr ssa.CallInstruction + Args []*Argument + + Parent *ssa.Function + + invalids []string +} + +func (c *Call) Invalid(msg string) { + c.invalids = append(c.invalids, msg) +} + +type Argument struct { + Value Value + invalids []string +} + +func (arg *Argument) Invalid(msg string) { + arg.invalids = append(arg.invalids, msg) +} + +type Value struct { + Value ssa.Value + Range vrp.Range +} + +type CallCheck func(call *Call) + +func extractConsts(v ssa.Value) []*ssa.Const { + switch v := v.(type) { + case *ssa.Const: + return []*ssa.Const{v} + case *ssa.MakeInterface: + return extractConsts(v.X) + default: + return nil + } +} + +func ValidateRegexp(v Value) error { + for _, c := range extractConsts(v.Value) { + if c.Value == nil { + continue + } + if c.Value.Kind() != constant.String { + continue + } + s := constant.StringVal(c.Value) + if _, err := regexp.Compile(s); err != nil { + return err + } + } + return nil +} + +func ValidateTimeLayout(v Value) error { + for _, c := range extractConsts(v.Value) { + if c.Value == nil { + continue + } + if c.Value.Kind() != constant.String { + continue + } + s := constant.StringVal(c.Value) + s = strings.Replace(s, "_", " ", -1) + s = strings.Replace(s, "Z", "-", -1) + _, err := time.Parse(s, s) + if err != nil { + return err + } + } + return nil +} + +func ValidateURL(v Value) error { + for _, c := range extractConsts(v.Value) { + if c.Value == nil { + continue + } + if c.Value.Kind() != constant.String { + continue + } + s := constant.StringVal(c.Value) + _, err := url.Parse(s) + if err != nil { + return fmt.Errorf("%q is not a valid URL: %s", s, err) + } + } + return nil +} + +func IntValue(v Value, z vrp.Z) bool { + r, ok := v.Range.(vrp.IntInterval) + if !ok || !r.IsKnown() { + return false + } + if r.Lower != r.Upper { + return false + } + if r.Lower.Cmp(z) == 0 { + return true + } + return false +} + +func InvalidUTF8(v Value) bool { + for _, c := range extractConsts(v.Value) { + if c.Value == nil { + continue + } + if c.Value.Kind() != constant.String { + continue + } + s := 
constant.StringVal(c.Value) + if !utf8.ValidString(s) { + return true + } + } + return false +} + +func UnbufferedChannel(v Value) bool { + r, ok := v.Range.(vrp.ChannelInterval) + if !ok || !r.IsKnown() { + return false + } + if r.Size.Lower.Cmp(vrp.NewZ(0)) == 0 && + r.Size.Upper.Cmp(vrp.NewZ(0)) == 0 { + return true + } + return false +} + +func Pointer(v Value) bool { + switch v.Value.Type().Underlying().(type) { + case *types.Pointer, *types.Interface: + return true + } + return false +} + +func ConvertedFromInt(v Value) bool { + conv, ok := v.Value.(*ssa.Convert) + if !ok { + return false + } + b, ok := conv.X.Type().Underlying().(*types.Basic) + if !ok { + return false + } + if (b.Info() & types.IsInteger) == 0 { + return false + } + return true +} + +func validEncodingBinaryType(pass *analysis.Pass, typ types.Type) bool { + typ = typ.Underlying() + switch typ := typ.(type) { + case *types.Basic: + switch typ.Kind() { + case types.Uint8, types.Uint16, types.Uint32, types.Uint64, + types.Int8, types.Int16, types.Int32, types.Int64, + types.Float32, types.Float64, types.Complex64, types.Complex128, types.Invalid: + return true + case types.Bool: + return IsGoVersion(pass, 8) + } + return false + case *types.Struct: + n := typ.NumFields() + for i := 0; i < n; i++ { + if !validEncodingBinaryType(pass, typ.Field(i).Type()) { + return false + } + } + return true + case *types.Array: + return validEncodingBinaryType(pass, typ.Elem()) + case *types.Interface: + // we can't determine if it's a valid type or not + return true + } + return false +} + +func CanBinaryMarshal(pass *analysis.Pass, v Value) bool { + typ := v.Value.Type().Underlying() + if ttyp, ok := typ.(*types.Pointer); ok { + typ = ttyp.Elem().Underlying() + } + if ttyp, ok := typ.(interface { + Elem() types.Type + }); ok { + if _, ok := ttyp.(*types.Pointer); !ok { + typ = ttyp.Elem() + } + } + + return validEncodingBinaryType(pass, typ) +} + +func RepeatZeroTimes(name string, arg int) CallCheck { + return func(call *Call) { + arg := call.Args[arg] + if IntValue(arg.Value, vrp.NewZ(0)) { + arg.Invalid(fmt.Sprintf("calling %s with n == 0 will return no results, did you mean -1?", name)) + } + } +} + +func validateServiceName(s string) bool { + if len(s) < 1 || len(s) > 15 { + return false + } + if s[0] == '-' || s[len(s)-1] == '-' { + return false + } + if strings.Contains(s, "--") { + return false + } + hasLetter := false + for _, r := range s { + if (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') { + hasLetter = true + continue + } + if r >= '0' && r <= '9' { + continue + } + return false + } + return hasLetter +} + +func validatePort(s string) bool { + n, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return validateServiceName(s) + } + return n >= 0 && n <= 65535 +} + +func ValidHostPort(v Value) bool { + for _, k := range extractConsts(v.Value) { + if k.Value == nil { + continue + } + if k.Value.Kind() != constant.String { + continue + } + s := constant.StringVal(k.Value) + _, port, err := net.SplitHostPort(s) + if err != nil { + return false + } + // TODO(dh): check hostname + if !validatePort(port) { + return false + } + } + return true +} + +// ConvertedFrom reports whether value v was converted from type typ. 
+func ConvertedFrom(v Value, typ string) bool { + change, ok := v.Value.(*ssa.ChangeType) + return ok && IsType(change.X.Type(), typ) +} + +func UniqueStringCutset(v Value) bool { + for _, c := range extractConsts(v.Value) { + if c.Value == nil { + continue + } + if c.Value.Kind() != constant.String { + continue + } + s := constant.StringVal(c.Value) + rs := runeSlice(s) + if len(rs) < 2 { + continue + } + sort.Sort(rs) + for i, r := range rs[1:] { + if rs[i] == r { + return false + } + } + } + return true +} diff --git a/vendor/honnef.co/go/tools/staticcheck/structtag.go b/vendor/honnef.co/go/tools/staticcheck/structtag.go new file mode 100644 index 000000000..38830a22c --- /dev/null +++ b/vendor/honnef.co/go/tools/staticcheck/structtag.go @@ -0,0 +1,58 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2019 Dominik Honnef. All rights reserved. + +package staticcheck + +import "strconv" + +func parseStructTag(tag string) (map[string][]string, error) { + // FIXME(dh): detect missing closing quote + out := map[string][]string{} + + for tag != "" { + // Skip leading space. + i := 0 + for i < len(tag) && tag[i] == ' ' { + i++ + } + tag = tag[i:] + if tag == "" { + break + } + + // Scan to colon. A space, a quote or a control character is a syntax error. + // Strictly speaking, control chars include the range [0x7f, 0x9f], not just + // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters + // as it is simpler to inspect the tag's bytes than the tag's runes. + i = 0 + for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f { + i++ + } + if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' { + break + } + name := string(tag[:i]) + tag = tag[i+1:] + + // Scan quoted string to find value. 
+ i = 1 + for i < len(tag) && tag[i] != '"' { + if tag[i] == '\\' { + i++ + } + i++ + } + if i >= len(tag) { + break + } + qvalue := string(tag[:i+1]) + tag = tag[i+1:] + + value, err := strconv.Unquote(qvalue) + if err != nil { + return nil, err + } + out[name] = append(out[name], value) + } + return out, nil +} diff --git a/vendor/honnef.co/go/tools/staticcheck/vrp/channel.go b/vendor/honnef.co/go/tools/staticcheck/vrp/channel.go new file mode 100644 index 000000000..0ef73787b --- /dev/null +++ b/vendor/honnef.co/go/tools/staticcheck/vrp/channel.go @@ -0,0 +1,73 @@ +package vrp + +import ( + "fmt" + + "honnef.co/go/tools/ssa" +) + +type ChannelInterval struct { + Size IntInterval +} + +func (c ChannelInterval) Union(other Range) Range { + i, ok := other.(ChannelInterval) + if !ok { + i = ChannelInterval{EmptyIntInterval} + } + if c.Size.Empty() || !c.Size.IsKnown() { + return i + } + if i.Size.Empty() || !i.Size.IsKnown() { + return c + } + return ChannelInterval{ + Size: c.Size.Union(i.Size).(IntInterval), + } +} + +func (c ChannelInterval) String() string { + return c.Size.String() +} + +func (c ChannelInterval) IsKnown() bool { + return c.Size.IsKnown() +} + +type MakeChannelConstraint struct { + aConstraint + Buffer ssa.Value +} +type ChannelChangeTypeConstraint struct { + aConstraint + X ssa.Value +} + +func NewMakeChannelConstraint(buffer, y ssa.Value) Constraint { + return &MakeChannelConstraint{NewConstraint(y), buffer} +} +func NewChannelChangeTypeConstraint(x, y ssa.Value) Constraint { + return &ChannelChangeTypeConstraint{NewConstraint(y), x} +} + +func (c *MakeChannelConstraint) Operands() []ssa.Value { return []ssa.Value{c.Buffer} } +func (c *ChannelChangeTypeConstraint) Operands() []ssa.Value { return []ssa.Value{c.X} } + +func (c *MakeChannelConstraint) String() string { + return fmt.Sprintf("%s = make(chan, %s)", c.Y().Name(), c.Buffer.Name()) +} +func (c *ChannelChangeTypeConstraint) String() string { + return fmt.Sprintf("%s = changetype(%s)", c.Y().Name(), c.X.Name()) +} + +func (c *MakeChannelConstraint) Eval(g *Graph) Range { + i, ok := g.Range(c.Buffer).(IntInterval) + if !ok { + return ChannelInterval{NewIntInterval(NewZ(0), PInfinity)} + } + if i.Lower.Sign() == -1 { + i.Lower = NewZ(0) + } + return ChannelInterval{i} +} +func (c *ChannelChangeTypeConstraint) Eval(g *Graph) Range { return g.Range(c.X) } diff --git a/vendor/honnef.co/go/tools/staticcheck/vrp/int.go b/vendor/honnef.co/go/tools/staticcheck/vrp/int.go new file mode 100644 index 000000000..926bb7af3 --- /dev/null +++ b/vendor/honnef.co/go/tools/staticcheck/vrp/int.go @@ -0,0 +1,476 @@ +package vrp + +import ( + "fmt" + "go/token" + "go/types" + "math/big" + + "honnef.co/go/tools/ssa" +) + +type Zs []Z + +func (zs Zs) Len() int { + return len(zs) +} + +func (zs Zs) Less(i int, j int) bool { + return zs[i].Cmp(zs[j]) == -1 +} + +func (zs Zs) Swap(i int, j int) { + zs[i], zs[j] = zs[j], zs[i] +} + +type Z struct { + infinity int8 + integer *big.Int +} + +func NewZ(n int64) Z { + return NewBigZ(big.NewInt(n)) +} + +func NewBigZ(n *big.Int) Z { + return Z{integer: n} +} + +func (z1 Z) Infinite() bool { + return z1.infinity != 0 +} + +func (z1 Z) Add(z2 Z) Z { + if z2.Sign() == -1 { + return z1.Sub(z2.Negate()) + } + if z1 == NInfinity { + return NInfinity + } + if z1 == PInfinity { + return PInfinity + } + if z2 == PInfinity { + return PInfinity + } + + if !z1.Infinite() && !z2.Infinite() { + n := &big.Int{} + n.Add(z1.integer, z2.integer) + return NewBigZ(n) + } + + panic(fmt.Sprintf("%s + %s is not 
defined", z1, z2)) +} + +func (z1 Z) Sub(z2 Z) Z { + if z2.Sign() == -1 { + return z1.Add(z2.Negate()) + } + if !z1.Infinite() && !z2.Infinite() { + n := &big.Int{} + n.Sub(z1.integer, z2.integer) + return NewBigZ(n) + } + + if z1 != PInfinity && z2 == PInfinity { + return NInfinity + } + if z1.Infinite() && !z2.Infinite() { + return Z{infinity: z1.infinity} + } + if z1 == PInfinity && z2 == PInfinity { + return PInfinity + } + panic(fmt.Sprintf("%s - %s is not defined", z1, z2)) +} + +func (z1 Z) Mul(z2 Z) Z { + if (z1.integer != nil && z1.integer.Sign() == 0) || + (z2.integer != nil && z2.integer.Sign() == 0) { + return NewBigZ(&big.Int{}) + } + + if z1.infinity != 0 || z2.infinity != 0 { + return Z{infinity: int8(z1.Sign() * z2.Sign())} + } + + n := &big.Int{} + n.Mul(z1.integer, z2.integer) + return NewBigZ(n) +} + +func (z1 Z) Negate() Z { + if z1.infinity == 1 { + return NInfinity + } + if z1.infinity == -1 { + return PInfinity + } + n := &big.Int{} + n.Neg(z1.integer) + return NewBigZ(n) +} + +func (z1 Z) Sign() int { + if z1.infinity != 0 { + return int(z1.infinity) + } + return z1.integer.Sign() +} + +func (z1 Z) String() string { + if z1 == NInfinity { + return "-∞" + } + if z1 == PInfinity { + return "∞" + } + return fmt.Sprintf("%d", z1.integer) +} + +func (z1 Z) Cmp(z2 Z) int { + if z1.infinity == z2.infinity && z1.infinity != 0 { + return 0 + } + if z1 == PInfinity { + return 1 + } + if z1 == NInfinity { + return -1 + } + if z2 == NInfinity { + return 1 + } + if z2 == PInfinity { + return -1 + } + return z1.integer.Cmp(z2.integer) +} + +func MaxZ(zs ...Z) Z { + if len(zs) == 0 { + panic("Max called with no arguments") + } + if len(zs) == 1 { + return zs[0] + } + ret := zs[0] + for _, z := range zs[1:] { + if z.Cmp(ret) == 1 { + ret = z + } + } + return ret +} + +func MinZ(zs ...Z) Z { + if len(zs) == 0 { + panic("Min called with no arguments") + } + if len(zs) == 1 { + return zs[0] + } + ret := zs[0] + for _, z := range zs[1:] { + if z.Cmp(ret) == -1 { + ret = z + } + } + return ret +} + +var NInfinity = Z{infinity: -1} +var PInfinity = Z{infinity: 1} +var EmptyIntInterval = IntInterval{true, PInfinity, NInfinity} + +func InfinityFor(v ssa.Value) IntInterval { + if b, ok := v.Type().Underlying().(*types.Basic); ok { + if (b.Info() & types.IsUnsigned) != 0 { + return NewIntInterval(NewZ(0), PInfinity) + } + } + return NewIntInterval(NInfinity, PInfinity) +} + +type IntInterval struct { + known bool + Lower Z + Upper Z +} + +func NewIntInterval(l, u Z) IntInterval { + if u.Cmp(l) == -1 { + return EmptyIntInterval + } + return IntInterval{known: true, Lower: l, Upper: u} +} + +func (i IntInterval) IsKnown() bool { + return i.known +} + +func (i IntInterval) Empty() bool { + return i.Lower == PInfinity && i.Upper == NInfinity +} + +func (i IntInterval) IsMaxRange() bool { + return i.Lower == NInfinity && i.Upper == PInfinity +} + +func (i1 IntInterval) Intersection(i2 IntInterval) IntInterval { + if !i1.IsKnown() { + return i2 + } + if !i2.IsKnown() { + return i1 + } + if i1.Empty() || i2.Empty() { + return EmptyIntInterval + } + i3 := NewIntInterval(MaxZ(i1.Lower, i2.Lower), MinZ(i1.Upper, i2.Upper)) + if i3.Lower.Cmp(i3.Upper) == 1 { + return EmptyIntInterval + } + return i3 +} + +func (i1 IntInterval) Union(other Range) Range { + i2, ok := other.(IntInterval) + if !ok { + i2 = EmptyIntInterval + } + if i1.Empty() || !i1.IsKnown() { + return i2 + } + if i2.Empty() || !i2.IsKnown() { + return i1 + } + return NewIntInterval(MinZ(i1.Lower, i2.Lower), MaxZ(i1.Upper, i2.Upper)) +} + 
+func (i1 IntInterval) Add(i2 IntInterval) IntInterval { + if i1.Empty() || i2.Empty() { + return EmptyIntInterval + } + l1, u1, l2, u2 := i1.Lower, i1.Upper, i2.Lower, i2.Upper + return NewIntInterval(l1.Add(l2), u1.Add(u2)) +} + +func (i1 IntInterval) Sub(i2 IntInterval) IntInterval { + if i1.Empty() || i2.Empty() { + return EmptyIntInterval + } + l1, u1, l2, u2 := i1.Lower, i1.Upper, i2.Lower, i2.Upper + return NewIntInterval(l1.Sub(u2), u1.Sub(l2)) +} + +func (i1 IntInterval) Mul(i2 IntInterval) IntInterval { + if i1.Empty() || i2.Empty() { + return EmptyIntInterval + } + x1, x2 := i1.Lower, i1.Upper + y1, y2 := i2.Lower, i2.Upper + return NewIntInterval( + MinZ(x1.Mul(y1), x1.Mul(y2), x2.Mul(y1), x2.Mul(y2)), + MaxZ(x1.Mul(y1), x1.Mul(y2), x2.Mul(y1), x2.Mul(y2)), + ) +} + +func (i1 IntInterval) String() string { + if !i1.IsKnown() { + return "[⊥, ⊥]" + } + if i1.Empty() { + return "{}" + } + return fmt.Sprintf("[%s, %s]", i1.Lower, i1.Upper) +} + +type IntArithmeticConstraint struct { + aConstraint + A ssa.Value + B ssa.Value + Op token.Token + Fn func(IntInterval, IntInterval) IntInterval +} + +type IntAddConstraint struct{ *IntArithmeticConstraint } +type IntSubConstraint struct{ *IntArithmeticConstraint } +type IntMulConstraint struct{ *IntArithmeticConstraint } + +type IntConversionConstraint struct { + aConstraint + X ssa.Value +} + +type IntIntersectionConstraint struct { + aConstraint + ranges Ranges + A ssa.Value + B ssa.Value + Op token.Token + I IntInterval + resolved bool +} + +type IntIntervalConstraint struct { + aConstraint + I IntInterval +} + +func NewIntArithmeticConstraint(a, b, y ssa.Value, op token.Token, fn func(IntInterval, IntInterval) IntInterval) *IntArithmeticConstraint { + return &IntArithmeticConstraint{NewConstraint(y), a, b, op, fn} +} +func NewIntAddConstraint(a, b, y ssa.Value) Constraint { + return &IntAddConstraint{NewIntArithmeticConstraint(a, b, y, token.ADD, IntInterval.Add)} +} +func NewIntSubConstraint(a, b, y ssa.Value) Constraint { + return &IntSubConstraint{NewIntArithmeticConstraint(a, b, y, token.SUB, IntInterval.Sub)} +} +func NewIntMulConstraint(a, b, y ssa.Value) Constraint { + return &IntMulConstraint{NewIntArithmeticConstraint(a, b, y, token.MUL, IntInterval.Mul)} +} +func NewIntConversionConstraint(x, y ssa.Value) Constraint { + return &IntConversionConstraint{NewConstraint(y), x} +} +func NewIntIntersectionConstraint(a, b ssa.Value, op token.Token, ranges Ranges, y ssa.Value) Constraint { + return &IntIntersectionConstraint{ + aConstraint: NewConstraint(y), + ranges: ranges, + A: a, + B: b, + Op: op, + } +} +func NewIntIntervalConstraint(i IntInterval, y ssa.Value) Constraint { + return &IntIntervalConstraint{NewConstraint(y), i} +} + +func (c *IntArithmeticConstraint) Operands() []ssa.Value { return []ssa.Value{c.A, c.B} } +func (c *IntConversionConstraint) Operands() []ssa.Value { return []ssa.Value{c.X} } +func (c *IntIntersectionConstraint) Operands() []ssa.Value { return []ssa.Value{c.A} } +func (s *IntIntervalConstraint) Operands() []ssa.Value { return nil } + +func (c *IntArithmeticConstraint) String() string { + return fmt.Sprintf("%s = %s %s %s", c.Y().Name(), c.A.Name(), c.Op, c.B.Name()) +} +func (c *IntConversionConstraint) String() string { + return fmt.Sprintf("%s = %s(%s)", c.Y().Name(), c.Y().Type(), c.X.Name()) +} +func (c *IntIntersectionConstraint) String() string { + return fmt.Sprintf("%s = %s %s %s (%t branch)", c.Y().Name(), c.A.Name(), c.Op, c.B.Name(), c.Y().(*ssa.Sigma).Branch) +} +func (c 
*IntIntervalConstraint) String() string { return fmt.Sprintf("%s = %s", c.Y().Name(), c.I) } + +func (c *IntArithmeticConstraint) Eval(g *Graph) Range { + i1, i2 := g.Range(c.A).(IntInterval), g.Range(c.B).(IntInterval) + if !i1.IsKnown() || !i2.IsKnown() { + return IntInterval{} + } + return c.Fn(i1, i2) +} +func (c *IntConversionConstraint) Eval(g *Graph) Range { + s := &types.StdSizes{ + // XXX is it okay to assume the largest word size, or do we + // need to be platform specific? + WordSize: 8, + MaxAlign: 1, + } + fromI := g.Range(c.X).(IntInterval) + toI := g.Range(c.Y()).(IntInterval) + fromT := c.X.Type().Underlying().(*types.Basic) + toT := c.Y().Type().Underlying().(*types.Basic) + fromB := s.Sizeof(c.X.Type()) + toB := s.Sizeof(c.Y().Type()) + + if !fromI.IsKnown() { + return toI + } + if !toI.IsKnown() { + return fromI + } + + // uint -> sint/uint, M > N: [max(0, l1), min(2**N-1, u2)] + if (fromT.Info()&types.IsUnsigned != 0) && + toB > fromB { + + n := big.NewInt(1) + n.Lsh(n, uint(fromB*8)) + n.Sub(n, big.NewInt(1)) + return NewIntInterval( + MaxZ(NewZ(0), fromI.Lower), + MinZ(NewBigZ(n), toI.Upper), + ) + } + + // sint -> sint, M > N; [max(-∞, l1), min(2**N-1, u2)] + if (fromT.Info()&types.IsUnsigned == 0) && + (toT.Info()&types.IsUnsigned == 0) && + toB > fromB { + + n := big.NewInt(1) + n.Lsh(n, uint(fromB*8)) + n.Sub(n, big.NewInt(1)) + return NewIntInterval( + MaxZ(NInfinity, fromI.Lower), + MinZ(NewBigZ(n), toI.Upper), + ) + } + + return fromI +} +func (c *IntIntersectionConstraint) Eval(g *Graph) Range { + xi := g.Range(c.A).(IntInterval) + if !xi.IsKnown() { + return c.I + } + return xi.Intersection(c.I) +} +func (c *IntIntervalConstraint) Eval(*Graph) Range { return c.I } + +func (c *IntIntersectionConstraint) Futures() []ssa.Value { + return []ssa.Value{c.B} +} + +func (c *IntIntersectionConstraint) Resolve() { + r, ok := c.ranges[c.B].(IntInterval) + if !ok { + c.I = InfinityFor(c.Y()) + return + } + + switch c.Op { + case token.EQL: + c.I = r + case token.GTR: + c.I = NewIntInterval(r.Lower.Add(NewZ(1)), PInfinity) + case token.GEQ: + c.I = NewIntInterval(r.Lower, PInfinity) + case token.LSS: + // TODO(dh): do we need 0 instead of NInfinity for uints? + c.I = NewIntInterval(NInfinity, r.Upper.Sub(NewZ(1))) + case token.LEQ: + c.I = NewIntInterval(NInfinity, r.Upper) + case token.NEQ: + c.I = InfinityFor(c.Y()) + default: + panic("unsupported op " + c.Op.String()) + } +} + +func (c *IntIntersectionConstraint) IsKnown() bool { + return c.I.IsKnown() +} + +func (c *IntIntersectionConstraint) MarkUnresolved() { + c.resolved = false +} + +func (c *IntIntersectionConstraint) MarkResolved() { + c.resolved = true +} + +func (c *IntIntersectionConstraint) IsResolved() bool { + return c.resolved +} diff --git a/vendor/honnef.co/go/tools/staticcheck/vrp/slice.go b/vendor/honnef.co/go/tools/staticcheck/vrp/slice.go new file mode 100644 index 000000000..40658dd8d --- /dev/null +++ b/vendor/honnef.co/go/tools/staticcheck/vrp/slice.go @@ -0,0 +1,273 @@ +package vrp + +// TODO(dh): most of the constraints have implementations identical to +// that of strings. Consider reusing them. 
+ +import ( + "fmt" + "go/types" + + "honnef.co/go/tools/ssa" +) + +type SliceInterval struct { + Length IntInterval +} + +func (s SliceInterval) Union(other Range) Range { + i, ok := other.(SliceInterval) + if !ok { + i = SliceInterval{EmptyIntInterval} + } + if s.Length.Empty() || !s.Length.IsKnown() { + return i + } + if i.Length.Empty() || !i.Length.IsKnown() { + return s + } + return SliceInterval{ + Length: s.Length.Union(i.Length).(IntInterval), + } +} +func (s SliceInterval) String() string { return s.Length.String() } +func (s SliceInterval) IsKnown() bool { return s.Length.IsKnown() } + +type SliceAppendConstraint struct { + aConstraint + A ssa.Value + B ssa.Value +} + +type SliceSliceConstraint struct { + aConstraint + X ssa.Value + Lower ssa.Value + Upper ssa.Value +} + +type ArraySliceConstraint struct { + aConstraint + X ssa.Value + Lower ssa.Value + Upper ssa.Value +} + +type SliceIntersectionConstraint struct { + aConstraint + X ssa.Value + I IntInterval +} + +type SliceLengthConstraint struct { + aConstraint + X ssa.Value +} + +type MakeSliceConstraint struct { + aConstraint + Size ssa.Value +} + +type SliceIntervalConstraint struct { + aConstraint + I IntInterval +} + +func NewSliceAppendConstraint(a, b, y ssa.Value) Constraint { + return &SliceAppendConstraint{NewConstraint(y), a, b} +} +func NewSliceSliceConstraint(x, lower, upper, y ssa.Value) Constraint { + return &SliceSliceConstraint{NewConstraint(y), x, lower, upper} +} +func NewArraySliceConstraint(x, lower, upper, y ssa.Value) Constraint { + return &ArraySliceConstraint{NewConstraint(y), x, lower, upper} +} +func NewSliceIntersectionConstraint(x ssa.Value, i IntInterval, y ssa.Value) Constraint { + return &SliceIntersectionConstraint{NewConstraint(y), x, i} +} +func NewSliceLengthConstraint(x, y ssa.Value) Constraint { + return &SliceLengthConstraint{NewConstraint(y), x} +} +func NewMakeSliceConstraint(size, y ssa.Value) Constraint { + return &MakeSliceConstraint{NewConstraint(y), size} +} +func NewSliceIntervalConstraint(i IntInterval, y ssa.Value) Constraint { + return &SliceIntervalConstraint{NewConstraint(y), i} +} + +func (c *SliceAppendConstraint) Operands() []ssa.Value { return []ssa.Value{c.A, c.B} } +func (c *SliceSliceConstraint) Operands() []ssa.Value { + ops := []ssa.Value{c.X} + if c.Lower != nil { + ops = append(ops, c.Lower) + } + if c.Upper != nil { + ops = append(ops, c.Upper) + } + return ops +} +func (c *ArraySliceConstraint) Operands() []ssa.Value { + ops := []ssa.Value{c.X} + if c.Lower != nil { + ops = append(ops, c.Lower) + } + if c.Upper != nil { + ops = append(ops, c.Upper) + } + return ops +} +func (c *SliceIntersectionConstraint) Operands() []ssa.Value { return []ssa.Value{c.X} } +func (c *SliceLengthConstraint) Operands() []ssa.Value { return []ssa.Value{c.X} } +func (c *MakeSliceConstraint) Operands() []ssa.Value { return []ssa.Value{c.Size} } +func (s *SliceIntervalConstraint) Operands() []ssa.Value { return nil } + +func (c *SliceAppendConstraint) String() string { + return fmt.Sprintf("%s = append(%s, %s)", c.Y().Name(), c.A.Name(), c.B.Name()) +} +func (c *SliceSliceConstraint) String() string { + var lname, uname string + if c.Lower != nil { + lname = c.Lower.Name() + } + if c.Upper != nil { + uname = c.Upper.Name() + } + return fmt.Sprintf("%s[%s:%s]", c.X.Name(), lname, uname) +} +func (c *ArraySliceConstraint) String() string { + var lname, uname string + if c.Lower != nil { + lname = c.Lower.Name() + } + if c.Upper != nil { + uname = c.Upper.Name() + } + return 
fmt.Sprintf("%s[%s:%s]", c.X.Name(), lname, uname) +} +func (c *SliceIntersectionConstraint) String() string { + return fmt.Sprintf("%s = %s.%t ⊓ %s", c.Y().Name(), c.X.Name(), c.Y().(*ssa.Sigma).Branch, c.I) +} +func (c *SliceLengthConstraint) String() string { + return fmt.Sprintf("%s = len(%s)", c.Y().Name(), c.X.Name()) +} +func (c *MakeSliceConstraint) String() string { + return fmt.Sprintf("%s = make(slice, %s)", c.Y().Name(), c.Size.Name()) +} +func (c *SliceIntervalConstraint) String() string { return fmt.Sprintf("%s = %s", c.Y().Name(), c.I) } + +func (c *SliceAppendConstraint) Eval(g *Graph) Range { + l1 := g.Range(c.A).(SliceInterval).Length + var l2 IntInterval + switch r := g.Range(c.B).(type) { + case SliceInterval: + l2 = r.Length + case StringInterval: + l2 = r.Length + default: + return SliceInterval{} + } + if !l1.IsKnown() || !l2.IsKnown() { + return SliceInterval{} + } + return SliceInterval{ + Length: l1.Add(l2), + } +} +func (c *SliceSliceConstraint) Eval(g *Graph) Range { + lr := NewIntInterval(NewZ(0), NewZ(0)) + if c.Lower != nil { + lr = g.Range(c.Lower).(IntInterval) + } + ur := g.Range(c.X).(SliceInterval).Length + if c.Upper != nil { + ur = g.Range(c.Upper).(IntInterval) + } + if !lr.IsKnown() || !ur.IsKnown() { + return SliceInterval{} + } + + ls := []Z{ + ur.Lower.Sub(lr.Lower), + ur.Upper.Sub(lr.Lower), + ur.Lower.Sub(lr.Upper), + ur.Upper.Sub(lr.Upper), + } + // TODO(dh): if we don't truncate lengths to 0 we might be able to + // easily detect slices with high < low. we'd need to treat -∞ + // specially, though. + for i, l := range ls { + if l.Sign() == -1 { + ls[i] = NewZ(0) + } + } + + return SliceInterval{ + Length: NewIntInterval(MinZ(ls...), MaxZ(ls...)), + } +} +func (c *ArraySliceConstraint) Eval(g *Graph) Range { + lr := NewIntInterval(NewZ(0), NewZ(0)) + if c.Lower != nil { + lr = g.Range(c.Lower).(IntInterval) + } + var l int64 + switch typ := c.X.Type().(type) { + case *types.Array: + l = typ.Len() + case *types.Pointer: + l = typ.Elem().(*types.Array).Len() + } + ur := NewIntInterval(NewZ(l), NewZ(l)) + if c.Upper != nil { + ur = g.Range(c.Upper).(IntInterval) + } + if !lr.IsKnown() || !ur.IsKnown() { + return SliceInterval{} + } + + ls := []Z{ + ur.Lower.Sub(lr.Lower), + ur.Upper.Sub(lr.Lower), + ur.Lower.Sub(lr.Upper), + ur.Upper.Sub(lr.Upper), + } + // TODO(dh): if we don't truncate lengths to 0 we might be able to + // easily detect slices with high < low. we'd need to treat -∞ + // specially, though. 
+ for i, l := range ls { + if l.Sign() == -1 { + ls[i] = NewZ(0) + } + } + + return SliceInterval{ + Length: NewIntInterval(MinZ(ls...), MaxZ(ls...)), + } +} +func (c *SliceIntersectionConstraint) Eval(g *Graph) Range { + xi := g.Range(c.X).(SliceInterval) + if !xi.IsKnown() { + return c.I + } + return SliceInterval{ + Length: xi.Length.Intersection(c.I), + } +} +func (c *SliceLengthConstraint) Eval(g *Graph) Range { + i := g.Range(c.X).(SliceInterval).Length + if !i.IsKnown() { + return NewIntInterval(NewZ(0), PInfinity) + } + return i +} +func (c *MakeSliceConstraint) Eval(g *Graph) Range { + i, ok := g.Range(c.Size).(IntInterval) + if !ok { + return SliceInterval{NewIntInterval(NewZ(0), PInfinity)} + } + if i.Lower.Sign() == -1 { + i.Lower = NewZ(0) + } + return SliceInterval{i} +} +func (c *SliceIntervalConstraint) Eval(*Graph) Range { return SliceInterval{c.I} } diff --git a/vendor/honnef.co/go/tools/staticcheck/vrp/string.go b/vendor/honnef.co/go/tools/staticcheck/vrp/string.go new file mode 100644 index 000000000..e05877f9f --- /dev/null +++ b/vendor/honnef.co/go/tools/staticcheck/vrp/string.go @@ -0,0 +1,258 @@ +package vrp + +import ( + "fmt" + "go/token" + "go/types" + + "honnef.co/go/tools/ssa" +) + +type StringInterval struct { + Length IntInterval +} + +func (s StringInterval) Union(other Range) Range { + i, ok := other.(StringInterval) + if !ok { + i = StringInterval{EmptyIntInterval} + } + if s.Length.Empty() || !s.Length.IsKnown() { + return i + } + if i.Length.Empty() || !i.Length.IsKnown() { + return s + } + return StringInterval{ + Length: s.Length.Union(i.Length).(IntInterval), + } +} + +func (s StringInterval) String() string { + return s.Length.String() +} + +func (s StringInterval) IsKnown() bool { + return s.Length.IsKnown() +} + +type StringSliceConstraint struct { + aConstraint + X ssa.Value + Lower ssa.Value + Upper ssa.Value +} + +type StringIntersectionConstraint struct { + aConstraint + ranges Ranges + A ssa.Value + B ssa.Value + Op token.Token + I IntInterval + resolved bool +} + +type StringConcatConstraint struct { + aConstraint + A ssa.Value + B ssa.Value +} + +type StringLengthConstraint struct { + aConstraint + X ssa.Value +} + +type StringIntervalConstraint struct { + aConstraint + I IntInterval +} + +func NewStringSliceConstraint(x, lower, upper, y ssa.Value) Constraint { + return &StringSliceConstraint{NewConstraint(y), x, lower, upper} +} +func NewStringIntersectionConstraint(a, b ssa.Value, op token.Token, ranges Ranges, y ssa.Value) Constraint { + return &StringIntersectionConstraint{ + aConstraint: NewConstraint(y), + ranges: ranges, + A: a, + B: b, + Op: op, + } +} +func NewStringConcatConstraint(a, b, y ssa.Value) Constraint { + return &StringConcatConstraint{NewConstraint(y), a, b} +} +func NewStringLengthConstraint(x ssa.Value, y ssa.Value) Constraint { + return &StringLengthConstraint{NewConstraint(y), x} +} +func NewStringIntervalConstraint(i IntInterval, y ssa.Value) Constraint { + return &StringIntervalConstraint{NewConstraint(y), i} +} + +func (c *StringSliceConstraint) Operands() []ssa.Value { + vs := []ssa.Value{c.X} + if c.Lower != nil { + vs = append(vs, c.Lower) + } + if c.Upper != nil { + vs = append(vs, c.Upper) + } + return vs +} +func (c *StringIntersectionConstraint) Operands() []ssa.Value { return []ssa.Value{c.A} } +func (c StringConcatConstraint) Operands() []ssa.Value { return []ssa.Value{c.A, c.B} } +func (c *StringLengthConstraint) Operands() []ssa.Value { return []ssa.Value{c.X} } +func (s *StringIntervalConstraint) 
Operands() []ssa.Value { return nil } + +func (c *StringSliceConstraint) String() string { + var lname, uname string + if c.Lower != nil { + lname = c.Lower.Name() + } + if c.Upper != nil { + uname = c.Upper.Name() + } + return fmt.Sprintf("%s[%s:%s]", c.X.Name(), lname, uname) +} +func (c *StringIntersectionConstraint) String() string { + return fmt.Sprintf("%s = %s %s %s (%t branch)", c.Y().Name(), c.A.Name(), c.Op, c.B.Name(), c.Y().(*ssa.Sigma).Branch) +} +func (c StringConcatConstraint) String() string { + return fmt.Sprintf("%s = %s + %s", c.Y().Name(), c.A.Name(), c.B.Name()) +} +func (c *StringLengthConstraint) String() string { + return fmt.Sprintf("%s = len(%s)", c.Y().Name(), c.X.Name()) +} +func (c *StringIntervalConstraint) String() string { return fmt.Sprintf("%s = %s", c.Y().Name(), c.I) } + +func (c *StringSliceConstraint) Eval(g *Graph) Range { + lr := NewIntInterval(NewZ(0), NewZ(0)) + if c.Lower != nil { + lr = g.Range(c.Lower).(IntInterval) + } + ur := g.Range(c.X).(StringInterval).Length + if c.Upper != nil { + ur = g.Range(c.Upper).(IntInterval) + } + if !lr.IsKnown() || !ur.IsKnown() { + return StringInterval{} + } + + ls := []Z{ + ur.Lower.Sub(lr.Lower), + ur.Upper.Sub(lr.Lower), + ur.Lower.Sub(lr.Upper), + ur.Upper.Sub(lr.Upper), + } + // TODO(dh): if we don't truncate lengths to 0 we might be able to + // easily detect slices with high < low. we'd need to treat -∞ + // specially, though. + for i, l := range ls { + if l.Sign() == -1 { + ls[i] = NewZ(0) + } + } + + return StringInterval{ + Length: NewIntInterval(MinZ(ls...), MaxZ(ls...)), + } +} +func (c *StringIntersectionConstraint) Eval(g *Graph) Range { + var l IntInterval + switch r := g.Range(c.A).(type) { + case StringInterval: + l = r.Length + case IntInterval: + l = r + } + + if !l.IsKnown() { + return StringInterval{c.I} + } + return StringInterval{ + Length: l.Intersection(c.I), + } +} +func (c StringConcatConstraint) Eval(g *Graph) Range { + i1, i2 := g.Range(c.A).(StringInterval), g.Range(c.B).(StringInterval) + if !i1.Length.IsKnown() || !i2.Length.IsKnown() { + return StringInterval{} + } + return StringInterval{ + Length: i1.Length.Add(i2.Length), + } +} +func (c *StringLengthConstraint) Eval(g *Graph) Range { + i := g.Range(c.X).(StringInterval).Length + if !i.IsKnown() { + return NewIntInterval(NewZ(0), PInfinity) + } + return i +} +func (c *StringIntervalConstraint) Eval(*Graph) Range { return StringInterval{c.I} } + +func (c *StringIntersectionConstraint) Futures() []ssa.Value { + return []ssa.Value{c.B} +} + +func (c *StringIntersectionConstraint) Resolve() { + if (c.A.Type().Underlying().(*types.Basic).Info() & types.IsString) != 0 { + // comparing two strings + r, ok := c.ranges[c.B].(StringInterval) + if !ok { + c.I = NewIntInterval(NewZ(0), PInfinity) + return + } + switch c.Op { + case token.EQL: + c.I = r.Length + case token.GTR, token.GEQ: + c.I = NewIntInterval(r.Length.Lower, PInfinity) + case token.LSS, token.LEQ: + c.I = NewIntInterval(NewZ(0), r.Length.Upper) + case token.NEQ: + default: + panic("unsupported op " + c.Op.String()) + } + } else { + r, ok := c.ranges[c.B].(IntInterval) + if !ok { + c.I = NewIntInterval(NewZ(0), PInfinity) + return + } + // comparing two lengths + switch c.Op { + case token.EQL: + c.I = r + case token.GTR: + c.I = NewIntInterval(r.Lower.Add(NewZ(1)), PInfinity) + case token.GEQ: + c.I = NewIntInterval(r.Lower, PInfinity) + case token.LSS: + c.I = NewIntInterval(NInfinity, r.Upper.Sub(NewZ(1))) + case token.LEQ: + c.I = NewIntInterval(NInfinity, r.Upper) 
+ case token.NEQ: + default: + panic("unsupported op " + c.Op.String()) + } + } +} + +func (c *StringIntersectionConstraint) IsKnown() bool { + return c.I.IsKnown() +} + +func (c *StringIntersectionConstraint) MarkUnresolved() { + c.resolved = false +} + +func (c *StringIntersectionConstraint) MarkResolved() { + c.resolved = true +} + +func (c *StringIntersectionConstraint) IsResolved() bool { + return c.resolved +} diff --git a/vendor/honnef.co/go/tools/staticcheck/vrp/vrp.go b/vendor/honnef.co/go/tools/staticcheck/vrp/vrp.go new file mode 100644 index 000000000..3c138e512 --- /dev/null +++ b/vendor/honnef.co/go/tools/staticcheck/vrp/vrp.go @@ -0,0 +1,1056 @@ +package vrp + +// TODO(dh) widening and narrowing have a lot of code in common. Make +// it reusable. + +import ( + "fmt" + "go/constant" + "go/token" + "go/types" + "math/big" + "sort" + "strings" + + "honnef.co/go/tools/lint" + "honnef.co/go/tools/ssa" +) + +type Future interface { + Constraint + Futures() []ssa.Value + Resolve() + IsKnown() bool + MarkUnresolved() + MarkResolved() + IsResolved() bool +} + +type Range interface { + Union(other Range) Range + IsKnown() bool +} + +type Constraint interface { + Y() ssa.Value + isConstraint() + String() string + Eval(*Graph) Range + Operands() []ssa.Value +} + +type aConstraint struct { + y ssa.Value +} + +func NewConstraint(y ssa.Value) aConstraint { + return aConstraint{y} +} + +func (aConstraint) isConstraint() {} +func (c aConstraint) Y() ssa.Value { return c.y } + +type PhiConstraint struct { + aConstraint + Vars []ssa.Value +} + +func NewPhiConstraint(vars []ssa.Value, y ssa.Value) Constraint { + uniqm := map[ssa.Value]struct{}{} + for _, v := range vars { + uniqm[v] = struct{}{} + } + var uniq []ssa.Value + for v := range uniqm { + uniq = append(uniq, v) + } + return &PhiConstraint{ + aConstraint: NewConstraint(y), + Vars: uniq, + } +} + +func (c *PhiConstraint) Operands() []ssa.Value { + return c.Vars +} + +func (c *PhiConstraint) Eval(g *Graph) Range { + i := Range(nil) + for _, v := range c.Vars { + i = g.Range(v).Union(i) + } + return i +} + +func (c *PhiConstraint) String() string { + names := make([]string, len(c.Vars)) + for i, v := range c.Vars { + names[i] = v.Name() + } + return fmt.Sprintf("%s = φ(%s)", c.Y().Name(), strings.Join(names, ", ")) +} + +func isSupportedType(typ types.Type) bool { + switch typ := typ.Underlying().(type) { + case *types.Basic: + switch typ.Kind() { + case types.String, types.UntypedString: + return true + default: + if (typ.Info() & types.IsInteger) == 0 { + return false + } + } + case *types.Chan: + return true + case *types.Slice: + return true + default: + return false + } + return true +} + +func ConstantToZ(c constant.Value) Z { + s := constant.ToInt(c).ExactString() + n := &big.Int{} + n.SetString(s, 10) + return NewBigZ(n) +} + +func sigmaInteger(g *Graph, ins *ssa.Sigma, cond *ssa.BinOp, ops []*ssa.Value) Constraint { + op := cond.Op + if !ins.Branch { + op = (invertToken(op)) + } + + switch op { + case token.EQL, token.GTR, token.GEQ, token.LSS, token.LEQ: + default: + return nil + } + var a, b ssa.Value + if (*ops[0]) == ins.X { + a = *ops[0] + b = *ops[1] + } else { + a = *ops[1] + b = *ops[0] + op = flipToken(op) + } + return NewIntIntersectionConstraint(a, b, op, g.ranges, ins) +} + +func sigmaString(g *Graph, ins *ssa.Sigma, cond *ssa.BinOp, ops []*ssa.Value) Constraint { + op := cond.Op + if !ins.Branch { + op = (invertToken(op)) + } + + switch op { + case token.EQL, token.GTR, token.GEQ, token.LSS, token.LEQ: + default: + 
return nil + } + + if ((*ops[0]).Type().Underlying().(*types.Basic).Info() & types.IsString) == 0 { + var a, b ssa.Value + call, ok := (*ops[0]).(*ssa.Call) + if ok && call.Common().Args[0] == ins.X { + a = *ops[0] + b = *ops[1] + } else { + a = *ops[1] + b = *ops[0] + op = flipToken(op) + } + return NewStringIntersectionConstraint(a, b, op, g.ranges, ins) + } + var a, b ssa.Value + if (*ops[0]) == ins.X { + a = *ops[0] + b = *ops[1] + } else { + a = *ops[1] + b = *ops[0] + op = flipToken(op) + } + return NewStringIntersectionConstraint(a, b, op, g.ranges, ins) +} + +func sigmaSlice(g *Graph, ins *ssa.Sigma, cond *ssa.BinOp, ops []*ssa.Value) Constraint { + // TODO(dh) sigmaSlice and sigmaString are a lot alike. Can they + // be merged? + // + // XXX support futures + + op := cond.Op + if !ins.Branch { + op = (invertToken(op)) + } + + k, ok := (*ops[1]).(*ssa.Const) + // XXX investigate in what cases this wouldn't be a Const + // + // XXX what if left and right are swapped? + if !ok { + return nil + } + + call, ok := (*ops[0]).(*ssa.Call) + if !ok { + return nil + } + builtin, ok := call.Common().Value.(*ssa.Builtin) + if !ok { + return nil + } + if builtin.Name() != "len" { + return nil + } + callops := call.Operands(nil) + + v := ConstantToZ(k.Value) + c := NewSliceIntersectionConstraint(*callops[1], IntInterval{}, ins).(*SliceIntersectionConstraint) + switch op { + case token.EQL: + c.I = NewIntInterval(v, v) + case token.GTR, token.GEQ: + off := int64(0) + if cond.Op == token.GTR { + off = 1 + } + c.I = NewIntInterval( + v.Add(NewZ(off)), + PInfinity, + ) + case token.LSS, token.LEQ: + off := int64(0) + if cond.Op == token.LSS { + off = -1 + } + c.I = NewIntInterval( + NInfinity, + v.Add(NewZ(off)), + ) + default: + return nil + } + return c +} + +func BuildGraph(f *ssa.Function) *Graph { + g := &Graph{ + Vertices: map[interface{}]*Vertex{}, + ranges: Ranges{}, + } + + var cs []Constraint + + ops := make([]*ssa.Value, 16) + seen := map[ssa.Value]bool{} + for _, block := range f.Blocks { + for _, ins := range block.Instrs { + ops = ins.Operands(ops[:0]) + for _, op := range ops { + if c, ok := (*op).(*ssa.Const); ok { + if seen[c] { + continue + } + seen[c] = true + if c.Value == nil { + switch c.Type().Underlying().(type) { + case *types.Slice: + cs = append(cs, NewSliceIntervalConstraint(NewIntInterval(NewZ(0), NewZ(0)), c)) + } + continue + } + switch c.Value.Kind() { + case constant.Int: + v := ConstantToZ(c.Value) + cs = append(cs, NewIntIntervalConstraint(NewIntInterval(v, v), c)) + case constant.String: + s := constant.StringVal(c.Value) + n := NewZ(int64(len(s))) + cs = append(cs, NewStringIntervalConstraint(NewIntInterval(n, n), c)) + } + } + } + } + } + for _, block := range f.Blocks { + for _, ins := range block.Instrs { + switch ins := ins.(type) { + case *ssa.Convert: + switch v := ins.Type().Underlying().(type) { + case *types.Basic: + if (v.Info() & types.IsInteger) == 0 { + continue + } + cs = append(cs, NewIntConversionConstraint(ins.X, ins)) + } + case *ssa.Call: + if static := ins.Common().StaticCallee(); static != nil { + if fn, ok := static.Object().(*types.Func); ok { + switch lint.FuncName(fn) { + case "bytes.Index", "bytes.IndexAny", "bytes.IndexByte", + "bytes.IndexFunc", "bytes.IndexRune", "bytes.LastIndex", + "bytes.LastIndexAny", "bytes.LastIndexByte", "bytes.LastIndexFunc", + "strings.Index", "strings.IndexAny", "strings.IndexByte", + "strings.IndexFunc", "strings.IndexRune", "strings.LastIndex", + "strings.LastIndexAny", "strings.LastIndexByte", 
"strings.LastIndexFunc": + // TODO(dh): instead of limiting by +∞, + // limit by the upper bound of the passed + // string + cs = append(cs, NewIntIntervalConstraint(NewIntInterval(NewZ(-1), PInfinity), ins)) + case "bytes.Title", "bytes.ToLower", "bytes.ToTitle", "bytes.ToUpper", + "strings.Title", "strings.ToLower", "strings.ToTitle", "strings.ToUpper": + cs = append(cs, NewCopyConstraint(ins.Common().Args[0], ins)) + case "bytes.ToLowerSpecial", "bytes.ToTitleSpecial", "bytes.ToUpperSpecial", + "strings.ToLowerSpecial", "strings.ToTitleSpecial", "strings.ToUpperSpecial": + cs = append(cs, NewCopyConstraint(ins.Common().Args[1], ins)) + case "bytes.Compare", "strings.Compare": + cs = append(cs, NewIntIntervalConstraint(NewIntInterval(NewZ(-1), NewZ(1)), ins)) + case "bytes.Count", "strings.Count": + // TODO(dh): instead of limiting by +∞, + // limit by the upper bound of the passed + // string. + cs = append(cs, NewIntIntervalConstraint(NewIntInterval(NewZ(0), PInfinity), ins)) + case "bytes.Map", "bytes.TrimFunc", "bytes.TrimLeft", "bytes.TrimLeftFunc", + "bytes.TrimRight", "bytes.TrimRightFunc", "bytes.TrimSpace", + "strings.Map", "strings.TrimFunc", "strings.TrimLeft", "strings.TrimLeftFunc", + "strings.TrimRight", "strings.TrimRightFunc", "strings.TrimSpace": + // TODO(dh): lower = 0, upper = upper of passed string + case "bytes.TrimPrefix", "bytes.TrimSuffix", + "strings.TrimPrefix", "strings.TrimSuffix": + // TODO(dh) range between "unmodified" and len(cutset) removed + case "(*bytes.Buffer).Cap", "(*bytes.Buffer).Len", "(*bytes.Reader).Len", "(*bytes.Reader).Size": + cs = append(cs, NewIntIntervalConstraint(NewIntInterval(NewZ(0), PInfinity), ins)) + } + } + } + builtin, ok := ins.Common().Value.(*ssa.Builtin) + ops := ins.Operands(nil) + if !ok { + continue + } + switch builtin.Name() { + case "len": + switch op1 := (*ops[1]).Type().Underlying().(type) { + case *types.Basic: + if op1.Kind() == types.String || op1.Kind() == types.UntypedString { + cs = append(cs, NewStringLengthConstraint(*ops[1], ins)) + } + case *types.Slice: + cs = append(cs, NewSliceLengthConstraint(*ops[1], ins)) + } + + case "append": + cs = append(cs, NewSliceAppendConstraint(ins.Common().Args[0], ins.Common().Args[1], ins)) + } + case *ssa.BinOp: + ops := ins.Operands(nil) + basic, ok := (*ops[0]).Type().Underlying().(*types.Basic) + if !ok { + continue + } + switch basic.Kind() { + case types.Int, types.Int8, types.Int16, types.Int32, types.Int64, + types.Uint, types.Uint8, types.Uint16, types.Uint32, types.Uint64, types.UntypedInt: + fns := map[token.Token]func(ssa.Value, ssa.Value, ssa.Value) Constraint{ + token.ADD: NewIntAddConstraint, + token.SUB: NewIntSubConstraint, + token.MUL: NewIntMulConstraint, + // XXX support QUO, REM, SHL, SHR + } + fn, ok := fns[ins.Op] + if ok { + cs = append(cs, fn(*ops[0], *ops[1], ins)) + } + case types.String, types.UntypedString: + if ins.Op == token.ADD { + cs = append(cs, NewStringConcatConstraint(*ops[0], *ops[1], ins)) + } + } + case *ssa.Slice: + typ := ins.X.Type().Underlying() + switch typ := typ.(type) { + case *types.Basic: + cs = append(cs, NewStringSliceConstraint(ins.X, ins.Low, ins.High, ins)) + case *types.Slice: + cs = append(cs, NewSliceSliceConstraint(ins.X, ins.Low, ins.High, ins)) + case *types.Array: + cs = append(cs, NewArraySliceConstraint(ins.X, ins.Low, ins.High, ins)) + case *types.Pointer: + if _, ok := typ.Elem().(*types.Array); !ok { + continue + } + cs = append(cs, NewArraySliceConstraint(ins.X, ins.Low, ins.High, ins)) + } + case 
*ssa.Phi: + if !isSupportedType(ins.Type()) { + continue + } + ops := ins.Operands(nil) + dops := make([]ssa.Value, len(ops)) + for i, op := range ops { + dops[i] = *op + } + cs = append(cs, NewPhiConstraint(dops, ins)) + case *ssa.Sigma: + pred := ins.Block().Preds[0] + instrs := pred.Instrs + cond, ok := instrs[len(instrs)-1].(*ssa.If).Cond.(*ssa.BinOp) + ops := cond.Operands(nil) + if !ok { + continue + } + switch typ := ins.Type().Underlying().(type) { + case *types.Basic: + var c Constraint + switch typ.Kind() { + case types.Int, types.Int8, types.Int16, types.Int32, types.Int64, + types.Uint, types.Uint8, types.Uint16, types.Uint32, types.Uint64, types.UntypedInt: + c = sigmaInteger(g, ins, cond, ops) + case types.String, types.UntypedString: + c = sigmaString(g, ins, cond, ops) + } + if c != nil { + cs = append(cs, c) + } + case *types.Slice: + c := sigmaSlice(g, ins, cond, ops) + if c != nil { + cs = append(cs, c) + } + default: + //log.Printf("unsupported sigma type %T", typ) // XXX + } + case *ssa.MakeChan: + cs = append(cs, NewMakeChannelConstraint(ins.Size, ins)) + case *ssa.MakeSlice: + cs = append(cs, NewMakeSliceConstraint(ins.Len, ins)) + case *ssa.ChangeType: + switch ins.X.Type().Underlying().(type) { + case *types.Chan: + cs = append(cs, NewChannelChangeTypeConstraint(ins.X, ins)) + } + } + } + } + + for _, c := range cs { + if c == nil { + panic("nil constraint") + } + // If V is used in constraint C, then we create an edge V->C + for _, op := range c.Operands() { + g.AddEdge(op, c, false) + } + if c, ok := c.(Future); ok { + for _, op := range c.Futures() { + g.AddEdge(op, c, true) + } + } + // If constraint C defines variable V, then we create an edge + // C->V + g.AddEdge(c, c.Y(), false) + } + + g.FindSCCs() + g.sccEdges = make([][]Edge, len(g.SCCs)) + g.futures = make([][]Future, len(g.SCCs)) + for _, e := range g.Edges { + g.sccEdges[e.From.SCC] = append(g.sccEdges[e.From.SCC], e) + if !e.control { + continue + } + if c, ok := e.To.Value.(Future); ok { + g.futures[e.From.SCC] = append(g.futures[e.From.SCC], c) + } + } + return g +} + +func (g *Graph) Solve() Ranges { + var consts []Z + off := NewZ(1) + for _, n := range g.Vertices { + if c, ok := n.Value.(*ssa.Const); ok { + basic, ok := c.Type().Underlying().(*types.Basic) + if !ok { + continue + } + if (basic.Info() & types.IsInteger) != 0 { + z := ConstantToZ(c.Value) + consts = append(consts, z) + consts = append(consts, z.Add(off)) + consts = append(consts, z.Sub(off)) + } + } + + } + sort.Sort(Zs(consts)) + + for scc, vertices := range g.SCCs { + n := 0 + n = len(vertices) + if n == 1 { + g.resolveFutures(scc) + v := vertices[0] + if v, ok := v.Value.(ssa.Value); ok { + switch typ := v.Type().Underlying().(type) { + case *types.Basic: + switch typ.Kind() { + case types.String, types.UntypedString: + if !g.Range(v).(StringInterval).IsKnown() { + g.SetRange(v, StringInterval{NewIntInterval(NewZ(0), PInfinity)}) + } + default: + if !g.Range(v).(IntInterval).IsKnown() { + g.SetRange(v, InfinityFor(v)) + } + } + case *types.Chan: + if !g.Range(v).(ChannelInterval).IsKnown() { + g.SetRange(v, ChannelInterval{NewIntInterval(NewZ(0), PInfinity)}) + } + case *types.Slice: + if !g.Range(v).(SliceInterval).IsKnown() { + g.SetRange(v, SliceInterval{NewIntInterval(NewZ(0), PInfinity)}) + } + } + } + if c, ok := v.Value.(Constraint); ok { + g.SetRange(c.Y(), c.Eval(g)) + } + } else { + uses := g.uses(scc) + entries := g.entries(scc) + for len(entries) > 0 { + v := entries[len(entries)-1] + entries = 
entries[:len(entries)-1] + for _, use := range uses[v] { + if g.widen(use, consts) { + entries = append(entries, use.Y()) + } + } + } + + g.resolveFutures(scc) + + // XXX this seems to be necessary, but shouldn't be. + // removing it leads to nil pointer derefs; investigate + // where we're not setting values correctly. + for _, n := range vertices { + if v, ok := n.Value.(ssa.Value); ok { + i, ok := g.Range(v).(IntInterval) + if !ok { + continue + } + if !i.IsKnown() { + g.SetRange(v, InfinityFor(v)) + } + } + } + + actives := g.actives(scc) + for len(actives) > 0 { + v := actives[len(actives)-1] + actives = actives[:len(actives)-1] + for _, use := range uses[v] { + if g.narrow(use) { + actives = append(actives, use.Y()) + } + } + } + } + // propagate scc + for _, edge := range g.sccEdges[scc] { + if edge.control { + continue + } + if edge.From.SCC == edge.To.SCC { + continue + } + if c, ok := edge.To.Value.(Constraint); ok { + g.SetRange(c.Y(), c.Eval(g)) + } + if c, ok := edge.To.Value.(Future); ok { + if !c.IsKnown() { + c.MarkUnresolved() + } + } + } + } + + for v, r := range g.ranges { + i, ok := r.(IntInterval) + if !ok { + continue + } + if (v.Type().Underlying().(*types.Basic).Info() & types.IsUnsigned) == 0 { + if i.Upper != PInfinity { + s := &types.StdSizes{ + // XXX is it okay to assume the largest word size, or do we + // need to be platform specific? + WordSize: 8, + MaxAlign: 1, + } + bits := (s.Sizeof(v.Type()) * 8) - 1 + n := big.NewInt(1) + n = n.Lsh(n, uint(bits)) + upper, lower := &big.Int{}, &big.Int{} + upper.Sub(n, big.NewInt(1)) + lower.Neg(n) + + if i.Upper.Cmp(NewBigZ(upper)) == 1 { + i = NewIntInterval(NInfinity, PInfinity) + } else if i.Lower.Cmp(NewBigZ(lower)) == -1 { + i = NewIntInterval(NInfinity, PInfinity) + } + } + } + + g.ranges[v] = i + } + + return g.ranges +} + +func VertexString(v *Vertex) string { + switch v := v.Value.(type) { + case Constraint: + return v.String() + case ssa.Value: + return v.Name() + case nil: + return "BUG: nil vertex value" + default: + panic(fmt.Sprintf("unexpected type %T", v)) + } +} + +type Vertex struct { + Value interface{} // one of Constraint or ssa.Value + SCC int + index int + lowlink int + stack bool + + Succs []Edge +} + +type Ranges map[ssa.Value]Range + +func (r Ranges) Get(x ssa.Value) Range { + if x == nil { + return nil + } + i, ok := r[x] + if !ok { + switch x := x.Type().Underlying().(type) { + case *types.Basic: + switch x.Kind() { + case types.String, types.UntypedString: + return StringInterval{} + default: + return IntInterval{} + } + case *types.Chan: + return ChannelInterval{} + case *types.Slice: + return SliceInterval{} + } + } + return i +} + +type Graph struct { + Vertices map[interface{}]*Vertex + Edges []Edge + SCCs [][]*Vertex + ranges Ranges + + // map SCCs to futures + futures [][]Future + // map SCCs to edges + sccEdges [][]Edge +} + +func (g Graph) Graphviz() string { + var lines []string + lines = append(lines, "digraph{") + ids := map[interface{}]int{} + i := 1 + for _, v := range g.Vertices { + ids[v] = i + shape := "box" + if _, ok := v.Value.(ssa.Value); ok { + shape = "oval" + } + lines = append(lines, fmt.Sprintf(`n%d [shape="%s", label=%q, colorscheme=spectral11, style="filled", fillcolor="%d"]`, + i, shape, VertexString(v), (v.SCC%11)+1)) + i++ + } + for _, e := range g.Edges { + style := "solid" + if e.control { + style = "dashed" + } + lines = append(lines, fmt.Sprintf(`n%d -> n%d [style="%s"]`, ids[e.From], ids[e.To], style)) + } + lines = append(lines, "}") + return 
strings.Join(lines, "\n") +} + +func (g *Graph) SetRange(x ssa.Value, r Range) { + g.ranges[x] = r +} + +func (g *Graph) Range(x ssa.Value) Range { + return g.ranges.Get(x) +} + +func (g *Graph) widen(c Constraint, consts []Z) bool { + setRange := func(i Range) { + g.SetRange(c.Y(), i) + } + widenIntInterval := func(oi, ni IntInterval) (IntInterval, bool) { + if !ni.IsKnown() { + return oi, false + } + nlc := NInfinity + nuc := PInfinity + + // Don't get stuck widening for an absurd amount of time due + // to an excess number of constants, as may be present in + // table-based scanners. + if len(consts) < 1000 { + for _, co := range consts { + if co.Cmp(ni.Lower) <= 0 { + nlc = co + break + } + } + for _, co := range consts { + if co.Cmp(ni.Upper) >= 0 { + nuc = co + break + } + } + } + + if !oi.IsKnown() { + return ni, true + } + if ni.Lower.Cmp(oi.Lower) == -1 && ni.Upper.Cmp(oi.Upper) == 1 { + return NewIntInterval(nlc, nuc), true + } + if ni.Lower.Cmp(oi.Lower) == -1 { + return NewIntInterval(nlc, oi.Upper), true + } + if ni.Upper.Cmp(oi.Upper) == 1 { + return NewIntInterval(oi.Lower, nuc), true + } + return oi, false + } + switch oi := g.Range(c.Y()).(type) { + case IntInterval: + ni := c.Eval(g).(IntInterval) + si, changed := widenIntInterval(oi, ni) + if changed { + setRange(si) + return true + } + return false + case StringInterval: + ni := c.Eval(g).(StringInterval) + si, changed := widenIntInterval(oi.Length, ni.Length) + if changed { + setRange(StringInterval{si}) + return true + } + return false + case SliceInterval: + ni := c.Eval(g).(SliceInterval) + si, changed := widenIntInterval(oi.Length, ni.Length) + if changed { + setRange(SliceInterval{si}) + return true + } + return false + default: + return false + } +} + +func (g *Graph) narrow(c Constraint) bool { + narrowIntInterval := func(oi, ni IntInterval) (IntInterval, bool) { + oLower := oi.Lower + oUpper := oi.Upper + nLower := ni.Lower + nUpper := ni.Upper + + if oLower == NInfinity && nLower != NInfinity { + return NewIntInterval(nLower, oUpper), true + } + if oUpper == PInfinity && nUpper != PInfinity { + return NewIntInterval(oLower, nUpper), true + } + if oLower.Cmp(nLower) == 1 { + return NewIntInterval(nLower, oUpper), true + } + if oUpper.Cmp(nUpper) == -1 { + return NewIntInterval(oLower, nUpper), true + } + return oi, false + } + switch oi := g.Range(c.Y()).(type) { + case IntInterval: + ni := c.Eval(g).(IntInterval) + si, changed := narrowIntInterval(oi, ni) + if changed { + g.SetRange(c.Y(), si) + return true + } + return false + case StringInterval: + ni := c.Eval(g).(StringInterval) + si, changed := narrowIntInterval(oi.Length, ni.Length) + if changed { + g.SetRange(c.Y(), StringInterval{si}) + return true + } + return false + case SliceInterval: + ni := c.Eval(g).(SliceInterval) + si, changed := narrowIntInterval(oi.Length, ni.Length) + if changed { + g.SetRange(c.Y(), SliceInterval{si}) + return true + } + return false + default: + return false + } +} + +func (g *Graph) resolveFutures(scc int) { + for _, c := range g.futures[scc] { + c.Resolve() + } +} + +func (g *Graph) entries(scc int) []ssa.Value { + var entries []ssa.Value + for _, n := range g.Vertices { + if n.SCC != scc { + continue + } + if v, ok := n.Value.(ssa.Value); ok { + // XXX avoid quadratic runtime + // + // XXX I cannot think of any code where the future and its + // variables aren't in the same SCC, in which case this + // code isn't very useful (the variables won't be resolved + // yet). 
Before we have a cross-SCC example, however, we + // can't really verify that this code is working + // correctly, or indeed doing anything useful. + for _, on := range g.Vertices { + if c, ok := on.Value.(Future); ok { + if c.Y() == v { + if !c.IsResolved() { + g.SetRange(c.Y(), c.Eval(g)) + c.MarkResolved() + } + break + } + } + } + if g.Range(v).IsKnown() { + entries = append(entries, v) + } + } + } + return entries +} + +func (g *Graph) uses(scc int) map[ssa.Value][]Constraint { + m := map[ssa.Value][]Constraint{} + for _, e := range g.sccEdges[scc] { + if e.control { + continue + } + if v, ok := e.From.Value.(ssa.Value); ok { + c := e.To.Value.(Constraint) + sink := c.Y() + if g.Vertices[sink].SCC == scc { + m[v] = append(m[v], c) + } + } + } + return m +} + +func (g *Graph) actives(scc int) []ssa.Value { + var actives []ssa.Value + for _, n := range g.Vertices { + if n.SCC != scc { + continue + } + if v, ok := n.Value.(ssa.Value); ok { + if _, ok := v.(*ssa.Const); !ok { + actives = append(actives, v) + } + } + } + return actives +} + +func (g *Graph) AddEdge(from, to interface{}, ctrl bool) { + vf, ok := g.Vertices[from] + if !ok { + vf = &Vertex{Value: from} + g.Vertices[from] = vf + } + vt, ok := g.Vertices[to] + if !ok { + vt = &Vertex{Value: to} + g.Vertices[to] = vt + } + e := Edge{From: vf, To: vt, control: ctrl} + g.Edges = append(g.Edges, e) + vf.Succs = append(vf.Succs, e) +} + +type Edge struct { + From, To *Vertex + control bool +} + +func (e Edge) String() string { + return fmt.Sprintf("%s -> %s", VertexString(e.From), VertexString(e.To)) +} + +func (g *Graph) FindSCCs() { + // use Tarjan to find the SCCs + + index := 1 + var s []*Vertex + + scc := 0 + var strongconnect func(v *Vertex) + strongconnect = func(v *Vertex) { + // set the depth index for v to the smallest unused index + v.index = index + v.lowlink = index + index++ + s = append(s, v) + v.stack = true + + for _, e := range v.Succs { + w := e.To + if w.index == 0 { + // successor w has not yet been visited; recurse on it + strongconnect(w) + if w.lowlink < v.lowlink { + v.lowlink = w.lowlink + } + } else if w.stack { + // successor w is in stack s and hence in the current scc + if w.index < v.lowlink { + v.lowlink = w.index + } + } + } + + if v.lowlink == v.index { + for { + w := s[len(s)-1] + s = s[:len(s)-1] + w.stack = false + w.SCC = scc + if w == v { + break + } + } + scc++ + } + } + for _, v := range g.Vertices { + if v.index == 0 { + strongconnect(v) + } + } + + g.SCCs = make([][]*Vertex, scc) + for _, n := range g.Vertices { + n.SCC = scc - n.SCC - 1 + g.SCCs[n.SCC] = append(g.SCCs[n.SCC], n) + } +} + +func invertToken(tok token.Token) token.Token { + switch tok { + case token.LSS: + return token.GEQ + case token.GTR: + return token.LEQ + case token.EQL: + return token.NEQ + case token.NEQ: + return token.EQL + case token.GEQ: + return token.LSS + case token.LEQ: + return token.GTR + default: + panic(fmt.Sprintf("unsupported token %s", tok)) + } +} + +func flipToken(tok token.Token) token.Token { + switch tok { + case token.LSS: + return token.GTR + case token.GTR: + return token.LSS + case token.EQL: + return token.EQL + case token.NEQ: + return token.NEQ + case token.GEQ: + return token.LEQ + case token.LEQ: + return token.GEQ + default: + panic(fmt.Sprintf("unsupported token %s", tok)) + } +} + +type CopyConstraint struct { + aConstraint + X ssa.Value +} + +func (c *CopyConstraint) String() string { + return fmt.Sprintf("%s = copy(%s)", c.Y().Name(), c.X.Name()) +} + +func (c *CopyConstraint) Eval(g 
*Graph) Range { + return g.Range(c.X) +} + +func (c *CopyConstraint) Operands() []ssa.Value { + return []ssa.Value{c.X} +} + +func NewCopyConstraint(x, y ssa.Value) Constraint { + return &CopyConstraint{ + aConstraint: aConstraint{ + y: y, + }, + X: x, + } +} diff --git a/vendor/honnef.co/go/tools/stylecheck/analysis.go b/vendor/honnef.co/go/tools/stylecheck/analysis.go new file mode 100644 index 000000000..f252487f7 --- /dev/null +++ b/vendor/honnef.co/go/tools/stylecheck/analysis.go @@ -0,0 +1,111 @@ +package stylecheck + +import ( + "flag" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "honnef.co/go/tools/config" + "honnef.co/go/tools/facts" + "honnef.co/go/tools/internal/passes/buildssa" + "honnef.co/go/tools/lint/lintutil" +) + +func newFlagSet() flag.FlagSet { + fs := flag.NewFlagSet("", flag.PanicOnError) + fs.Var(lintutil.NewVersionFlag(), "go", "Target Go version") + return *fs +} + +var Analyzers = map[string]*analysis.Analyzer{ + "ST1000": { + Name: "ST1000", + Run: CheckPackageComment, + Doc: Docs["ST1000"].String(), + Requires: []*analysis.Analyzer{}, + Flags: newFlagSet(), + }, + "ST1001": { + Name: "ST1001", + Run: CheckDotImports, + Doc: Docs["ST1001"].String(), + Requires: []*analysis.Analyzer{facts.Generated, config.Analyzer}, + Flags: newFlagSet(), + }, + "ST1003": { + Name: "ST1003", + Run: CheckNames, + Doc: Docs["ST1003"].String(), + Requires: []*analysis.Analyzer{facts.Generated, config.Analyzer}, + Flags: newFlagSet(), + }, + "ST1005": { + Name: "ST1005", + Run: CheckErrorStrings, + Doc: Docs["ST1005"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer}, + Flags: newFlagSet(), + }, + "ST1006": { + Name: "ST1006", + Run: CheckReceiverNames, + Doc: Docs["ST1006"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer, facts.Generated}, + Flags: newFlagSet(), + }, + "ST1008": { + Name: "ST1008", + Run: CheckErrorReturn, + Doc: Docs["ST1008"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer}, + Flags: newFlagSet(), + }, + "ST1011": { + Name: "ST1011", + Run: CheckTimeNames, + Doc: Docs["ST1011"].String(), + Flags: newFlagSet(), + }, + "ST1012": { + Name: "ST1012", + Run: CheckErrorVarNames, + Doc: Docs["ST1012"].String(), + Requires: []*analysis.Analyzer{config.Analyzer}, + Flags: newFlagSet(), + }, + "ST1013": { + Name: "ST1013", + Run: CheckHTTPStatusCodes, + Doc: Docs["ST1013"].String(), + Requires: []*analysis.Analyzer{facts.Generated, facts.TokenFile, config.Analyzer}, + Flags: newFlagSet(), + }, + "ST1015": { + Name: "ST1015", + Run: CheckDefaultCaseOrder, + Doc: Docs["ST1015"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated, facts.TokenFile}, + Flags: newFlagSet(), + }, + "ST1016": { + Name: "ST1016", + Run: CheckReceiverNamesIdentical, + Doc: Docs["ST1016"].String(), + Requires: []*analysis.Analyzer{buildssa.Analyzer}, + Flags: newFlagSet(), + }, + "ST1017": { + Name: "ST1017", + Run: CheckYodaConditions, + Doc: Docs["ST1017"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated, facts.TokenFile}, + Flags: newFlagSet(), + }, + "ST1018": { + Name: "ST1018", + Run: CheckInvisibleCharacters, + Doc: Docs["ST1018"].String(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), + }, +} diff --git a/vendor/honnef.co/go/tools/stylecheck/doc.go b/vendor/honnef.co/go/tools/stylecheck/doc.go new file mode 100644 index 000000000..9097214d9 --- /dev/null +++ b/vendor/honnef.co/go/tools/stylecheck/doc.go @@ -0,0 +1,154 @@ +package 
stylecheck + +import "honnef.co/go/tools/lint" + +var Docs = map[string]*lint.Documentation{ + "ST1000": &lint.Documentation{ + Title: `Incorrect or missing package comment`, + Text: `Packages must have a package comment that is formatted according to +the guidelines laid out in +https://github.com/golang/go/wiki/CodeReviewComments#package-comments.`, + Since: "2019.1", + NonDefault: true, + }, + + "ST1001": &lint.Documentation{ + Title: `Dot imports are discouraged`, + Text: `Dot imports that aren't in external test packages are discouraged. + +The dot_import_whitelist option can be used to whitelist certain +imports. + +Quoting Go Code Review Comments: + + The import . form can be useful in tests that, due to circular + dependencies, cannot be made part of the package being tested: + + package foo_test + + import ( + "bar/testutil" // also imports "foo" + . "foo" + ) + + In this case, the test file cannot be in package foo because it + uses bar/testutil, which imports foo. So we use the 'import .' + form to let the file pretend to be part of package foo even though + it is not. Except for this one case, do not use import . in your + programs. It makes the programs much harder to read because it is + unclear whether a name like Quux is a top-level identifier in the + current package or in an imported package.`, + Since: "2019.1", + Options: []string{"dot_import_whitelist"}, + }, + + "ST1003": &lint.Documentation{ + Title: `Poorly chosen identifier`, + Text: `Identifiers, such as variable and package names, follow certain rules. + +See the following links for details: + +- https://golang.org/doc/effective_go.html#package-names +- https://golang.org/doc/effective_go.html#mixed-caps +- https://github.com/golang/go/wiki/CodeReviewComments#initialisms +- https://github.com/golang/go/wiki/CodeReviewComments#variable-names`, + Since: "2019.1", + NonDefault: true, + Options: []string{"initialisms"}, + }, + + "ST1005": &lint.Documentation{ + Title: `Incorrectly formatted error string`, + Text: `Error strings follow a set of guidelines to ensure uniformity and good +composability. + +Quoting Go Code Review Comments: + + Error strings should not be capitalized (unless beginning with + proper nouns or acronyms) or end with punctuation, since they are + usually printed following other context. That is, use + fmt.Errorf("something bad") not fmt.Errorf("Something bad"), so + that log.Printf("Reading %s: %v", filename, err) formats without a + spurious capital letter mid-message.`, + Since: "2019.1", + }, + + "ST1006": &lint.Documentation{ + Title: `Poorly chosen receiver name`, + Text: `Quoting Go Code Review Comments: + + The name of a method's receiver should be a reflection of its + identity; often a one or two letter abbreviation of its type + suffices (such as "c" or "cl" for "Client"). Don't use generic + names such as "me", "this" or "self", identifiers typical of + object-oriented languages that place more emphasis on methods as + opposed to functions. The name need not be as descriptive as that + of a method argument, as its role is obvious and serves no + documentary purpose. It can be very short as it will appear on + almost every line of every method of the type; familiarity admits + brevity. 
Be consistent, too: if you call the receiver "c" in one + method, don't call it "cl" in another.`, + Since: "2019.1", + }, + + "ST1008": &lint.Documentation{ + Title: `A function's error value should be its last return value`, + Text: `A function's error value should be its last return value.`, + Since: `2019.1`, + }, + + "ST1011": &lint.Documentation{ + Title: `Poorly chosen name for variable of type time.Duration`, + Text: `time.Duration values represent an amount of time, which is represented +as a count of nanoseconds. An expression like 5 * time.Microsecond +yields the value 5000. It is therefore not appropriate to suffix a +variable of type time.Duration with any time unit, such as Msec or +Milli.`, + Since: `2019.1`, + }, + + "ST1012": &lint.Documentation{ + Title: `Poorly chosen name for error variable`, + Text: `Error variables that are part of an API should be called errFoo or +ErrFoo.`, + Since: "2019.1", + }, + + "ST1013": &lint.Documentation{ + Title: `Should use constants for HTTP error codes, not magic numbers`, + Text: `HTTP has a tremendous number of status codes. While some of those are +well known (200, 400, 404, 500), most of them are not. The net/http +package provides constants for all status codes that are part of the +various specifications. It is recommended to use these constants +instead of hard-coding magic numbers, to vastly improve the +readability of your code.`, + Since: "2019.1", + Options: []string{"http_status_code_whitelist"}, + }, + + "ST1015": &lint.Documentation{ + Title: `A switch's default case should be the first or last case`, + Since: "2019.1", + }, + + "ST1016": &lint.Documentation{ + Title: `Use consistent method receiver names`, + Since: "2019.1", + NonDefault: true, + }, + + "ST1017": &lint.Documentation{ + Title: `Don't use Yoda conditions`, + Text: `Yoda conditions are conditions of the kind 'if 42 == x', where the +literal is on the left side of the comparison. These are a common +idiom in languages in which assignment is an expression, to avoid bugs +of the kind 'if (x = 42)'. In Go, which doesn't allow for this kind of +bug, we prefer the more idiomatic 'if x == 42'.`, + Since: "2019.2", + }, + + "ST1018": &lint.Documentation{ + Title: `Avoid zero-width and control characters in string literals`, + Since: "2019.2", + }, +} diff --git a/vendor/honnef.co/go/tools/stylecheck/lint.go b/vendor/honnef.co/go/tools/stylecheck/lint.go new file mode 100644 index 000000000..1699d5898 --- /dev/null +++ b/vendor/honnef.co/go/tools/stylecheck/lint.go @@ -0,0 +1,629 @@ +package stylecheck // import "honnef.co/go/tools/stylecheck" + +import ( + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "honnef.co/go/tools/config" + "honnef.co/go/tools/internal/passes/buildssa" + . "honnef.co/go/tools/lint/lintdsl" + "honnef.co/go/tools/ssa" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" +) + +func CheckPackageComment(pass *analysis.Pass) (interface{}, error) { + // - At least one file in a non-main package should have a package comment + // + // - The comment should be of the form + // "Package x ...". This has a slight potential for false + // positives, as multiple files can have package comments, in + // which case they get appended. But that doesn't happen a lot in + // the real world. 
+ + if pass.Pkg.Name() == "main" { + return nil, nil + } + hasDocs := false + for _, f := range pass.Files { + if IsInTest(pass, f) { + continue + } + if f.Doc != nil && len(f.Doc.List) > 0 { + hasDocs = true + prefix := "Package " + f.Name.Name + " " + if !strings.HasPrefix(strings.TrimSpace(f.Doc.Text()), prefix) { + ReportNodef(pass, f.Doc, `package comment should be of the form "%s..."`, prefix) + } + f.Doc.Text() + } + } + + if !hasDocs { + for _, f := range pass.Files { + if IsInTest(pass, f) { + continue + } + ReportNodef(pass, f, "at least one file in a package should have a package comment") + } + } + return nil, nil +} + +func CheckDotImports(pass *analysis.Pass) (interface{}, error) { + for _, f := range pass.Files { + imports: + for _, imp := range f.Imports { + path := imp.Path.Value + path = path[1 : len(path)-1] + for _, w := range config.For(pass).DotImportWhitelist { + if w == path { + continue imports + } + } + + if imp.Name != nil && imp.Name.Name == "." && !IsInTest(pass, f) { + ReportNodefFG(pass, imp, "should not use dot imports") + } + } + } + return nil, nil +} + +func CheckBlankImports(pass *analysis.Pass) (interface{}, error) { + fset := pass.Fset + for _, f := range pass.Files { + if IsInMain(pass, f) || IsInTest(pass, f) { + continue + } + + // Collect imports of the form `import _ "foo"`, i.e. with no + // parentheses, as their comment will be associated with the + // (paren-free) GenDecl, not the import spec itself. + // + // We don't directly process the GenDecl so that we can + // correctly handle the following: + // + // import _ "foo" + // import _ "bar" + // + // where only the first import should get flagged. + skip := map[ast.Spec]bool{} + ast.Inspect(f, func(node ast.Node) bool { + switch node := node.(type) { + case *ast.File: + return true + case *ast.GenDecl: + if node.Tok != token.IMPORT { + return false + } + if node.Lparen == token.NoPos && node.Doc != nil { + skip[node.Specs[0]] = true + } + return false + } + return false + }) + for i, imp := range f.Imports { + pos := fset.Position(imp.Pos()) + + if !IsBlank(imp.Name) { + continue + } + // Only flag the first blank import in a group of imports, + // or don't flag any of them, if the first one is + // commented + if i > 0 { + prev := f.Imports[i-1] + prevPos := fset.Position(prev.Pos()) + if pos.Line-1 == prevPos.Line && IsBlank(prev.Name) { + continue + } + } + + if imp.Doc == nil && imp.Comment == nil && !skip[imp] { + ReportNodef(pass, imp, "a blank import should be only in a main or test package, or have a comment justifying it") + } + } + } + return nil, nil +} + +func CheckIncDec(pass *analysis.Pass) (interface{}, error) { + // TODO(dh): this can be noisy for function bodies that look like this: + // x += 3 + // ... + // x += 2 + // ... 
+ // x += 1 + fn := func(node ast.Node) { + assign := node.(*ast.AssignStmt) + if assign.Tok != token.ADD_ASSIGN && assign.Tok != token.SUB_ASSIGN { + return + } + if (len(assign.Lhs) != 1 || len(assign.Rhs) != 1) || + !IsIntLiteral(assign.Rhs[0], "1") { + return + } + + suffix := "" + switch assign.Tok { + case token.ADD_ASSIGN: + suffix = "++" + case token.SUB_ASSIGN: + suffix = "--" + } + + ReportNodef(pass, assign, "should replace %s with %s%s", Render(pass, assign), Render(pass, assign.Lhs[0]), suffix) + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.AssignStmt)(nil)}, fn) + return nil, nil +} + +func CheckErrorReturn(pass *analysis.Pass) (interface{}, error) { +fnLoop: + for _, fn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + sig := fn.Type().(*types.Signature) + rets := sig.Results() + if rets == nil || rets.Len() < 2 { + continue + } + + if rets.At(rets.Len()-1).Type() == types.Universe.Lookup("error").Type() { + // Last return type is error. If the function also returns + // errors in other positions, that's fine. + continue + } + for i := rets.Len() - 2; i >= 0; i-- { + if rets.At(i).Type() == types.Universe.Lookup("error").Type() { + pass.Reportf(rets.At(i).Pos(), "error should be returned as the last argument") + continue fnLoop + } + } + } + return nil, nil +} + +// CheckUnexportedReturn checks that exported functions on exported +// types do not return unexported types. +func CheckUnexportedReturn(pass *analysis.Pass) (interface{}, error) { + for _, fn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + if fn.Synthetic != "" || fn.Parent() != nil { + continue + } + if !ast.IsExported(fn.Name()) || IsInMain(pass, fn) || IsInTest(pass, fn) { + continue + } + sig := fn.Type().(*types.Signature) + if sig.Recv() != nil && !ast.IsExported(Dereference(sig.Recv().Type()).(*types.Named).Obj().Name()) { + continue + } + res := sig.Results() + for i := 0; i < res.Len(); i++ { + if named, ok := DereferenceR(res.At(i).Type()).(*types.Named); ok && + !ast.IsExported(named.Obj().Name()) && + named != types.Universe.Lookup("error").Type() { + pass.Reportf(fn.Pos(), "should not return unexported type") + } + } + } + return nil, nil +} + +func CheckReceiverNames(pass *analysis.Pass) (interface{}, error) { + ssapkg := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).Pkg + for _, m := range ssapkg.Members { + if T, ok := m.Object().(*types.TypeName); ok && !T.IsAlias() { + ms := typeutil.IntuitiveMethodSet(T.Type(), nil) + for _, sel := range ms { + fn := sel.Obj().(*types.Func) + recv := fn.Type().(*types.Signature).Recv() + if Dereference(recv.Type()) != T.Type() { + // skip embedded methods + continue + } + if recv.Name() == "self" || recv.Name() == "this" { + ReportfFG(pass, recv.Pos(), `receiver name should be a reflection of its identity; don't use generic names such as "this" or "self"`) + } + if recv.Name() == "_" { + ReportfFG(pass, recv.Pos(), "receiver name should not be an underscore, omit the name if it is unused") + } + } + } + } + return nil, nil +} + +func CheckReceiverNamesIdentical(pass *analysis.Pass) (interface{}, error) { + ssapkg := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).Pkg + for _, m := range ssapkg.Members { + names := map[string]int{} + + var firstFn *types.Func + if T, ok := m.Object().(*types.TypeName); ok && !T.IsAlias() { + ms := typeutil.IntuitiveMethodSet(T.Type(), nil) + for _, sel := range ms { + fn := sel.Obj().(*types.Func) + recv := fn.Type().(*types.Signature).Recv() 
+ if Dereference(recv.Type()) != T.Type() { + // skip embedded methods + continue + } + if firstFn == nil { + firstFn = fn + } + if recv.Name() != "" && recv.Name() != "_" { + names[recv.Name()]++ + } + } + } + + if len(names) > 1 { + var seen []string + for name, count := range names { + seen = append(seen, fmt.Sprintf("%dx %q", count, name)) + } + + pass.Reportf(firstFn.Pos(), "methods on the same type should have the same receiver name (seen %s)", strings.Join(seen, ", ")) + } + } + return nil, nil +} + +func CheckContextFirstArg(pass *analysis.Pass) (interface{}, error) { + // TODO(dh): this check doesn't apply to test helpers. Example from the stdlib: + // func helperCommandContext(t *testing.T, ctx context.Context, s ...string) (cmd *exec.Cmd) { +fnLoop: + for _, fn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + if fn.Synthetic != "" || fn.Parent() != nil { + continue + } + params := fn.Signature.Params() + if params.Len() < 2 { + continue + } + if types.TypeString(params.At(0).Type(), nil) == "context.Context" { + continue + } + for i := 1; i < params.Len(); i++ { + param := params.At(i) + if types.TypeString(param.Type(), nil) == "context.Context" { + pass.Reportf(param.Pos(), "context.Context should be the first argument of a function") + continue fnLoop + } + } + } + return nil, nil +} + +func CheckErrorStrings(pass *analysis.Pass) (interface{}, error) { + objNames := map[*ssa.Package]map[string]bool{} + ssapkg := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).Pkg + objNames[ssapkg] = map[string]bool{} + for _, m := range ssapkg.Members { + if typ, ok := m.(*ssa.Type); ok { + objNames[ssapkg][typ.Name()] = true + } + } + for _, fn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + objNames[fn.Package()][fn.Name()] = true + } + + for _, fn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { + if IsInTest(pass, fn) { + // We don't care about malformed error messages in tests; + // they're usually for direct human consumption, not part + // of an API + continue + } + for _, block := range fn.Blocks { + instrLoop: + for _, ins := range block.Instrs { + call, ok := ins.(*ssa.Call) + if !ok { + continue + } + if !IsCallTo(call.Common(), "errors.New") && !IsCallTo(call.Common(), "fmt.Errorf") { + continue + } + + k, ok := call.Common().Args[0].(*ssa.Const) + if !ok { + continue + } + + s := constant.StringVal(k.Value) + if len(s) == 0 { + continue + } + switch s[len(s)-1] { + case '.', ':', '!', '\n': + pass.Reportf(call.Pos(), "error strings should not end with punctuation or a newline") + } + idx := strings.IndexByte(s, ' ') + if idx == -1 { + // single word error message, probably not a real + // error but something used in tests or during + // debugging + continue + } + word := s[:idx] + first, n := utf8.DecodeRuneInString(word) + if !unicode.IsUpper(first) { + continue + } + for _, c := range word[n:] { + if unicode.IsUpper(c) { + // Word is probably an initialism or + // multi-word function name + continue instrLoop + } + } + + word = strings.TrimRightFunc(word, func(r rune) bool { return unicode.IsPunct(r) }) + if objNames[fn.Package()][word] { + // Word is probably the name of a function or type in this package + continue + } + // First word in error starts with a capital + // letter, and the word doesn't contain any other + // capitals, making it unlikely to be an + // initialism or multi-word function name. + // + // It could still be a proper noun, though. 
+ + pass.Reportf(call.Pos(), "error strings should not be capitalized") + } + } + } + return nil, nil +} + +func CheckTimeNames(pass *analysis.Pass) (interface{}, error) { + suffixes := []string{ + "Sec", "Secs", "Seconds", + "Msec", "Msecs", + "Milli", "Millis", "Milliseconds", + "Usec", "Usecs", "Microseconds", + "MS", "Ms", + } + fn := func(T types.Type, names []*ast.Ident) { + if !IsType(T, "time.Duration") && !IsType(T, "*time.Duration") { + return + } + for _, name := range names { + for _, suffix := range suffixes { + if strings.HasSuffix(name.Name, suffix) { + ReportNodef(pass, name, "var %s is of type %v; don't use unit-specific suffix %q", name.Name, T, suffix) + break + } + } + } + } + for _, f := range pass.Files { + ast.Inspect(f, func(node ast.Node) bool { + switch node := node.(type) { + case *ast.ValueSpec: + T := pass.TypesInfo.TypeOf(node.Type) + fn(T, node.Names) + case *ast.FieldList: + for _, field := range node.List { + T := pass.TypesInfo.TypeOf(field.Type) + fn(T, field.Names) + } + } + return true + }) + } + return nil, nil +} + +func CheckErrorVarNames(pass *analysis.Pass) (interface{}, error) { + for _, f := range pass.Files { + for _, decl := range f.Decls { + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.VAR { + continue + } + for _, spec := range gen.Specs { + spec := spec.(*ast.ValueSpec) + if len(spec.Names) != len(spec.Values) { + continue + } + + for i, name := range spec.Names { + val := spec.Values[i] + if !IsCallToAST(pass, val, "errors.New") && !IsCallToAST(pass, val, "fmt.Errorf") { + continue + } + + prefix := "err" + if name.IsExported() { + prefix = "Err" + } + if !strings.HasPrefix(name.Name, prefix) { + ReportNodef(pass, name, "error var %s should have name of the form %sFoo", name.Name, prefix) + } + } + } + } + } + return nil, nil +} + +var httpStatusCodes = map[int]string{ + 100: "StatusContinue", + 101: "StatusSwitchingProtocols", + 102: "StatusProcessing", + 200: "StatusOK", + 201: "StatusCreated", + 202: "StatusAccepted", + 203: "StatusNonAuthoritativeInfo", + 204: "StatusNoContent", + 205: "StatusResetContent", + 206: "StatusPartialContent", + 207: "StatusMultiStatus", + 208: "StatusAlreadyReported", + 226: "StatusIMUsed", + 300: "StatusMultipleChoices", + 301: "StatusMovedPermanently", + 302: "StatusFound", + 303: "StatusSeeOther", + 304: "StatusNotModified", + 305: "StatusUseProxy", + 307: "StatusTemporaryRedirect", + 308: "StatusPermanentRedirect", + 400: "StatusBadRequest", + 401: "StatusUnauthorized", + 402: "StatusPaymentRequired", + 403: "StatusForbidden", + 404: "StatusNotFound", + 405: "StatusMethodNotAllowed", + 406: "StatusNotAcceptable", + 407: "StatusProxyAuthRequired", + 408: "StatusRequestTimeout", + 409: "StatusConflict", + 410: "StatusGone", + 411: "StatusLengthRequired", + 412: "StatusPreconditionFailed", + 413: "StatusRequestEntityTooLarge", + 414: "StatusRequestURITooLong", + 415: "StatusUnsupportedMediaType", + 416: "StatusRequestedRangeNotSatisfiable", + 417: "StatusExpectationFailed", + 418: "StatusTeapot", + 422: "StatusUnprocessableEntity", + 423: "StatusLocked", + 424: "StatusFailedDependency", + 426: "StatusUpgradeRequired", + 428: "StatusPreconditionRequired", + 429: "StatusTooManyRequests", + 431: "StatusRequestHeaderFieldsTooLarge", + 451: "StatusUnavailableForLegalReasons", + 500: "StatusInternalServerError", + 501: "StatusNotImplemented", + 502: "StatusBadGateway", + 503: "StatusServiceUnavailable", + 504: "StatusGatewayTimeout", + 505: "StatusHTTPVersionNotSupported", + 506: 
"StatusVariantAlsoNegotiates", + 507: "StatusInsufficientStorage", + 508: "StatusLoopDetected", + 510: "StatusNotExtended", + 511: "StatusNetworkAuthenticationRequired", +} + +func CheckHTTPStatusCodes(pass *analysis.Pass) (interface{}, error) { + whitelist := map[string]bool{} + for _, code := range config.For(pass).HTTPStatusCodeWhitelist { + whitelist[code] = true + } + fn := func(node ast.Node) bool { + if node == nil { + return true + } + call, ok := node.(*ast.CallExpr) + if !ok { + return true + } + + var arg int + switch CallNameAST(pass, call) { + case "net/http.Error": + arg = 2 + case "net/http.Redirect": + arg = 3 + case "net/http.StatusText": + arg = 0 + case "net/http.RedirectHandler": + arg = 1 + default: + return true + } + lit, ok := call.Args[arg].(*ast.BasicLit) + if !ok { + return true + } + if whitelist[lit.Value] { + return true + } + + n, err := strconv.Atoi(lit.Value) + if err != nil { + return true + } + s, ok := httpStatusCodes[n] + if !ok { + return true + } + ReportNodefFG(pass, lit, "should use constant http.%s instead of numeric literal %d", s, n) + return true + } + // OPT(dh): replace with inspector + for _, f := range pass.Files { + ast.Inspect(f, fn) + } + return nil, nil +} + +func CheckDefaultCaseOrder(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + stmt := node.(*ast.SwitchStmt) + list := stmt.Body.List + for i, c := range list { + if c.(*ast.CaseClause).List == nil && i != 0 && i != len(list)-1 { + ReportNodefFG(pass, c, "default case should be first or last in switch statement") + break + } + } + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.SwitchStmt)(nil)}, fn) + return nil, nil +} + +func CheckYodaConditions(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + cond := node.(*ast.BinaryExpr) + if cond.Op != token.EQL && cond.Op != token.NEQ { + return + } + if _, ok := cond.X.(*ast.BasicLit); !ok { + return + } + if _, ok := cond.Y.(*ast.BasicLit); ok { + // Don't flag lit == lit conditions, just in case + return + } + ReportNodefFG(pass, cond, "don't use Yoda conditions") + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.BinaryExpr)(nil)}, fn) + return nil, nil +} + +func CheckInvisibleCharacters(pass *analysis.Pass) (interface{}, error) { + fn := func(node ast.Node) { + lit := node.(*ast.BasicLit) + if lit.Kind != token.STRING { + return + } + for _, r := range lit.Value { + if unicode.Is(unicode.Cf, r) { + ReportNodef(pass, lit, "string literal contains the Unicode format character %U, consider using the %q escape sequence", r, r) + } else if unicode.Is(unicode.Cc, r) && r != '\n' && r != '\t' && r != '\r' { + ReportNodef(pass, lit, "string literal contains the Unicode control character %U, consider using the %q escape sequence", r, r) + } + } + } + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.BasicLit)(nil)}, fn) + return nil, nil +} diff --git a/vendor/honnef.co/go/tools/stylecheck/names.go b/vendor/honnef.co/go/tools/stylecheck/names.go new file mode 100644 index 000000000..160f9d7ff --- /dev/null +++ b/vendor/honnef.co/go/tools/stylecheck/names.go @@ -0,0 +1,264 @@ +// Copyright (c) 2013 The Go Authors. All rights reserved. +// Copyright (c) 2018 Dominik Honnef. All rights reserved. + +package stylecheck + +import ( + "go/ast" + "go/token" + "strings" + "unicode" + + "golang.org/x/tools/go/analysis" + "honnef.co/go/tools/config" + . 
"honnef.co/go/tools/lint/lintdsl" +) + +// knownNameExceptions is a set of names that are known to be exempt from naming checks. +// This is usually because they are constrained by having to match names in the +// standard library. +var knownNameExceptions = map[string]bool{ + "LastInsertId": true, // must match database/sql + "kWh": true, +} + +func CheckNames(pass *analysis.Pass) (interface{}, error) { + // A large part of this function is copied from + // github.com/golang/lint, Copyright (c) 2013 The Go Authors, + // licensed under the BSD 3-clause license. + + allCaps := func(s string) bool { + for _, r := range s { + if !((r >= 'A' && r <= 'Z') || (r >= '0' && r <= '9') || r == '_') { + return false + } + } + return true + } + + check := func(id *ast.Ident, thing string, initialisms map[string]bool) { + if id.Name == "_" { + return + } + if knownNameExceptions[id.Name] { + return + } + + // Handle two common styles from other languages that don't belong in Go. + if len(id.Name) >= 5 && allCaps(id.Name) && strings.Contains(id.Name, "_") { + ReportfFG(pass, id.Pos(), "should not use ALL_CAPS in Go names; use CamelCase instead") + return + } + + should := lintName(id.Name, initialisms) + if id.Name == should { + return + } + + if len(id.Name) > 2 && strings.Contains(id.Name[1:len(id.Name)-1], "_") { + ReportfFG(pass, id.Pos(), "should not use underscores in Go names; %s %s should be %s", thing, id.Name, should) + return + } + ReportfFG(pass, id.Pos(), "%s %s should be %s", thing, id.Name, should) + } + checkList := func(fl *ast.FieldList, thing string, initialisms map[string]bool) { + if fl == nil { + return + } + for _, f := range fl.List { + for _, id := range f.Names { + check(id, thing, initialisms) + } + } + } + + il := config.For(pass).Initialisms + initialisms := make(map[string]bool, len(il)) + for _, word := range il { + initialisms[word] = true + } + for _, f := range pass.Files { + // Package names need slightly different handling than other names. + if !strings.HasSuffix(f.Name.Name, "_test") && strings.Contains(f.Name.Name, "_") { + ReportfFG(pass, f.Pos(), "should not use underscores in package names") + } + if strings.IndexFunc(f.Name.Name, unicode.IsUpper) != -1 { + ReportfFG(pass, f.Pos(), "should not use MixedCaps in package name; %s should be %s", f.Name.Name, strings.ToLower(f.Name.Name)) + } + + ast.Inspect(f, func(node ast.Node) bool { + switch v := node.(type) { + case *ast.AssignStmt: + if v.Tok != token.DEFINE { + return true + } + for _, exp := range v.Lhs { + if id, ok := exp.(*ast.Ident); ok { + check(id, "var", initialisms) + } + } + case *ast.FuncDecl: + // Functions with no body are defined elsewhere (in + // assembly, or via go:linkname). These are likely to + // be something very low level (such as the runtime), + // where our rules don't apply. 
+ if v.Body == nil { + return true + } + + if IsInTest(pass, v) && (strings.HasPrefix(v.Name.Name, "Example") || strings.HasPrefix(v.Name.Name, "Test") || strings.HasPrefix(v.Name.Name, "Benchmark")) { + return true + } + + thing := "func" + if v.Recv != nil { + thing = "method" + } + + if !isTechnicallyExported(v) { + check(v.Name, thing, initialisms) + } + + checkList(v.Type.Params, thing+" parameter", initialisms) + checkList(v.Type.Results, thing+" result", initialisms) + case *ast.GenDecl: + if v.Tok == token.IMPORT { + return true + } + var thing string + switch v.Tok { + case token.CONST: + thing = "const" + case token.TYPE: + thing = "type" + case token.VAR: + thing = "var" + } + for _, spec := range v.Specs { + switch s := spec.(type) { + case *ast.TypeSpec: + check(s.Name, thing, initialisms) + case *ast.ValueSpec: + for _, id := range s.Names { + check(id, thing, initialisms) + } + } + } + case *ast.InterfaceType: + // Do not check interface method names. + // They are often constrainted by the method names of concrete types. + for _, x := range v.Methods.List { + ft, ok := x.Type.(*ast.FuncType) + if !ok { // might be an embedded interface name + continue + } + checkList(ft.Params, "interface method parameter", initialisms) + checkList(ft.Results, "interface method result", initialisms) + } + case *ast.RangeStmt: + if v.Tok == token.ASSIGN { + return true + } + if id, ok := v.Key.(*ast.Ident); ok { + check(id, "range var", initialisms) + } + if id, ok := v.Value.(*ast.Ident); ok { + check(id, "range var", initialisms) + } + case *ast.StructType: + for _, f := range v.Fields.List { + for _, id := range f.Names { + check(id, "struct field", initialisms) + } + } + } + return true + }) + } + return nil, nil +} + +// lintName returns a different name if it should be different. +func lintName(name string, initialisms map[string]bool) (should string) { + // A large part of this function is copied from + // github.com/golang/lint, Copyright (c) 2013 The Go Authors, + // licensed under the BSD 3-clause license. + + // Fast path for simple cases: "_" and all lowercase. + if name == "_" { + return name + } + if strings.IndexFunc(name, func(r rune) bool { return !unicode.IsLower(r) }) == -1 { + return name + } + + // Split camelCase at any lower->upper transition, and split on underscores. + // Check each word for common initialisms. + runes := []rune(name) + w, i := 0, 0 // index of start of word, scan + for i+1 <= len(runes) { + eow := false // whether we hit the end of a word + if i+1 == len(runes) { + eow = true + } else if runes[i+1] == '_' && i+1 != len(runes)-1 { + // underscore; shift the remainder forward over any run of underscores + eow = true + n := 1 + for i+n+1 < len(runes) && runes[i+n+1] == '_' { + n++ + } + + // Leave at most one underscore if the underscore is between two digits + if i+n+1 < len(runes) && unicode.IsDigit(runes[i]) && unicode.IsDigit(runes[i+n+1]) { + n-- + } + + copy(runes[i+1:], runes[i+n+1:]) + runes = runes[:len(runes)-n] + } else if unicode.IsLower(runes[i]) && !unicode.IsLower(runes[i+1]) { + // lower->non-lower + eow = true + } + i++ + if !eow { + continue + } + + // [w,i) is a word. + word := string(runes[w:i]) + if u := strings.ToUpper(word); initialisms[u] { + // Keep consistent case, which is lowercase only at the start. + if w == 0 && unicode.IsLower(runes[w]) { + u = strings.ToLower(u) + } + // All the common initialisms are ASCII, + // so we can replace the bytes exactly. 
+ // TODO(dh): this won't be true once we allow custom initialisms + copy(runes[w:], []rune(u)) + } else if w > 0 && strings.ToLower(word) == word { + // already all lowercase, and not the first word, so uppercase the first character. + runes[w] = unicode.ToUpper(runes[w]) + } + w = i + } + return string(runes) +} + +func isTechnicallyExported(f *ast.FuncDecl) bool { + if f.Recv != nil || f.Doc == nil { + return false + } + + const export = "//export " + const linkname = "//go:linkname " + for _, c := range f.Doc.List { + if strings.HasPrefix(c.Text, export) && len(c.Text) == len(export)+len(f.Name.Name) && c.Text[len(export):] == f.Name.Name { + return true + } + + if strings.HasPrefix(c.Text, linkname) { + return true + } + } + return false +} diff --git a/vendor/honnef.co/go/tools/unused/edge.go b/vendor/honnef.co/go/tools/unused/edge.go new file mode 100644 index 000000000..02e0d09cf --- /dev/null +++ b/vendor/honnef.co/go/tools/unused/edge.go @@ -0,0 +1,54 @@ +package unused + +//go:generate stringer -type edgeKind +type edgeKind uint64 + +func (e edgeKind) is(o edgeKind) bool { + return e&o != 0 +} + +const ( + edgeAlias edgeKind = 1 << iota + edgeBlankField + edgeAnonymousStruct + edgeCgoExported + edgeConstGroup + edgeElementType + edgeEmbeddedInterface + edgeExportedConstant + edgeExportedField + edgeExportedFunction + edgeExportedMethod + edgeExportedType + edgeExportedVariable + edgeExtendsExportedFields + edgeExtendsExportedMethodSet + edgeFieldAccess + edgeFunctionArgument + edgeFunctionResult + edgeFunctionSignature + edgeImplements + edgeInstructionOperand + edgeInterfaceCall + edgeInterfaceMethod + edgeKeyType + edgeLinkname + edgeMainFunction + edgeNamedType + edgeNetRPCRegister + edgeNoCopySentinel + edgeProvidesMethod + edgeReceiver + edgeRuntimeFunction + edgeSignature + edgeStructConversion + edgeTestSink + edgeTupleElement + edgeType + edgeTypeName + edgeUnderlyingType + edgePointerType + edgeUnsafeConversion + edgeUsedConstant + edgeVarDecl +) diff --git a/vendor/honnef.co/go/tools/unused/edgekind_string.go b/vendor/honnef.co/go/tools/unused/edgekind_string.go new file mode 100644 index 000000000..7629636cf --- /dev/null +++ b/vendor/honnef.co/go/tools/unused/edgekind_string.go @@ -0,0 +1,109 @@ +// Code generated by "stringer -type edgeKind"; DO NOT EDIT. + +package unused + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[edgeAlias-1] + _ = x[edgeBlankField-2] + _ = x[edgeAnonymousStruct-4] + _ = x[edgeCgoExported-8] + _ = x[edgeConstGroup-16] + _ = x[edgeElementType-32] + _ = x[edgeEmbeddedInterface-64] + _ = x[edgeExportedConstant-128] + _ = x[edgeExportedField-256] + _ = x[edgeExportedFunction-512] + _ = x[edgeExportedMethod-1024] + _ = x[edgeExportedType-2048] + _ = x[edgeExportedVariable-4096] + _ = x[edgeExtendsExportedFields-8192] + _ = x[edgeExtendsExportedMethodSet-16384] + _ = x[edgeFieldAccess-32768] + _ = x[edgeFunctionArgument-65536] + _ = x[edgeFunctionResult-131072] + _ = x[edgeFunctionSignature-262144] + _ = x[edgeImplements-524288] + _ = x[edgeInstructionOperand-1048576] + _ = x[edgeInterfaceCall-2097152] + _ = x[edgeInterfaceMethod-4194304] + _ = x[edgeKeyType-8388608] + _ = x[edgeLinkname-16777216] + _ = x[edgeMainFunction-33554432] + _ = x[edgeNamedType-67108864] + _ = x[edgeNetRPCRegister-134217728] + _ = x[edgeNoCopySentinel-268435456] + _ = x[edgeProvidesMethod-536870912] + _ = x[edgeReceiver-1073741824] + _ = x[edgeRuntimeFunction-2147483648] + _ = x[edgeSignature-4294967296] + _ = x[edgeStructConversion-8589934592] + _ = x[edgeTestSink-17179869184] + _ = x[edgeTupleElement-34359738368] + _ = x[edgeType-68719476736] + _ = x[edgeTypeName-137438953472] + _ = x[edgeUnderlyingType-274877906944] + _ = x[edgePointerType-549755813888] + _ = x[edgeUnsafeConversion-1099511627776] + _ = x[edgeUsedConstant-2199023255552] + _ = x[edgeVarDecl-4398046511104] +} + +const _edgeKind_name = "edgeAliasedgeBlankFieldedgeAnonymousStructedgeCgoExportededgeConstGroupedgeElementTypeedgeEmbeddedInterfaceedgeExportedConstantedgeExportedFieldedgeExportedFunctionedgeExportedMethodedgeExportedTypeedgeExportedVariableedgeExtendsExportedFieldsedgeExtendsExportedMethodSetedgeFieldAccessedgeFunctionArgumentedgeFunctionResultedgeFunctionSignatureedgeImplementsedgeInstructionOperandedgeInterfaceCalledgeInterfaceMethodedgeKeyTypeedgeLinknameedgeMainFunctionedgeNamedTypeedgeNetRPCRegisteredgeNoCopySentineledgeProvidesMethodedgeReceiveredgeRuntimeFunctionedgeSignatureedgeStructConversionedgeTestSinkedgeTupleElementedgeTypeedgeTypeNameedgeUnderlyingTypeedgePointerTypeedgeUnsafeConversionedgeUsedConstantedgeVarDecl" + +var _edgeKind_map = map[edgeKind]string{ + 1: _edgeKind_name[0:9], + 2: _edgeKind_name[9:23], + 4: _edgeKind_name[23:42], + 8: _edgeKind_name[42:57], + 16: _edgeKind_name[57:71], + 32: _edgeKind_name[71:86], + 64: _edgeKind_name[86:107], + 128: _edgeKind_name[107:127], + 256: _edgeKind_name[127:144], + 512: _edgeKind_name[144:164], + 1024: _edgeKind_name[164:182], + 2048: _edgeKind_name[182:198], + 4096: _edgeKind_name[198:218], + 8192: _edgeKind_name[218:243], + 16384: _edgeKind_name[243:271], + 32768: _edgeKind_name[271:286], + 65536: _edgeKind_name[286:306], + 131072: _edgeKind_name[306:324], + 262144: _edgeKind_name[324:345], + 524288: _edgeKind_name[345:359], + 1048576: _edgeKind_name[359:381], + 2097152: _edgeKind_name[381:398], + 4194304: _edgeKind_name[398:417], + 8388608: _edgeKind_name[417:428], + 16777216: _edgeKind_name[428:440], + 33554432: _edgeKind_name[440:456], + 67108864: _edgeKind_name[456:469], + 134217728: _edgeKind_name[469:487], + 268435456: _edgeKind_name[487:505], + 536870912: _edgeKind_name[505:523], + 1073741824: _edgeKind_name[523:535], + 2147483648: _edgeKind_name[535:554], + 4294967296: _edgeKind_name[554:567], + 8589934592: _edgeKind_name[567:587], + 17179869184: _edgeKind_name[587:599], + 34359738368: _edgeKind_name[599:615], + 68719476736: 
_edgeKind_name[615:623], + 137438953472: _edgeKind_name[623:635], + 274877906944: _edgeKind_name[635:653], + 549755813888: _edgeKind_name[653:668], + 1099511627776: _edgeKind_name[668:688], + 2199023255552: _edgeKind_name[688:704], + 4398046511104: _edgeKind_name[704:715], +} + +func (i edgeKind) String() string { + if str, ok := _edgeKind_map[i]; ok { + return str + } + return "edgeKind(" + strconv.FormatInt(int64(i), 10) + ")" +} diff --git a/vendor/honnef.co/go/tools/unused/implements.go b/vendor/honnef.co/go/tools/unused/implements.go new file mode 100644 index 000000000..835baac69 --- /dev/null +++ b/vendor/honnef.co/go/tools/unused/implements.go @@ -0,0 +1,82 @@ +package unused + +import "go/types" + +// lookupMethod returns the index of and method with matching package and name, or (-1, nil). +func lookupMethod(T *types.Interface, pkg *types.Package, name string) (int, *types.Func) { + if name != "_" { + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + if sameId(m, pkg, name) { + return i, m + } + } + } + return -1, nil +} + +func sameId(obj types.Object, pkg *types.Package, name string) bool { + // spec: + // "Two identifiers are different if they are spelled differently, + // or if they appear in different packages and are not exported. + // Otherwise, they are the same." + if name != obj.Name() { + return false + } + // obj.Name == name + if obj.Exported() { + return true + } + // not exported, so packages must be the same (pkg == nil for + // fields in Universe scope; this can only happen for types + // introduced via Eval) + if pkg == nil || obj.Pkg() == nil { + return pkg == obj.Pkg() + } + // pkg != nil && obj.pkg != nil + return pkg.Path() == obj.Pkg().Path() +} + +func (g *Graph) implements(V types.Type, T *types.Interface, msV *types.MethodSet) ([]*types.Selection, bool) { + // fast path for common case + if T.Empty() { + return nil, true + } + + if ityp, _ := V.Underlying().(*types.Interface); ityp != nil { + // TODO(dh): is this code reachable? + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + _, obj := lookupMethod(ityp, m.Pkg(), m.Name()) + switch { + case obj == nil: + return nil, false + case !types.Identical(obj.Type(), m.Type()): + return nil, false + } + } + return nil, true + } + + // A concrete type implements T if it implements all methods of T. + var sels []*types.Selection + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + sel := msV.Lookup(m.Pkg(), m.Name()) + if sel == nil { + return nil, false + } + + f, _ := sel.Obj().(*types.Func) + if f == nil { + return nil, false + } + + if !types.Identical(f.Type(), m.Type()) { + return nil, false + } + + sels = append(sels, sel) + } + return sels, true +} diff --git a/vendor/honnef.co/go/tools/unused/unused.go b/vendor/honnef.co/go/tools/unused/unused.go new file mode 100644 index 000000000..152d3692d --- /dev/null +++ b/vendor/honnef.co/go/tools/unused/unused.go @@ -0,0 +1,1964 @@ +package unused + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "io" + "strings" + "sync" + "sync/atomic" + + "golang.org/x/tools/go/analysis" + "honnef.co/go/tools/go/types/typeutil" + "honnef.co/go/tools/internal/passes/buildssa" + "honnef.co/go/tools/lint" + "honnef.co/go/tools/lint/lintdsl" + "honnef.co/go/tools/ssa" +) + +// The graph we construct omits nodes along a path that do not +// contribute any new information to the solution. For example, the +// full graph for a function with a receiver would be Func -> +// Signature -> Var -> Type. 
However, since signatures cannot be +// unused, and receivers are always considered used, we can compact +// the graph down to Func -> Type. This makes the graph smaller, but +// harder to debug. + +// TODO(dh): conversions between structs mark fields as used, but the +// conversion itself isn't part of that subgraph. even if the function +// containing the conversion is unused, the fields will be marked as +// used. + +// TODO(dh): we cannot observe function calls in assembly files. + +/* + +- packages use: + - (1.1) exported named types (unless in package main) + - (1.2) exported functions (unless in package main) + - (1.3) exported variables (unless in package main) + - (1.4) exported constants (unless in package main) + - (1.5) init functions + - (1.6) functions exported to cgo + - (1.7) the main function iff in the main package + - (1.8) symbols linked via go:linkname + +- named types use: + - (2.1) exported methods + - (2.2) the type they're based on + - (2.3) all their aliases. we can't easily track uses of aliases + because go/types turns them into uses of the aliased types. assume + that if a type is used, so are all of its aliases. + - (2.4) the pointer type. this aids with eagerly implementing + interfaces. if a method that implements an interface is defined on + a pointer receiver, and the pointer type is never used, but the + named type is, then we still want to mark the method as used. + +- variables and constants use: + - their types + +- functions use: + - (4.1) all their arguments, return parameters and receivers + - (4.2) anonymous functions defined beneath them + - (4.3) closures and bound methods. + this implements a simplified model where a function is used merely by being referenced, even if it is never called. + that way we don't have to keep track of closures escaping functions. + - (4.4) functions they return. we assume that someone else will call the returned function + - (4.5) functions/interface methods they call + - types they instantiate or convert to + - (4.7) fields they access + - (4.8) types of all instructions + - (4.9) package-level variables they assign to iff in tests (sinks for benchmarks) + +- conversions use: + - (5.1) when converting between two equivalent structs, the fields in + either struct use each other. the fields are relevant for the + conversion, but only if the fields are also accessed outside the + conversion. + - (5.2) when converting to or from unsafe.Pointer, mark all fields as used. + +- structs use: + - (6.1) fields of type NoCopy sentinel + - (6.2) exported fields + - (6.3) embedded fields that help implement interfaces (either fully implements it, or contributes required methods) (recursively) + - (6.4) embedded fields that have exported methods (recursively) + - (6.5) embedded structs that have exported fields (recursively) + +- (7.1) field accesses use fields +- (7.2) fields use their types + +- (8.0) How we handle interfaces: + - (8.1) We do not technically care about interfaces that only consist of + exported methods. Exported methods on concrete types are always + marked as used. + - Any concrete type implements all known interfaces. Even if it isn't + assigned to any interfaces in our code, the user may receive a value + of the type and expect to pass it back to us through an interface. + + Concrete types use their methods that implement interfaces. If the + type is used, it uses those methods. Otherwise, it doesn't. This + way, types aren't incorrectly marked reachable through the edge + from method to type. 
+ + - (8.3) All interface methods are marked as used, even if they never get + called. This is to accomodate sum types (unexported interface + method that must exist but never gets called.) + + - (8.4) All embedded interfaces are marked as used. This is an + extension of 8.3, but we have to explicitly track embedded + interfaces because in a chain C->B->A, B wouldn't be marked as + used by 8.3 just because it contributes A's methods to C. + +- Inherent uses: + - thunks and other generated wrappers call the real function + - (9.2) variables use their types + - (9.3) types use their underlying and element types + - (9.4) conversions use the type they convert to + - (9.5) instructions use their operands + - (9.6) instructions use their operands' types + - (9.7) variable _reads_ use variables, writes do not, except in tests + - (9.8) runtime functions that may be called from user code via the compiler + + +- const groups: + (10.1) if one constant out of a block of constants is used, mark all + of them used. a lot of the time, unused constants exist for the sake + of completeness. See also + https://github.com/dominikh/go-tools/issues/365 + + +- (11.1) anonymous struct types use all their fields. we cannot + deduplicate struct types, as that leads to order-dependent + reportings. we can't not deduplicate struct types while still + tracking fields, because then each instance of the unnamed type in + the data flow chain will get its own fields, causing false + positives. Thus, we only accurately track fields of named struct + types, and assume that unnamed struct types use all their fields. + + +- Differences in whole program mode: + - (e2) types aim to implement all exported interfaces from all packages + - (e3) exported identifiers aren't automatically used. for fields and + methods this poses extra issues due to reflection. We assume + that all exported fields are used. We also maintain a list of + known reflection-based method callers. + +*/ + +func assert(b bool) { + if !b { + panic("failed assertion") + } +} + +func typString(obj types.Object) string { + switch obj := obj.(type) { + case *types.Func: + return "func" + case *types.Var: + if obj.IsField() { + return "field" + } + return "var" + case *types.Const: + return "const" + case *types.TypeName: + return "type" + default: + return "identifier" + } +} + +// /usr/lib/go/src/runtime/proc.go:433:6: func badmorestackg0 is unused (U1000) + +// Functions defined in the Go runtime that may be called through +// compiler magic or via assembly. 
+var runtimeFuncs = map[string]bool{ + // The first part of the list is copied from + // cmd/compile/internal/gc/builtin.go, var runtimeDecls + "newobject": true, + "panicindex": true, + "panicslice": true, + "panicdivide": true, + "panicmakeslicelen": true, + "throwinit": true, + "panicwrap": true, + "gopanic": true, + "gorecover": true, + "goschedguarded": true, + "printbool": true, + "printfloat": true, + "printint": true, + "printhex": true, + "printuint": true, + "printcomplex": true, + "printstring": true, + "printpointer": true, + "printiface": true, + "printeface": true, + "printslice": true, + "printnl": true, + "printsp": true, + "printlock": true, + "printunlock": true, + "concatstring2": true, + "concatstring3": true, + "concatstring4": true, + "concatstring5": true, + "concatstrings": true, + "cmpstring": true, + "intstring": true, + "slicebytetostring": true, + "slicebytetostringtmp": true, + "slicerunetostring": true, + "stringtoslicebyte": true, + "stringtoslicerune": true, + "slicecopy": true, + "slicestringcopy": true, + "decoderune": true, + "countrunes": true, + "convI2I": true, + "convT16": true, + "convT32": true, + "convT64": true, + "convTstring": true, + "convTslice": true, + "convT2E": true, + "convT2Enoptr": true, + "convT2I": true, + "convT2Inoptr": true, + "assertE2I": true, + "assertE2I2": true, + "assertI2I": true, + "assertI2I2": true, + "panicdottypeE": true, + "panicdottypeI": true, + "panicnildottype": true, + "ifaceeq": true, + "efaceeq": true, + "fastrand": true, + "makemap64": true, + "makemap": true, + "makemap_small": true, + "mapaccess1": true, + "mapaccess1_fast32": true, + "mapaccess1_fast64": true, + "mapaccess1_faststr": true, + "mapaccess1_fat": true, + "mapaccess2": true, + "mapaccess2_fast32": true, + "mapaccess2_fast64": true, + "mapaccess2_faststr": true, + "mapaccess2_fat": true, + "mapassign": true, + "mapassign_fast32": true, + "mapassign_fast32ptr": true, + "mapassign_fast64": true, + "mapassign_fast64ptr": true, + "mapassign_faststr": true, + "mapiterinit": true, + "mapdelete": true, + "mapdelete_fast32": true, + "mapdelete_fast64": true, + "mapdelete_faststr": true, + "mapiternext": true, + "mapclear": true, + "makechan64": true, + "makechan": true, + "chanrecv1": true, + "chanrecv2": true, + "chansend1": true, + "closechan": true, + "writeBarrier": true, + "typedmemmove": true, + "typedmemclr": true, + "typedslicecopy": true, + "selectnbsend": true, + "selectnbrecv": true, + "selectnbrecv2": true, + "selectsetpc": true, + "selectgo": true, + "block": true, + "makeslice": true, + "makeslice64": true, + "growslice": true, + "memmove": true, + "memclrNoHeapPointers": true, + "memclrHasPointers": true, + "memequal": true, + "memequal8": true, + "memequal16": true, + "memequal32": true, + "memequal64": true, + "memequal128": true, + "int64div": true, + "uint64div": true, + "int64mod": true, + "uint64mod": true, + "float64toint64": true, + "float64touint64": true, + "float64touint32": true, + "int64tofloat64": true, + "uint64tofloat64": true, + "uint32tofloat64": true, + "complex128div": true, + "racefuncenter": true, + "racefuncenterfp": true, + "racefuncexit": true, + "raceread": true, + "racewrite": true, + "racereadrange": true, + "racewriterange": true, + "msanread": true, + "msanwrite": true, + "x86HasPOPCNT": true, + "x86HasSSE41": true, + "arm64HasATOMICS": true, + + // The second part of the list is extracted from assembly code in + // the standard library, with the exception of the runtime package itself + "abort": true, + 
"aeshashbody": true, + "args": true, + "asminit": true, + "badctxt": true, + "badmcall2": true, + "badmcall": true, + "badmorestackg0": true, + "badmorestackgsignal": true, + "badsignal2": true, + "callbackasm1": true, + "callCfunction": true, + "cgocallback_gofunc": true, + "cgocallbackg": true, + "checkgoarm": true, + "check": true, + "debugCallCheck": true, + "debugCallWrap": true, + "emptyfunc": true, + "entersyscall": true, + "exit": true, + "exits": true, + "exitsyscall": true, + "externalthreadhandler": true, + "findnull": true, + "goexit1": true, + "gostring": true, + "i386_set_ldt": true, + "_initcgo": true, + "init_thread_tls": true, + "ldt0setup": true, + "libpreinit": true, + "load_g": true, + "morestack": true, + "mstart": true, + "nacl_sysinfo": true, + "nanotimeQPC": true, + "nanotime": true, + "newosproc0": true, + "newproc": true, + "newstack": true, + "noted": true, + "nowQPC": true, + "osinit": true, + "printf": true, + "racecallback": true, + "reflectcallmove": true, + "reginit": true, + "rt0_go": true, + "save_g": true, + "schedinit": true, + "setldt": true, + "settls": true, + "sighandler": true, + "sigprofNonGo": true, + "sigtrampgo": true, + "_sigtramp": true, + "sigtramp": true, + "stackcheck": true, + "syscall_chdir": true, + "syscall_chroot": true, + "syscall_close": true, + "syscall_dup2": true, + "syscall_execve": true, + "syscall_exit": true, + "syscall_fcntl": true, + "syscall_forkx": true, + "syscall_gethostname": true, + "syscall_getpid": true, + "syscall_ioctl": true, + "syscall_pipe": true, + "syscall_rawsyscall6": true, + "syscall_rawSyscall6": true, + "syscall_rawsyscall": true, + "syscall_RawSyscall": true, + "syscall_rawsysvicall6": true, + "syscall_setgid": true, + "syscall_setgroups": true, + "syscall_setpgid": true, + "syscall_setsid": true, + "syscall_setuid": true, + "syscall_syscall6": true, + "syscall_syscall": true, + "syscall_Syscall": true, + "syscall_sysvicall6": true, + "syscall_wait4": true, + "syscall_write": true, + "traceback": true, + "tstart": true, + "usplitR0": true, + "wbBufFlush": true, + "write": true, +} + +type pkg struct { + Fset *token.FileSet + Files []*ast.File + Pkg *types.Package + TypesInfo *types.Info + TypesSizes types.Sizes + SSA *ssa.Package + SrcFuncs []*ssa.Function +} + +type Checker struct { + WholeProgram bool + Debug io.Writer + + mu sync.Mutex + initialPackages map[*types.Package]struct{} + allPackages map[*types.Package]struct{} + graph *Graph +} + +func NewChecker(wholeProgram bool) *Checker { + return &Checker{ + initialPackages: map[*types.Package]struct{}{}, + allPackages: map[*types.Package]struct{}{}, + WholeProgram: wholeProgram, + } +} + +func (c *Checker) Analyzer() *analysis.Analyzer { + name := "U1000" + if c.WholeProgram { + name = "U1001" + } + return &analysis.Analyzer{ + Name: name, + Doc: "Unused code", + Run: c.Run, + Requires: []*analysis.Analyzer{buildssa.Analyzer}, + } +} + +func (c *Checker) Run(pass *analysis.Pass) (interface{}, error) { + c.mu.Lock() + if c.graph == nil { + c.graph = NewGraph() + c.graph.wholeProgram = c.WholeProgram + c.graph.fset = pass.Fset + } + + var visit func(pkg *types.Package) + visit = func(pkg *types.Package) { + if _, ok := c.allPackages[pkg]; ok { + return + } + c.allPackages[pkg] = struct{}{} + for _, imp := range pkg.Imports() { + visit(imp) + } + } + visit(pass.Pkg) + + c.initialPackages[pass.Pkg] = struct{}{} + c.mu.Unlock() + + ssapkg := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) + pkg := &pkg{ + Fset: pass.Fset, + Files: pass.Files, + Pkg: 
pass.Pkg, + TypesInfo: pass.TypesInfo, + TypesSizes: pass.TypesSizes, + SSA: ssapkg.Pkg, + SrcFuncs: ssapkg.SrcFuncs, + } + + c.processPkg(c.graph, pkg) + + return nil, nil +} + +func (c *Checker) ProblemObject(fset *token.FileSet, obj types.Object) lint.Problem { + name := obj.Name() + if sig, ok := obj.Type().(*types.Signature); ok && sig.Recv() != nil { + switch sig.Recv().Type().(type) { + case *types.Named, *types.Pointer: + typ := types.TypeString(sig.Recv().Type(), func(*types.Package) string { return "" }) + if len(typ) > 0 && typ[0] == '*' { + name = fmt.Sprintf("(%s).%s", typ, obj.Name()) + } else if len(typ) > 0 { + name = fmt.Sprintf("%s.%s", typ, obj.Name()) + } + } + } + + checkName := "U1000" + if c.WholeProgram { + checkName = "U1001" + } + return lint.Problem{ + Pos: lint.DisplayPosition(fset, obj.Pos()), + Message: fmt.Sprintf("%s %s is unused", typString(obj), name), + Check: checkName, + } +} + +func (c *Checker) Result() []types.Object { + out := c.results() + + out2 := make([]types.Object, 0, len(out)) + for _, v := range out { + if _, ok := c.initialPackages[v.Pkg()]; !ok { + continue + } + out2 = append(out2, v) + } + + return out2 +} + +func (c *Checker) debugf(f string, v ...interface{}) { + if c.Debug != nil { + fmt.Fprintf(c.Debug, f, v...) + } +} + +func (graph *Graph) quieten(node *Node) { + if node.seen { + return + } + switch obj := node.obj.(type) { + case *types.Named: + for i := 0; i < obj.NumMethods(); i++ { + m := obj.Method(i) + if node, ok := graph.nodeMaybe(m); ok { + node.quiet = true + } + } + case *types.Struct: + for i := 0; i < obj.NumFields(); i++ { + if node, ok := graph.nodeMaybe(obj.Field(i)); ok { + node.quiet = true + } + } + case *types.Interface: + for i := 0; i < obj.NumExplicitMethods(); i++ { + m := obj.ExplicitMethod(i) + if node, ok := graph.nodeMaybe(m); ok { + node.quiet = true + } + } + } +} + +func (c *Checker) results() []types.Object { + if c.graph == nil { + // We never analyzed any packages + return nil + } + + var out []types.Object + + if c.WholeProgram { + var ifaces []*types.Interface + var notIfaces []types.Type + + // implement as many interfaces as possible + c.graph.seenTypes.Iterate(func(t types.Type, _ interface{}) { + switch t := t.(type) { + case *types.Interface: + if t.NumMethods() > 0 { + ifaces = append(ifaces, t) + } + default: + if _, ok := t.Underlying().(*types.Interface); !ok { + notIfaces = append(notIfaces, t) + } + } + }) + + for pkg := range c.allPackages { + for _, iface := range interfacesFromExportData(pkg) { + if iface.NumMethods() > 0 { + ifaces = append(ifaces, iface) + } + } + } + + ctx := &context{ + g: c.graph, + seenTypes: &c.graph.seenTypes, + } + // (8.0) handle interfaces + // (e2) types aim to implement all exported interfaces from all packages + for _, t := range notIfaces { + // OPT(dh): it is unfortunate that we do not have access + // to a populated method set at this point. 
+ ms := types.NewMethodSet(t) + for _, iface := range ifaces { + if sels, ok := c.graph.implements(t, iface, ms); ok { + for _, sel := range sels { + c.graph.useMethod(ctx, t, sel, t, edgeImplements) + } + } + } + } + } + + if c.Debug != nil { + debugNode := func(node *Node) { + if node.obj == nil { + c.debugf("n%d [label=\"Root\"];\n", node.id) + } else { + c.debugf("n%d [label=%q];\n", node.id, fmt.Sprintf("(%T) %s", node.obj, node.obj)) + } + for _, e := range node.used { + for i := edgeKind(1); i < 64; i++ { + if e.kind.is(1 << i) { + c.debugf("n%d -> n%d [label=%q];\n", node.id, e.node.id, edgeKind(1< 1 { + cg := &ConstGroup{} + ctx.see(cg) + for _, spec := range specs { + for _, name := range spec.(*ast.ValueSpec).Names { + obj := pkg.TypesInfo.ObjectOf(name) + // (10.1) const groups + ctx.seeAndUse(obj, cg, edgeConstGroup) + ctx.use(cg, obj, edgeConstGroup) + } + } + } + } + case token.VAR: + for _, spec := range n.Specs { + v := spec.(*ast.ValueSpec) + for _, name := range v.Names { + T := pkg.TypesInfo.TypeOf(name) + if fn != nil { + ctx.seeAndUse(T, fn, edgeVarDecl) + } else { + // TODO(dh): we likely want to make + // the type used by the variable, not + // the package containing the + // variable. But then we have to take + // special care of blank identifiers. + ctx.seeAndUse(T, nil, edgeVarDecl) + } + g.typ(ctx, T, nil) + } + } + case token.TYPE: + for _, spec := range n.Specs { + // go/types doesn't provide a way to go from a + // types.Named to the named type it was based on + // (the t1 in type t2 t1). Therefore we walk the + // AST and process GenDecls. + // + // (2.2) named types use the type they're based on + v := spec.(*ast.TypeSpec) + T := pkg.TypesInfo.TypeOf(v.Type) + obj := pkg.TypesInfo.ObjectOf(v.Name) + ctx.see(obj) + ctx.see(T) + ctx.use(T, obj, edgeType) + g.typ(ctx, obj.Type(), nil) + g.typ(ctx, T, nil) + + if v.Assign != 0 { + aliasFor := obj.(*types.TypeName).Type() + // (2.3) named types use all their aliases. we can't easily track uses of aliases + if isIrrelevant(aliasFor) { + // We do not track the type this is an + // alias for (for example builtins), so + // just mark the alias used. + // + // FIXME(dh): what about aliases declared inside functions? + ctx.use(obj, nil, edgeAlias) + } else { + ctx.see(aliasFor) + ctx.seeAndUse(obj, aliasFor, edgeAlias) + } + } + } + } + } + return true + }) + } + + for _, m := range pkg.SSA.Members { + switch m := m.(type) { + case *ssa.NamedConst: + // nothing to do, we collect all constants from Defs + case *ssa.Global: + if m.Object() != nil { + ctx.see(m.Object()) + if g.trackExportedIdentifier(ctx, m.Object()) { + // (1.3) packages use exported variables (unless in package main) + ctx.use(m.Object(), nil, edgeExportedVariable) + } + } + case *ssa.Function: + mObj := owningObject(m) + if mObj != nil { + ctx.see(mObj) + } + //lint:ignore SA9003 handled implicitly + if m.Name() == "init" { + // (1.5) packages use init functions + // + // This is handled implicitly. The generated init + // function has no object, thus everything in it will + // be owned by the package. + } + // This branch catches top-level functions, not methods. 
+ if m.Object() != nil && g.trackExportedIdentifier(ctx, m.Object()) { + // (1.2) packages use exported functions (unless in package main) + ctx.use(mObj, nil, edgeExportedFunction) + } + if m.Name() == "main" && pkg.Pkg.Name() == "main" { + // (1.7) packages use the main function iff in the main package + ctx.use(mObj, nil, edgeMainFunction) + } + if pkg.Pkg.Path() == "runtime" && runtimeFuncs[m.Name()] { + // (9.8) runtime functions that may be called from user code via the compiler + ctx.use(mObj, nil, edgeRuntimeFunction) + } + if m.Syntax() != nil { + doc := m.Syntax().(*ast.FuncDecl).Doc + if doc != nil { + for _, cmt := range doc.List { + if strings.HasPrefix(cmt.Text, "//go:cgo_export_") { + // (1.6) packages use functions exported to cgo + ctx.use(mObj, nil, edgeCgoExported) + } + } + } + } + g.function(ctx, m) + case *ssa.Type: + if m.Object() != nil { + ctx.see(m.Object()) + if g.trackExportedIdentifier(ctx, m.Object()) { + // (1.1) packages use exported named types (unless in package main) + ctx.use(m.Object(), nil, edgeExportedType) + } + } + g.typ(ctx, m.Type(), nil) + default: + panic(fmt.Sprintf("unreachable: %T", m)) + } + } + + if !g.wholeProgram { + // When not in whole program mode we reset seenTypes after each package, + // which means g.seenTypes only contains types of + // interest to us. In whole program mode, we're better off + // processing all interfaces at once, globally, both for + // performance reasons and because in whole program mode we + // actually care about all interfaces, not just the subset + // that has unexported methods. + + var ifaces []*types.Interface + var notIfaces []types.Type + + ctx.seenTypes.Iterate(func(t types.Type, _ interface{}) { + switch t := t.(type) { + case *types.Interface: + // OPT(dh): (8.1) we only need interfaces that have unexported methods + ifaces = append(ifaces, t) + default: + if _, ok := t.Underlying().(*types.Interface); !ok { + notIfaces = append(notIfaces, t) + } + } + }) + + // (8.0) handle interfaces + for _, t := range notIfaces { + ms := pkg.SSA.Prog.MethodSets.MethodSet(t) + for _, iface := range ifaces { + if sels, ok := g.implements(t, iface, ms); ok { + for _, sel := range sels { + g.useMethod(ctx, t, sel, t, edgeImplements) + } + } + } + } + } +} + +func (g *Graph) useMethod(ctx *context, t types.Type, sel *types.Selection, by interface{}, kind edgeKind) { + obj := sel.Obj() + path := sel.Index() + assert(obj != nil) + if len(path) > 1 { + base := lintdsl.Dereference(t).Underlying().(*types.Struct) + for _, idx := range path[:len(path)-1] { + next := base.Field(idx) + // (6.3) structs use embedded fields that help implement interfaces + ctx.see(base) + ctx.seeAndUse(next, base, edgeProvidesMethod) + base, _ = lintdsl.Dereference(next.Type()).Underlying().(*types.Struct) + } + } + ctx.seeAndUse(obj, by, kind) +} + +func owningObject(fn *ssa.Function) types.Object { + if fn.Object() != nil { + return fn.Object() + } + if fn.Parent() != nil { + return owningObject(fn.Parent()) + } + return nil +} + +func (g *Graph) function(ctx *context, fn *ssa.Function) { + if fn.Package() != nil && fn.Package() != ctx.pkg.SSA { + return + } + + name := fn.RelString(nil) + if _, ok := ctx.seenFns[name]; ok { + return + } + ctx.seenFns[name] = struct{}{} + + // (4.1) functions use all their arguments, return parameters and receivers + g.signature(ctx, fn.Signature, owningObject(fn)) + g.instructions(ctx, fn) + for _, anon := range fn.AnonFuncs { + // (4.2) functions use anonymous functions defined beneath them + // + // This 
fact is expressed implicitly. Anonymous functions have + // no types.Object, so their owner is the surrounding + // function. + g.function(ctx, anon) + } +} + +func (g *Graph) typ(ctx *context, t types.Type, parent types.Type) { + if g.wholeProgram { + g.mu.Lock() + } + if ctx.seenTypes.At(t) != nil { + if g.wholeProgram { + g.mu.Unlock() + } + return + } + if g.wholeProgram { + g.mu.Unlock() + } + if t, ok := t.(*types.Named); ok && t.Obj().Pkg() != nil { + if t.Obj().Pkg() != ctx.pkg.Pkg { + return + } + } + + if g.wholeProgram { + g.mu.Lock() + } + ctx.seenTypes.Set(t, struct{}{}) + if g.wholeProgram { + g.mu.Unlock() + } + if isIrrelevant(t) { + return + } + + ctx.see(t) + switch t := t.(type) { + case *types.Struct: + for i := 0; i < t.NumFields(); i++ { + ctx.see(t.Field(i)) + if t.Field(i).Exported() { + // (6.2) structs use exported fields + ctx.use(t.Field(i), t, edgeExportedField) + } else if t.Field(i).Name() == "_" { + ctx.use(t.Field(i), t, edgeBlankField) + } else if isNoCopyType(t.Field(i).Type()) { + // (6.1) structs use fields of type NoCopy sentinel + ctx.use(t.Field(i), t, edgeNoCopySentinel) + } else if parent == nil { + // (11.1) anonymous struct types use all their fields. + ctx.use(t.Field(i), t, edgeAnonymousStruct) + } + if t.Field(i).Anonymous() { + // (e3) exported identifiers aren't automatically used. + if !g.wholeProgram { + // does the embedded field contribute exported methods to the method set? + T := t.Field(i).Type() + if _, ok := T.Underlying().(*types.Pointer); !ok { + // An embedded field is addressable, so check + // the pointer type to get the full method set + T = types.NewPointer(T) + } + ms := ctx.pkg.SSA.Prog.MethodSets.MethodSet(T) + for j := 0; j < ms.Len(); j++ { + if ms.At(j).Obj().Exported() { + // (6.4) structs use embedded fields that have exported methods (recursively) + ctx.use(t.Field(i), t, edgeExtendsExportedMethodSet) + break + } + } + } + + seen := map[*types.Struct]struct{}{} + var hasExportedField func(t types.Type) bool + hasExportedField = func(T types.Type) bool { + t, ok := lintdsl.Dereference(T).Underlying().(*types.Struct) + if !ok { + return false + } + if _, ok := seen[t]; ok { + return false + } + seen[t] = struct{}{} + for i := 0; i < t.NumFields(); i++ { + field := t.Field(i) + if field.Exported() { + return true + } + if field.Embedded() && hasExportedField(field.Type()) { + return true + } + } + return false + } + // does the embedded field contribute exported fields? + if hasExportedField(t.Field(i).Type()) { + // (6.5) structs use embedded structs that have exported fields (recursively) + ctx.use(t.Field(i), t, edgeExtendsExportedFields) + } + + } + g.variable(ctx, t.Field(i)) + } + case *types.Basic: + // Nothing to do + case *types.Named: + // (9.3) types use their underlying and element types + ctx.seeAndUse(t.Underlying(), t, edgeUnderlyingType) + ctx.seeAndUse(t.Obj(), t, edgeTypeName) + ctx.seeAndUse(t, t.Obj(), edgeNamedType) + + // (2.4) named types use the pointer type + if _, ok := t.Underlying().(*types.Interface); !ok && t.NumMethods() > 0 { + ctx.seeAndUse(types.NewPointer(t), t, edgePointerType) + } + + for i := 0; i < t.NumMethods(); i++ { + ctx.see(t.Method(i)) + // don't use trackExportedIdentifier here, we care about + // all exported methods, even in package main or in tests. 
+ if t.Method(i).Exported() && !g.wholeProgram { + // (2.1) named types use exported methods + ctx.use(t.Method(i), t, edgeExportedMethod) + } + g.function(ctx, ctx.pkg.SSA.Prog.FuncValue(t.Method(i))) + } + + g.typ(ctx, t.Underlying(), t) + case *types.Slice: + // (9.3) types use their underlying and element types + ctx.seeAndUse(t.Elem(), t, edgeElementType) + g.typ(ctx, t.Elem(), nil) + case *types.Map: + // (9.3) types use their underlying and element types + ctx.seeAndUse(t.Elem(), t, edgeElementType) + // (9.3) types use their underlying and element types + ctx.seeAndUse(t.Key(), t, edgeKeyType) + g.typ(ctx, t.Elem(), nil) + g.typ(ctx, t.Key(), nil) + case *types.Signature: + g.signature(ctx, t, nil) + case *types.Interface: + for i := 0; i < t.NumMethods(); i++ { + m := t.Method(i) + // (8.3) All interface methods are marked as used + ctx.seeAndUse(m, t, edgeInterfaceMethod) + ctx.seeAndUse(m.Type().(*types.Signature), m, edgeSignature) + g.signature(ctx, m.Type().(*types.Signature), nil) + } + for i := 0; i < t.NumEmbeddeds(); i++ { + tt := t.EmbeddedType(i) + // (8.4) All embedded interfaces are marked as used + ctx.seeAndUse(tt, t, edgeEmbeddedInterface) + } + case *types.Array: + // (9.3) types use their underlying and element types + ctx.seeAndUse(t.Elem(), t, edgeElementType) + g.typ(ctx, t.Elem(), nil) + case *types.Pointer: + // (9.3) types use their underlying and element types + ctx.seeAndUse(t.Elem(), t, edgeElementType) + g.typ(ctx, t.Elem(), nil) + case *types.Chan: + // (9.3) types use their underlying and element types + ctx.seeAndUse(t.Elem(), t, edgeElementType) + g.typ(ctx, t.Elem(), nil) + case *types.Tuple: + for i := 0; i < t.Len(); i++ { + // (9.3) types use their underlying and element types + ctx.seeAndUse(t.At(i).Type(), t, edgeTupleElement|edgeType) + g.typ(ctx, t.At(i).Type(), nil) + } + default: + panic(fmt.Sprintf("unreachable: %T", t)) + } +} + +func (g *Graph) variable(ctx *context, v *types.Var) { + // (9.2) variables use their types + ctx.seeAndUse(v.Type(), v, edgeType) + g.typ(ctx, v.Type(), nil) +} + +func (g *Graph) signature(ctx *context, sig *types.Signature, fn types.Object) { + var user interface{} = fn + if fn == nil { + user = sig + ctx.see(sig) + } + if sig.Recv() != nil { + ctx.seeAndUse(sig.Recv().Type(), user, edgeReceiver|edgeType) + g.typ(ctx, sig.Recv().Type(), nil) + } + for i := 0; i < sig.Params().Len(); i++ { + param := sig.Params().At(i) + ctx.seeAndUse(param.Type(), user, edgeFunctionArgument|edgeType) + g.typ(ctx, param.Type(), nil) + } + for i := 0; i < sig.Results().Len(); i++ { + param := sig.Results().At(i) + ctx.seeAndUse(param.Type(), user, edgeFunctionResult|edgeType) + g.typ(ctx, param.Type(), nil) + } +} + +func (g *Graph) instructions(ctx *context, fn *ssa.Function) { + fnObj := owningObject(fn) + for _, b := range fn.Blocks { + for _, instr := range b.Instrs { + ops := instr.Operands(nil) + switch instr.(type) { + case *ssa.Store: + // (9.7) variable _reads_ use variables, writes do not + ops = ops[1:] + case *ssa.DebugRef: + ops = nil + } + for _, arg := range ops { + walkPhi(*arg, func(v ssa.Value) { + switch v := v.(type) { + case *ssa.Function: + // (4.3) functions use closures and bound methods. + // (4.5) functions use functions they call + // (9.5) instructions use their operands + // (4.4) functions use functions they return. 
we assume that someone else will call the returned function + if owningObject(v) != nil { + ctx.seeAndUse(owningObject(v), fnObj, edgeInstructionOperand) + } + g.function(ctx, v) + case *ssa.Const: + // (9.6) instructions use their operands' types + ctx.seeAndUse(v.Type(), fnObj, edgeType) + g.typ(ctx, v.Type(), nil) + case *ssa.Global: + if v.Object() != nil { + // (9.5) instructions use their operands + ctx.seeAndUse(v.Object(), fnObj, edgeInstructionOperand) + } + } + }) + } + if v, ok := instr.(ssa.Value); ok { + if _, ok := v.(*ssa.Range); !ok { + // See https://github.com/golang/go/issues/19670 + + // (4.8) instructions use their types + // (9.4) conversions use the type they convert to + ctx.seeAndUse(v.Type(), fnObj, edgeType) + g.typ(ctx, v.Type(), nil) + } + } + switch instr := instr.(type) { + case *ssa.Field: + st := instr.X.Type().Underlying().(*types.Struct) + field := st.Field(instr.Field) + // (4.7) functions use fields they access + ctx.seeAndUse(field, fnObj, edgeFieldAccess) + case *ssa.FieldAddr: + st := lintdsl.Dereference(instr.X.Type()).Underlying().(*types.Struct) + field := st.Field(instr.Field) + // (4.7) functions use fields they access + ctx.seeAndUse(field, fnObj, edgeFieldAccess) + case *ssa.Store: + // nothing to do, handled generically by operands + case *ssa.Call: + c := instr.Common() + if !c.IsInvoke() { + // handled generically as an instruction operand + + if g.wholeProgram { + // (e3) special case known reflection-based method callers + switch lintdsl.CallName(c) { + case "net/rpc.Register", "net/rpc.RegisterName", "(*net/rpc.Server).Register", "(*net/rpc.Server).RegisterName": + var arg ssa.Value + switch lintdsl.CallName(c) { + case "net/rpc.Register": + arg = c.Args[0] + case "net/rpc.RegisterName": + arg = c.Args[1] + case "(*net/rpc.Server).Register": + arg = c.Args[1] + case "(*net/rpc.Server).RegisterName": + arg = c.Args[2] + } + walkPhi(arg, func(v ssa.Value) { + if v, ok := v.(*ssa.MakeInterface); ok { + walkPhi(v.X, func(vv ssa.Value) { + ms := ctx.pkg.SSA.Prog.MethodSets.MethodSet(vv.Type()) + for i := 0; i < ms.Len(); i++ { + if ms.At(i).Obj().Exported() { + g.useMethod(ctx, vv.Type(), ms.At(i), fnObj, edgeNetRPCRegister) + } + } + }) + } + }) + } + } + } else { + // (4.5) functions use functions/interface methods they call + ctx.seeAndUse(c.Method, fnObj, edgeInterfaceCall) + } + case *ssa.Return: + // nothing to do, handled generically by operands + case *ssa.ChangeType: + // conversion type handled generically + + s1, ok1 := lintdsl.Dereference(instr.Type()).Underlying().(*types.Struct) + s2, ok2 := lintdsl.Dereference(instr.X.Type()).Underlying().(*types.Struct) + if ok1 && ok2 { + // Converting between two structs. The fields are + // relevant for the conversion, but only if the + // fields are also used outside of the conversion. + // Mark fields as used by each other. + + assert(s1.NumFields() == s2.NumFields()) + for i := 0; i < s1.NumFields(); i++ { + ctx.see(s1.Field(i)) + ctx.see(s2.Field(i)) + // (5.1) when converting between two equivalent structs, the fields in + // either struct use each other. the fields are relevant for the + // conversion, but only if the fields are also accessed outside the + // conversion. 
+ ctx.seeAndUse(s1.Field(i), s2.Field(i), edgeStructConversion) + ctx.seeAndUse(s2.Field(i), s1.Field(i), edgeStructConversion) + } + } + case *ssa.MakeInterface: + // nothing to do, handled generically by operands + case *ssa.Slice: + // nothing to do, handled generically by operands + case *ssa.RunDefers: + // nothing to do, the deferred functions are already marked use by defering them. + case *ssa.Convert: + // to unsafe.Pointer + if typ, ok := instr.Type().(*types.Basic); ok && typ.Kind() == types.UnsafePointer { + if ptr, ok := instr.X.Type().Underlying().(*types.Pointer); ok { + if st, ok := ptr.Elem().Underlying().(*types.Struct); ok { + for i := 0; i < st.NumFields(); i++ { + // (5.2) when converting to or from unsafe.Pointer, mark all fields as used. + ctx.seeAndUse(st.Field(i), fnObj, edgeUnsafeConversion) + } + } + } + } + // from unsafe.Pointer + if typ, ok := instr.X.Type().(*types.Basic); ok && typ.Kind() == types.UnsafePointer { + if ptr, ok := instr.Type().Underlying().(*types.Pointer); ok { + if st, ok := ptr.Elem().Underlying().(*types.Struct); ok { + for i := 0; i < st.NumFields(); i++ { + // (5.2) when converting to or from unsafe.Pointer, mark all fields as used. + ctx.seeAndUse(st.Field(i), fnObj, edgeUnsafeConversion) + } + } + } + } + case *ssa.TypeAssert: + // nothing to do, handled generically by instruction + // type (possibly a tuple, which contains the asserted + // to type). redundantly handled by the type of + // ssa.Extract, too + case *ssa.MakeClosure: + // nothing to do, handled generically by operands + case *ssa.Alloc: + // nothing to do + case *ssa.UnOp: + // nothing to do + case *ssa.BinOp: + // nothing to do + case *ssa.If: + // nothing to do + case *ssa.Jump: + // nothing to do + case *ssa.IndexAddr: + // nothing to do + case *ssa.Extract: + // nothing to do + case *ssa.Panic: + // nothing to do + case *ssa.DebugRef: + // nothing to do + case *ssa.BlankStore: + // nothing to do + case *ssa.Phi: + // nothing to do + case *ssa.MakeMap: + // nothing to do + case *ssa.MapUpdate: + // nothing to do + case *ssa.Lookup: + // nothing to do + case *ssa.MakeSlice: + // nothing to do + case *ssa.Send: + // nothing to do + case *ssa.MakeChan: + // nothing to do + case *ssa.Range: + // nothing to do + case *ssa.Next: + // nothing to do + case *ssa.Index: + // nothing to do + case *ssa.Select: + // nothing to do + case *ssa.ChangeInterface: + // nothing to do + case *ssa.Go: + // nothing to do, handled generically by operands + case *ssa.Defer: + // nothing to do, handled generically by operands + default: + panic(fmt.Sprintf("unreachable: %T", instr)) + } + } + } +} + +// isNoCopyType reports whether a type represents the NoCopy sentinel +// type. The NoCopy type is a named struct with no fields and exactly +// one method `func Lock()` that is empty. +// +// FIXME(dh): currently we're not checking that the function body is +// empty. 
+func isNoCopyType(typ types.Type) bool { + st, ok := typ.Underlying().(*types.Struct) + if !ok { + return false + } + if st.NumFields() != 0 { + return false + } + + named, ok := typ.(*types.Named) + if !ok { + return false + } + if named.NumMethods() != 1 { + return false + } + meth := named.Method(0) + if meth.Name() != "Lock" { + return false + } + sig := meth.Type().(*types.Signature) + if sig.Params().Len() != 0 || sig.Results().Len() != 0 { + return false + } + return true +} + +func walkPhi(v ssa.Value, fn func(v ssa.Value)) { + phi, ok := v.(*ssa.Phi) + if !ok { + fn(v) + return + } + + seen := map[ssa.Value]struct{}{} + var impl func(v *ssa.Phi) + impl = func(v *ssa.Phi) { + if _, ok := seen[v]; ok { + return + } + seen[v] = struct{}{} + for _, e := range v.Edges { + if ev, ok := e.(*ssa.Phi); ok { + impl(ev) + } else { + fn(e) + } + } + } + impl(phi) +} + +func interfacesFromExportData(pkg *types.Package) []*types.Interface { + var out []*types.Interface + scope := pkg.Scope() + for _, name := range scope.Names() { + obj := scope.Lookup(name) + out = append(out, interfacesFromObject(obj)...) + } + return out +} + +func interfacesFromObject(obj types.Object) []*types.Interface { + var out []*types.Interface + switch obj := obj.(type) { + case *types.Func: + sig := obj.Type().(*types.Signature) + for i := 0; i < sig.Results().Len(); i++ { + out = append(out, interfacesFromObject(sig.Results().At(i))...) + } + for i := 0; i < sig.Params().Len(); i++ { + out = append(out, interfacesFromObject(sig.Params().At(i))...) + } + case *types.TypeName: + if named, ok := obj.Type().(*types.Named); ok { + for i := 0; i < named.NumMethods(); i++ { + out = append(out, interfacesFromObject(named.Method(i))...) + } + + if iface, ok := named.Underlying().(*types.Interface); ok { + out = append(out, iface) + } + } + case *types.Var: + // No call to Underlying here. We want unnamed interfaces + // only. Named interfaces are gotten directly from the + // package's scope. 
+ if iface, ok := obj.Type().(*types.Interface); ok { + out = append(out, iface) + } + case *types.Const: + case *types.Builtin: + default: + panic(fmt.Sprintf("unhandled type: %T", obj)) + } + return out +} diff --git a/vendor/honnef.co/go/tools/version/buildinfo.go b/vendor/honnef.co/go/tools/version/buildinfo.go new file mode 100644 index 000000000..b6034bb7d --- /dev/null +++ b/vendor/honnef.co/go/tools/version/buildinfo.go @@ -0,0 +1,46 @@ +// +build go1.12 + +package version + +import ( + "fmt" + "runtime/debug" +) + +func printBuildInfo() { + if info, ok := debug.ReadBuildInfo(); ok { + fmt.Println("Main module:") + printModule(&info.Main) + fmt.Println("Dependencies:") + for _, dep := range info.Deps { + printModule(dep) + } + } else { + fmt.Println("Built without Go modules") + } +} + +func buildInfoVersion() (string, bool) { + info, ok := debug.ReadBuildInfo() + if !ok { + return "", false + } + if info.Main.Version == "(devel)" { + return "", false + } + return info.Main.Version, true +} + +func printModule(m *debug.Module) { + fmt.Printf("\t%s", m.Path) + if m.Version != "(devel)" { + fmt.Printf("@%s", m.Version) + } + if m.Sum != "" { + fmt.Printf(" (sum: %s)", m.Sum) + } + if m.Replace != nil { + fmt.Printf(" (replace: %s)", m.Replace.Path) + } + fmt.Println() +} diff --git a/vendor/honnef.co/go/tools/version/buildinfo111.go b/vendor/honnef.co/go/tools/version/buildinfo111.go new file mode 100644 index 000000000..06aae1e65 --- /dev/null +++ b/vendor/honnef.co/go/tools/version/buildinfo111.go @@ -0,0 +1,6 @@ +// +build !go1.12 + +package version + +func printBuildInfo() {} +func buildInfoVersion() (string, bool) { return "", false } diff --git a/vendor/honnef.co/go/tools/version/version.go b/vendor/honnef.co/go/tools/version/version.go new file mode 100644 index 000000000..468e8efd6 --- /dev/null +++ b/vendor/honnef.co/go/tools/version/version.go @@ -0,0 +1,42 @@ +package version + +import ( + "fmt" + "os" + "path/filepath" + "runtime" +) + +const Version = "2019.2.3" + +// version returns a version descriptor and reports whether the +// version is a known release. +func version() (string, bool) { + if Version != "devel" { + return Version, true + } + v, ok := buildInfoVersion() + if ok { + return v, false + } + return "devel", false +} + +func Print() { + v, release := version() + + if release { + fmt.Printf("%s %s\n", filepath.Base(os.Args[0]), v) + } else if v == "devel" { + fmt.Printf("%s (no version)\n", filepath.Base(os.Args[0])) + } else { + fmt.Printf("%s (devel, %s)\n", filepath.Base(os.Args[0]), v) + } +} + +func Verbose() { + Print() + fmt.Println() + fmt.Println("Compiled with Go version:", runtime.Version()) + printBuildInfo() +} diff --git a/vendor/k8s.io/api/admission/v1beta1/generated.pb.go b/vendor/k8s.io/api/admission/v1beta1/generated.pb.go index 4082082ff..10d3bead6 100644 --- a/vendor/k8s.io/api/admission/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/admission/v1beta1/generated.pb.go @@ -17,33 +17,24 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/admission/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/admission/v1beta1/generated.proto - - It has these top-level messages: - AdmissionRequest - AdmissionResponse - AdmissionReview -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + io "io" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import strings "strings" -import reflect "reflect" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import io "io" + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -56,27 +47,166 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AdmissionRequest) Reset() { *m = AdmissionRequest{} } -func (*AdmissionRequest) ProtoMessage() {} -func (*AdmissionRequest) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AdmissionRequest) Reset() { *m = AdmissionRequest{} } +func (*AdmissionRequest) ProtoMessage() {} +func (*AdmissionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b87c2352de86eab9, []int{0} +} +func (m *AdmissionRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdmissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdmissionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdmissionRequest.Merge(m, src) +} +func (m *AdmissionRequest) XXX_Size() int { + return m.Size() +} +func (m *AdmissionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AdmissionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AdmissionRequest proto.InternalMessageInfo + +func (m *AdmissionResponse) Reset() { *m = AdmissionResponse{} } +func (*AdmissionResponse) ProtoMessage() {} +func (*AdmissionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b87c2352de86eab9, []int{1} +} +func (m *AdmissionResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdmissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdmissionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdmissionResponse.Merge(m, src) +} +func (m *AdmissionResponse) XXX_Size() int { + return m.Size() +} +func (m *AdmissionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AdmissionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AdmissionResponse proto.InternalMessageInfo -func (m *AdmissionResponse) Reset() { *m = AdmissionResponse{} } -func (*AdmissionResponse) ProtoMessage() {} -func (*AdmissionResponse) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *AdmissionReview) Reset() { *m = AdmissionReview{} } +func (*AdmissionReview) ProtoMessage() {} +func (*AdmissionReview) Descriptor() ([]byte, []int) { + return fileDescriptor_b87c2352de86eab9, 
[]int{2} +} +func (m *AdmissionReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdmissionReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdmissionReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdmissionReview.Merge(m, src) +} +func (m *AdmissionReview) XXX_Size() int { + return m.Size() +} +func (m *AdmissionReview) XXX_DiscardUnknown() { + xxx_messageInfo_AdmissionReview.DiscardUnknown(m) +} -func (m *AdmissionReview) Reset() { *m = AdmissionReview{} } -func (*AdmissionReview) ProtoMessage() {} -func (*AdmissionReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_AdmissionReview proto.InternalMessageInfo func init() { proto.RegisterType((*AdmissionRequest)(nil), "k8s.io.api.admission.v1beta1.AdmissionRequest") proto.RegisterType((*AdmissionResponse)(nil), "k8s.io.api.admission.v1beta1.AdmissionResponse") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.admission.v1beta1.AdmissionResponse.AuditAnnotationsEntry") proto.RegisterType((*AdmissionReview)(nil), "k8s.io.api.admission.v1beta1.AdmissionReview") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admission/v1beta1/generated.proto", fileDescriptor_b87c2352de86eab9) +} + +var fileDescriptor_b87c2352de86eab9 = []byte{ + // 902 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xf7, 0xd6, 0x8e, 0xed, 0x1d, 0x87, 0xda, 0x9d, 0x82, 0xb4, 0xb2, 0xd0, 0xda, 0xe4, 0x80, + 0x82, 0xd4, 0xcc, 0x92, 0x08, 0xaa, 0xa8, 0xe2, 0x92, 0x25, 0x11, 0x0a, 0x48, 0x4d, 0x34, 0xad, + 0x51, 0xe1, 0x80, 0x34, 0xf6, 0x4e, 0xed, 0xc5, 0xf6, 0xcc, 0xb2, 0x33, 0xeb, 0xe0, 0x1b, 0xe2, + 0xca, 0x85, 0x6f, 0xc0, 0x87, 0xe1, 0x92, 0x63, 0x8f, 0x3d, 0x59, 0xc4, 0x7c, 0x8b, 0x9c, 0xd0, + 0xcc, 0xce, 0x7a, 0xb7, 0x76, 0x02, 0xfd, 0xc3, 0xc9, 0xf3, 0xfe, 0xfc, 0x7e, 0xef, 0xf9, 0xf7, + 0x76, 0xde, 0x80, 0x93, 0xf1, 0xa1, 0x40, 0x21, 0xf7, 0xc6, 0x49, 0x9f, 0xc6, 0x8c, 0x4a, 0x2a, + 0xbc, 0x19, 0x65, 0x01, 0x8f, 0x3d, 0x13, 0x20, 0x51, 0xe8, 0x91, 0x60, 0x1a, 0x0a, 0x11, 0x72, + 0xe6, 0xcd, 0xf6, 0xfb, 0x54, 0x92, 0x7d, 0x6f, 0x48, 0x19, 0x8d, 0x89, 0xa4, 0x01, 0x8a, 0x62, + 0x2e, 0x39, 0xfc, 0x30, 0xcd, 0x46, 0x24, 0x0a, 0xd1, 0x2a, 0x1b, 0x99, 0xec, 0xf6, 0xde, 0x30, + 0x94, 0xa3, 0xa4, 0x8f, 0x06, 0x7c, 0xea, 0x0d, 0xf9, 0x90, 0x7b, 0x1a, 0xd4, 0x4f, 0x9e, 0x6b, + 0x4b, 0x1b, 0xfa, 0x94, 0x92, 0xb5, 0x1f, 0x14, 0x4b, 0x27, 0x72, 0x44, 0x99, 0x0c, 0x07, 0x44, + 0xa6, 0xf5, 0xd7, 0x4b, 0xb7, 0x3f, 0xcb, 0xb3, 0xa7, 0x64, 0x30, 0x0a, 0x19, 0x8d, 0xe7, 0x5e, + 0x34, 0x1e, 0x2a, 0x87, 0xf0, 0xa6, 0x54, 0x92, 0x9b, 0x50, 0xde, 0x6d, 0xa8, 0x38, 0x61, 0x32, + 0x9c, 0xd2, 0x0d, 0xc0, 0xc3, 0xff, 0x02, 0x88, 0xc1, 0x88, 0x4e, 0xc9, 0x3a, 0x6e, 0xe7, 0x0f, + 0x1b, 0xb4, 0x8e, 0x32, 0x45, 0x30, 0xfd, 0x29, 0xa1, 0x42, 0x42, 0x1f, 0x94, 0x93, 0x30, 0x70, + 0xac, 0xae, 0xb5, 0x6b, 0xfb, 0x9f, 0x5e, 0x2e, 0x3a, 0xa5, 0xe5, 0xa2, 0x53, 0xee, 0x9d, 0x1e, + 0x5f, 0x2f, 0x3a, 0x1f, 0xdd, 0x56, 0x48, 0xce, 0x23, 0x2a, 0x50, 0xef, 0xf4, 0x18, 0x2b, 0x30, + 0x7c, 0x06, 0x2a, 0xe3, 0x90, 0x05, 0xce, 0x9d, 0xae, 0xb5, 0xdb, 0x38, 0x78, 0x88, 0xf2, 0x09, + 0xac, 0x60, 0x28, 0x1a, 0x0f, 0x95, 0x43, 0x20, 0x25, 0x03, 0x9a, 0xed, 0xa3, 0xaf, 0x62, 0x9e, + 0x44, 0xdf, 0xd2, 0x58, 0x35, 0xf3, 0x4d, 0xc8, 0x02, 0x7f, 0xdb, 0x14, 0xaf, 
0x28, 0x0b, 0x6b, + 0x46, 0x38, 0x02, 0xf5, 0x98, 0x0a, 0x9e, 0xc4, 0x03, 0xea, 0x94, 0x35, 0xfb, 0xa3, 0x37, 0x67, + 0xc7, 0x86, 0xc1, 0x6f, 0x99, 0x0a, 0xf5, 0xcc, 0x83, 0x57, 0xec, 0xf0, 0x73, 0xd0, 0x10, 0x49, + 0x3f, 0x0b, 0x38, 0x15, 0xad, 0xc7, 0x7d, 0x03, 0x68, 0x3c, 0xc9, 0x43, 0xb8, 0x98, 0x07, 0x43, + 0xd0, 0x88, 0x53, 0x25, 0x55, 0xd7, 0xce, 0x7b, 0xef, 0xa4, 0x40, 0x53, 0x95, 0xc2, 0x39, 0x1d, + 0x2e, 0x72, 0xc3, 0x39, 0x68, 0x1a, 0x73, 0xd5, 0xe5, 0xdd, 0x77, 0x96, 0xe4, 0xfe, 0x72, 0xd1, + 0x69, 0xe2, 0x57, 0x69, 0xf1, 0x7a, 0x1d, 0xf8, 0x35, 0x80, 0xc6, 0x55, 0x10, 0xc2, 0x69, 0x6a, + 0x8d, 0xda, 0x46, 0x23, 0x88, 0x37, 0x32, 0xf0, 0x0d, 0x28, 0xd8, 0x05, 0x15, 0x46, 0xa6, 0xd4, + 0xd9, 0xd2, 0xe8, 0xd5, 0xd0, 0x1f, 0x93, 0x29, 0xc5, 0x3a, 0x02, 0x3d, 0x60, 0xab, 0x5f, 0x11, + 0x91, 0x01, 0x75, 0xaa, 0x3a, 0xed, 0x9e, 0x49, 0xb3, 0x1f, 0x67, 0x01, 0x9c, 0xe7, 0xc0, 0x2f, + 0x80, 0xcd, 0x23, 0xf5, 0xa9, 0x87, 0x9c, 0x39, 0x35, 0x0d, 0x70, 0x33, 0xc0, 0x59, 0x16, 0xb8, + 0x2e, 0x1a, 0x38, 0x07, 0xc0, 0xa7, 0xa0, 0x9e, 0x08, 0x1a, 0x9f, 0xb2, 0xe7, 0xdc, 0xa9, 0x6b, + 0x41, 0x3f, 0x46, 0xc5, 0x1d, 0xf2, 0xca, 0xb5, 0x57, 0x42, 0xf6, 0x4c, 0x76, 0xfe, 0x3d, 0x65, + 0x1e, 0xbc, 0x62, 0x82, 0x3d, 0x50, 0xe5, 0xfd, 0x1f, 0xe9, 0x40, 0x3a, 0xb6, 0xe6, 0xdc, 0xbb, + 0x75, 0x48, 0xe6, 0xd6, 0x22, 0x4c, 0x2e, 0x4e, 0x7e, 0x96, 0x94, 0xa9, 0xf9, 0xf8, 0x77, 0x0d, + 0x75, 0xf5, 0x4c, 0x93, 0x60, 0x43, 0x06, 0x7f, 0x00, 0x36, 0x9f, 0x04, 0xa9, 0xd3, 0x01, 0x6f, + 0xc3, 0xbc, 0x92, 0xf2, 0x2c, 0xe3, 0xc1, 0x39, 0x25, 0xdc, 0x01, 0xd5, 0x20, 0x9e, 0xe3, 0x84, + 0x39, 0x8d, 0xae, 0xb5, 0x5b, 0xf7, 0x81, 0xea, 0xe1, 0x58, 0x7b, 0xb0, 0x89, 0xc0, 0x67, 0xa0, + 0xc6, 0x23, 0x25, 0x86, 0x70, 0xb6, 0xdf, 0xa6, 0x83, 0xa6, 0xe9, 0xa0, 0x76, 0x96, 0xb2, 0xe0, + 0x8c, 0x6e, 0xe7, 0xd7, 0x0a, 0xb8, 0x57, 0xd8, 0x50, 0x22, 0xe2, 0x4c, 0xd0, 0xff, 0x65, 0x45, + 0x7d, 0x02, 0x6a, 0x64, 0x32, 0xe1, 0x17, 0x34, 0xdd, 0x52, 0xf5, 0xbc, 0x89, 0xa3, 0xd4, 0x8d, + 0xb3, 0x38, 0x3c, 0x07, 0x55, 0x21, 0x89, 0x4c, 0x84, 0xd9, 0x38, 0x0f, 0x5e, 0xef, 0x7a, 0x3d, + 0xd1, 0x98, 0x54, 0x30, 0x4c, 0x45, 0x32, 0x91, 0xd8, 0xf0, 0xc0, 0x0e, 0xd8, 0x8a, 0x88, 0x1c, + 0x8c, 0xf4, 0x56, 0xd9, 0xf6, 0xed, 0xe5, 0xa2, 0xb3, 0x75, 0xae, 0x1c, 0x38, 0xf5, 0xc3, 0x43, + 0x60, 0xeb, 0xc3, 0xd3, 0x79, 0x94, 0x5d, 0x8c, 0xb6, 0x1a, 0xd1, 0x79, 0xe6, 0xbc, 0x2e, 0x1a, + 0x38, 0x4f, 0x86, 0xbf, 0x59, 0xa0, 0x45, 0x92, 0x20, 0x94, 0x47, 0x8c, 0x71, 0x49, 0xd2, 0xa9, + 0x54, 0xbb, 0xe5, 0xdd, 0xc6, 0xc1, 0x09, 0xfa, 0xb7, 0x97, 0x10, 0x6d, 0xe8, 0x8c, 0x8e, 0xd6, + 0x78, 0x4e, 0x98, 0x8c, 0xe7, 0xbe, 0x63, 0x84, 0x6a, 0xad, 0x87, 0xf1, 0x46, 0xe1, 0xf6, 0x97, + 0xe0, 0x83, 0x1b, 0x49, 0x60, 0x0b, 0x94, 0xc7, 0x74, 0x9e, 0x8e, 0x10, 0xab, 0x23, 0x7c, 0x1f, + 0x6c, 0xcd, 0xc8, 0x24, 0xa1, 0x7a, 0x1c, 0x36, 0x4e, 0x8d, 0x47, 0x77, 0x0e, 0xad, 0x9d, 0x3f, + 0x2d, 0xd0, 0x2c, 0x34, 0x37, 0x0b, 0xe9, 0x05, 0xec, 0x81, 0x9a, 0x59, 0x25, 0x9a, 0xa3, 0x71, + 0x80, 0x5e, 0xfb, 0xcf, 0x69, 0x94, 0xdf, 0x50, 0xa3, 0xce, 0xf6, 0x5c, 0xc6, 0x05, 0xbf, 0xd3, + 0xcf, 0x8b, 0xfe, 0xf7, 0xe6, 0xf1, 0xf2, 0xde, 0x50, 0x34, 0x7f, 0xdb, 0xbc, 0x27, 0xda, 0xc2, + 0x2b, 0x3a, 0x7f, 0xef, 0xf2, 0xca, 0x2d, 0xbd, 0xb8, 0x72, 0x4b, 0x2f, 0xaf, 0xdc, 0xd2, 0x2f, + 0x4b, 0xd7, 0xba, 0x5c, 0xba, 0xd6, 0x8b, 0xa5, 0x6b, 0xbd, 0x5c, 0xba, 0xd6, 0x5f, 0x4b, 0xd7, + 0xfa, 0xfd, 0x6f, 0xb7, 0xf4, 0x7d, 0xcd, 0x10, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0x8b, 0xd1, + 0x27, 0x74, 0xfd, 0x08, 0x00, 0x00, +} + func (m *AdmissionRequest) Marshal() (dAtA []byte, err error) { 
size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -84,87 +214,146 @@ func (m *AdmissionRequest) Marshal() (dAtA []byte, err error) { } func (m *AdmissionRequest) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdmissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Kind.Size())) - n1, err := m.Kind.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n2, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubResource))) - i += copy(dAtA[i:], m.SubResource) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) - i += copy(dAtA[i:], m.Operation) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UserInfo.Size())) - n3, err := m.UserInfo.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.RequestSubResource) + copy(dAtA[i:], m.RequestSubResource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequestSubResource))) + i-- + dAtA[i] = 0x7a + if m.RequestResource != nil { + { + size, err := m.RequestResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 } - i += n3 - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n4, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.RequestKind != nil { + { + size, err := m.RequestKind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a } - i += n4 - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.OldObject.Size())) - n5, err := m.OldObject.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- + dAtA[i] = 0x62 if m.DryRun != nil { - dAtA[i] = 0x58 - i++ + i-- if *m.DryRun { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x58 + } + { + size, err := m.OldObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + { + size, err := m.UserInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x42 + i -= len(m.Operation) + copy(dAtA[i:], m.Operation) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) + i-- + dAtA[i] = 0x3a + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x32 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x2a + i -= len(m.SubResource) + copy(dAtA[i:], m.SubResource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubResource))) + i-- + dAtA[i] = 0x22 + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Kind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *AdmissionResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -172,73 +361,85 @@ func (m *AdmissionResponse) Marshal() (dAtA []byte, err error) { } func (m *AdmissionResponse) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdmissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - dAtA[i] = 0x10 - i++ - if m.Allowed { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.Result != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Result.Size())) - n6, err := m.Result.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - } - if m.Patch != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Patch))) - i += copy(dAtA[i:], m.Patch) - } - if m.PatchType != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PatchType))) - i += copy(dAtA[i:], *m.PatchType) - } if len(m.AuditAnnotations) > 0 { keysForAuditAnnotations := make([]string, 0, len(m.AuditAnnotations)) for k := range m.AuditAnnotations { keysForAuditAnnotations = append(keysForAuditAnnotations, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) - for _, k := range keysForAuditAnnotations { - dAtA[i] = 0x32 - i++ - v := m.AuditAnnotations[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ + for iNdEx := len(keysForAuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.AuditAnnotations[string(keysForAuditAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) + i-- + dAtA[i] = 0x12 + i -= len(keysForAuditAnnotations[iNdEx]) + copy(dAtA[i:], keysForAuditAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAuditAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 } } - return i, nil + if m.PatchType != nil { + i -= 
len(*m.PatchType) + copy(dAtA[i:], *m.PatchType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PatchType))) + i-- + dAtA[i] = 0x2a + } + if m.Patch != nil { + i -= len(m.Patch) + copy(dAtA[i:], m.Patch) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Patch))) + i-- + dAtA[i] = 0x22 + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i-- + if m.Allowed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *AdmissionReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -246,43 +447,57 @@ func (m *AdmissionReview) Marshal() (dAtA []byte, err error) { } func (m *AdmissionReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdmissionReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Request != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Request.Size())) - n7, err := m.Request.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } if m.Response != nil { + { + size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Response.Size())) - n8, err := m.Response.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.Request != nil { + { + size, err := m.Request.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *AdmissionRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.UID) @@ -308,10 +523,25 @@ func (m *AdmissionRequest) Size() (n int) { if m.DryRun != nil { n += 2 } + l = m.Options.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.RequestKind != nil { + l = m.RequestKind.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RequestResource != nil { + l = m.RequestResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.RequestSubResource) + n += 1 + l + sovGenerated(uint64(l)) return n } func (m *AdmissionResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.UID) @@ -341,6 +571,9 @@ func (m *AdmissionResponse) Size() (n int) { } func (m *AdmissionReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Request != nil { @@ -355,14 +588,7 @@ func (m *AdmissionReview) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -373,16 
+599,20 @@ func (this *AdmissionRequest) String() string { } s := strings.Join([]string{`&AdmissionRequest{`, `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `Kind:` + strings.Replace(strings.Replace(this.Kind.String(), "GroupVersionKind", "k8s_io_apimachinery_pkg_apis_meta_v1.GroupVersionKind", 1), `&`, ``, 1) + `,`, - `Resource:` + strings.Replace(strings.Replace(this.Resource.String(), "GroupVersionResource", "k8s_io_apimachinery_pkg_apis_meta_v1.GroupVersionResource", 1), `&`, ``, 1) + `,`, + `Kind:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Kind), "GroupVersionKind", "v1.GroupVersionKind", 1), `&`, ``, 1) + `,`, + `Resource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resource), "GroupVersionResource", "v1.GroupVersionResource", 1), `&`, ``, 1) + `,`, `SubResource:` + fmt.Sprintf("%v", this.SubResource) + `,`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`, - `UserInfo:` + strings.Replace(strings.Replace(this.UserInfo.String(), "UserInfo", "k8s_io_api_authentication_v1.UserInfo", 1), `&`, ``, 1) + `,`, - `Object:` + strings.Replace(strings.Replace(this.Object.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `OldObject:` + strings.Replace(strings.Replace(this.OldObject.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `UserInfo:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UserInfo), "UserInfo", "v11.UserInfo", 1), `&`, ``, 1) + `,`, + `Object:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Object), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `OldObject:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OldObject), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, `DryRun:` + valueToStringGenerated(this.DryRun) + `,`, + `Options:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Options), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `RequestKind:` + strings.Replace(fmt.Sprintf("%v", this.RequestKind), "GroupVersionKind", "v1.GroupVersionKind", 1) + `,`, + `RequestResource:` + strings.Replace(fmt.Sprintf("%v", this.RequestResource), "GroupVersionResource", "v1.GroupVersionResource", 1) + `,`, + `RequestSubResource:` + fmt.Sprintf("%v", this.RequestSubResource) + `,`, `}`, }, "") return s @@ -404,7 +634,7 @@ func (this *AdmissionResponse) String() string { s := strings.Join([]string{`&AdmissionResponse{`, `UID:` + fmt.Sprintf("%v", this.UID) + `,`, `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, - `Result:` + strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "k8s_io_apimachinery_pkg_apis_meta_v1.Status", 1) + `,`, + `Result:` + strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "v1.Status", 1) + `,`, `Patch:` + valueToStringGenerated(this.Patch) + `,`, `PatchType:` + valueToStringGenerated(this.PatchType) + `,`, `AuditAnnotations:` + mapStringForAuditAnnotations + `,`, @@ -417,8 +647,8 @@ func (this *AdmissionReview) String() string { return "nil" } s := strings.Join([]string{`&AdmissionReview{`, - `Request:` + strings.Replace(fmt.Sprintf("%v", this.Request), "AdmissionRequest", "AdmissionRequest", 1) + `,`, - `Response:` + strings.Replace(fmt.Sprintf("%v", this.Response), "AdmissionResponse", "AdmissionResponse", 1) + `,`, + `Request:` + strings.Replace(this.Request.String(), "AdmissionRequest", "AdmissionRequest", 1) + `,`, + `Response:` + 
strings.Replace(this.Response.String(), "AdmissionResponse", "AdmissionResponse", 1) + `,`, `}`, }, "") return s @@ -446,7 +676,7 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -474,7 +704,7 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -484,6 +714,9 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -503,7 +736,7 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -512,6 +745,9 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -533,7 +769,7 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -542,6 +778,9 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -563,7 +802,7 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -573,6 +812,9 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -592,7 +834,7 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -602,6 +844,9 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -621,7 +866,7 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -631,6 +876,9 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -650,7 +898,7 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -660,6 +908,9 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -679,7 +930,7 @@ func (m 
*AdmissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -688,6 +939,9 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -709,7 +963,7 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -718,6 +972,9 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -739,7 +996,7 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -748,6 +1005,9 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -769,13 +1029,150 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } b := bool(v != 0) m.DryRun = &b + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestKind", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestKind == nil { + m.RequestKind = &v1.GroupVersionKind{} + } + if err := m.RequestKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
if m.RequestResource == nil { + m.RequestResource = &v1.GroupVersionResource{} + } + if err := m.RequestResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestSubResource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequestSubResource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -785,6 +1182,9 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -812,7 +1212,7 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -840,7 +1240,7 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -850,6 +1250,9 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -869,7 +1272,7 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -889,7 +1292,7 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -898,11 +1301,14 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Result == nil { - m.Result = &k8s_io_apimachinery_pkg_apis_meta_v1.Status{} + m.Result = &v1.Status{} } if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -922,7 +1328,7 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -931,6 +1337,9 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -953,7 +1362,7 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -963,6 +1372,9 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -983,7 +1395,7 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -992,6 +1404,9 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1012,7 +1427,7 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1029,7 +1444,7 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1039,6 +1454,9 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -1055,7 +1473,7 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1065,6 +1483,9 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -1096,6 +1517,9 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1123,7 +1547,7 @@ func (m *AdmissionReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1151,7 +1575,7 @@ func (m *AdmissionReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1160,6 +1584,9 @@ func (m *AdmissionReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1184,7 +1611,7 @@ func (m *AdmissionReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1193,6 +1620,9 @@ func (m *AdmissionReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1212,6 +1642,9 @@ func (m *AdmissionReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1278,10 +1711,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, 
ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1310,6 +1746,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1328,63 +1767,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admission/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 821 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0x8e, 0x37, 0x69, 0x12, 0x4f, 0x2a, 0x36, 0x3b, 0x80, 0x64, 0x45, 0xc8, 0x09, 0x3d, 0xa0, - 0x20, 0x6d, 0xc7, 0xb4, 0x82, 0x55, 0xb5, 0xe2, 0x12, 0xd3, 0x08, 0x55, 0x48, 0xdb, 0x6a, 0x76, - 0x83, 0x80, 0x03, 0xd2, 0xc4, 0x9e, 0x4d, 0x4c, 0xe2, 0x19, 0xe3, 0x99, 0x49, 0xc9, 0x0d, 0x71, - 0xe5, 0x82, 0xc4, 0x9f, 0xc4, 0xa5, 0xc7, 0x3d, 0xee, 0x29, 0xa2, 0xe1, 0xbf, 0xe8, 0x09, 0x79, - 0x3c, 0x8e, 0x43, 0xba, 0x85, 0x5d, 0xb4, 0x27, 0xfb, 0xfd, 0xf8, 0xbe, 0x37, 0xf3, 0xbd, 0x37, - 0x0f, 0x0c, 0x67, 0x27, 0x02, 0x45, 0xdc, 0x9b, 0xa9, 0x31, 0x4d, 0x19, 0x95, 0x54, 0x78, 0x0b, - 0xca, 0x42, 0x9e, 0x7a, 0x26, 0x40, 0x92, 0xc8, 0x23, 0x61, 0x1c, 0x09, 0x11, 0x71, 0xe6, 0x2d, - 0x8e, 0xc6, 0x54, 0x92, 0x23, 0x6f, 0x42, 0x19, 0x4d, 0x89, 0xa4, 0x21, 0x4a, 0x52, 0x2e, 0x39, - 0xfc, 0x20, 0xcf, 0x46, 0x24, 0x89, 0xd0, 0x26, 0x1b, 0x99, 0xec, 0xce, 0xe1, 0x24, 0x92, 0x53, - 0x35, 0x46, 0x01, 0x8f, 0xbd, 0x09, 0x9f, 0x70, 0x4f, 0x83, 0xc6, 0xea, 0xb9, 0xb6, 0xb4, 0xa1, - 0xff, 0x72, 0xb2, 0xce, 0xc3, 0xed, 0xd2, 0x4a, 0x4e, 0x29, 0x93, 0x51, 0x40, 0x64, 0x5e, 0x7f, - 0xb7, 0x74, 0xe7, 0xd3, 0x32, 0x3b, 0x26, 0xc1, 0x34, 0x62, 0x34, 0x5d, 0x7a, 0xc9, 0x6c, 0x92, - 0x39, 0x84, 0x17, 0x53, 0x49, 0x5e, 0x85, 0xf2, 0xee, 0x42, 0xa5, 0x8a, 0xc9, 0x28, 0xa6, 0xb7, - 0x00, 0x8f, 0xfe, 0x0b, 0x20, 0x82, 0x29, 0x8d, 0xc9, 0x2e, 0xee, 0xe0, 0xf7, 0x3a, 0x68, 0x0f, - 0x0a, 0x45, 0x30, 0xfd, 0x51, 0x51, 0x21, 0xa1, 0x0f, 0xaa, 0x2a, 0x0a, 0x1d, 0xab, 0x67, 0xf5, - 0x6d, 0xff, 0x93, 0xab, 0x55, 0xb7, 0xb2, 0x5e, 0x75, 0xab, 0xa3, 0xb3, 0xd3, 0x9b, 0x55, 0xf7, - 0xc3, 0xbb, 0x0a, 0xc9, 0x65, 0x42, 0x05, 0x1a, 0x9d, 0x9d, 0xe2, 0x0c, 0x0c, 0xbf, 0x01, 0xb5, - 0x59, 0xc4, 0x42, 0xe7, 0x5e, 0xcf, 0xea, 0xb7, 0x8e, 0x1f, 0xa1, 0xb2, 0x03, 0x1b, 0x18, 0x4a, - 0x66, 0x93, 0xcc, 0x21, 0x50, 0x26, 0x03, 0x5a, 0x1c, 0xa1, 0x2f, 0x53, 0xae, 0x92, 0xaf, 0x69, - 0x9a, 0x1d, 0xe6, 0xab, 0x88, 0x85, 0xfe, 0xbe, 0x29, 0x5e, 0xcb, 0x2c, 0xac, 0x19, 0xe1, 0x14, - 0x34, 0x53, 0x2a, 0xb8, 0x4a, 0x03, 0xea, 0x54, 0x35, 0xfb, 0xe3, 0x37, 0x67, 0xc7, 0x86, 0xc1, - 0x6f, 0x9b, 0x0a, 0xcd, 0xc2, 0x83, 0x37, 0xec, 0xf0, 0x33, 0xd0, 0x12, 0x6a, 0x5c, 0x04, 0x9c, - 0x9a, 0xd6, 0xe3, 0x5d, 0x03, 0x68, 0x3d, 0x2d, 0x43, 0x78, 0x3b, 0x0f, 0xf6, 0x40, 0x8d, 0x91, - 0x98, 0x3a, 0x7b, 0x3a, 0x7f, 0x73, 0x85, 0x27, 0x24, 0xa6, 0x58, 0x47, 0xa0, 0x07, 0xec, 0xec, - 0x2b, 0x12, 0x12, 0x50, 0xa7, 0xae, 0xd3, 0x1e, 0x98, 0x34, 0xfb, 0x49, 0x11, 0xc0, 0x65, 0x0e, - 0xfc, 0x1c, 0xd8, 0x3c, 0xc9, 0x1a, 0x17, 0x71, 0xe6, 0x34, 0x34, 0xc0, 0x2d, 0x00, 0xe7, 0x45, - 0xe0, 0x66, 0xdb, 0xc0, 0x25, 0x00, 0x3e, 0x03, 0x4d, 0x25, 0x68, 0x7a, 0xc6, 0x9e, 0x73, 0xa7, - 0xa9, 0x15, 0xfb, 0x08, 0x6d, 0xbf, 0x88, 0x7f, 0x0c, 
0x71, 0xa6, 0xd4, 0xc8, 0x64, 0x97, 0xea, - 0x14, 0x1e, 0xbc, 0x61, 0x82, 0x23, 0x50, 0xe7, 0xe3, 0x1f, 0x68, 0x20, 0x1d, 0x5b, 0x73, 0x1e, - 0xde, 0xd9, 0x05, 0x33, 0x83, 0x08, 0x93, 0xcb, 0xe1, 0x4f, 0x92, 0xb2, 0xac, 0x01, 0xfe, 0x3b, - 0x86, 0xba, 0x7e, 0xae, 0x49, 0xb0, 0x21, 0x83, 0xdf, 0x03, 0x9b, 0xcf, 0xc3, 0xdc, 0xe9, 0x80, - 0xff, 0xc3, 0xbc, 0x91, 0xf2, 0xbc, 0xe0, 0xc1, 0x25, 0x25, 0x3c, 0x00, 0xf5, 0x30, 0x5d, 0x62, - 0xc5, 0x9c, 0x56, 0xcf, 0xea, 0x37, 0x7d, 0x90, 0x9d, 0xe1, 0x54, 0x7b, 0xb0, 0x89, 0x1c, 0xfc, - 0x52, 0x03, 0x0f, 0xb6, 0x5e, 0x85, 0x48, 0x38, 0x13, 0xf4, 0xad, 0x3c, 0x8b, 0x8f, 0x41, 0x83, - 0xcc, 0xe7, 0xfc, 0x92, 0xe6, 0x2f, 0xa3, 0xe9, 0xdf, 0x37, 0x3c, 0x8d, 0x41, 0xee, 0xc6, 0x45, - 0x1c, 0x5e, 0x80, 0xba, 0x90, 0x44, 0x2a, 0x61, 0xa6, 0xfc, 0xe1, 0xeb, 0x4d, 0xf9, 0x53, 0x8d, - 0xc9, 0xaf, 0x85, 0xa9, 0x50, 0x73, 0x89, 0x0d, 0x0f, 0xec, 0x82, 0xbd, 0x84, 0xc8, 0x60, 0xaa, - 0x27, 0x79, 0xdf, 0xb7, 0xd7, 0xab, 0xee, 0xde, 0x45, 0xe6, 0xc0, 0xb9, 0x1f, 0x9e, 0x00, 0x5b, - 0xff, 0x3c, 0x5b, 0x26, 0xc5, 0xf8, 0x76, 0x32, 0x21, 0x2f, 0x0a, 0xe7, 0xcd, 0xb6, 0x81, 0xcb, - 0x64, 0xf8, 0xab, 0x05, 0xda, 0x44, 0x85, 0x91, 0x1c, 0x30, 0xc6, 0xa5, 0x1e, 0x24, 0xe1, 0xd4, - 0x7b, 0xd5, 0x7e, 0xeb, 0x78, 0x88, 0xfe, 0x6d, 0xfb, 0xa2, 0x5b, 0x3a, 0xa3, 0xc1, 0x0e, 0xcf, - 0x90, 0xc9, 0x74, 0xe9, 0x3b, 0x46, 0xa8, 0xf6, 0x6e, 0x18, 0xdf, 0x2a, 0xdc, 0xf9, 0x02, 0xbc, - 0xff, 0x4a, 0x12, 0xd8, 0x06, 0xd5, 0x19, 0x5d, 0xe6, 0x2d, 0xc4, 0xd9, 0x2f, 0x7c, 0x0f, 0xec, - 0x2d, 0xc8, 0x5c, 0x51, 0xdd, 0x0e, 0x1b, 0xe7, 0xc6, 0xe3, 0x7b, 0x27, 0xd6, 0xc1, 0x1f, 0x16, - 0xb8, 0xbf, 0x75, 0xb8, 0x45, 0x44, 0x2f, 0xe1, 0x08, 0x34, 0xd2, 0x7c, 0x49, 0x6a, 0x8e, 0xd6, - 0x31, 0x7a, 0xed, 0xcb, 0x69, 0x94, 0xdf, 0xca, 0x5a, 0x6d, 0x0c, 0x5c, 0x70, 0xc1, 0x6f, 0xf5, - 0x4a, 0xd3, 0xb7, 0x37, 0x0b, 0xd3, 0x7b, 0x43, 0xd1, 0xfc, 0x7d, 0xb3, 0xc3, 0xb4, 0x85, 0x37, - 0x74, 0xfe, 0xe1, 0xd5, 0xb5, 0x5b, 0x79, 0x71, 0xed, 0x56, 0x5e, 0x5e, 0xbb, 0x95, 0x9f, 0xd7, - 0xae, 0x75, 0xb5, 0x76, 0xad, 0x17, 0x6b, 0xd7, 0x7a, 0xb9, 0x76, 0xad, 0x3f, 0xd7, 0xae, 0xf5, - 0xdb, 0x5f, 0x6e, 0xe5, 0xbb, 0x86, 0x21, 0xfe, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xf4, 0xc2, 0x6f, - 0x1b, 0x71, 0x07, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/admission/v1beta1/generated.proto b/vendor/k8s.io/api/admission/v1beta1/generated.proto index 451d4c9ad..6999b80c2 100644 --- a/vendor/k8s.io/api/admission/v1beta1/generated.proto +++ b/vendor/k8s.io/api/admission/v1beta1/generated.proto @@ -37,22 +37,50 @@ message AdmissionRequest { // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. optional string uid = 1; - // Kind is the type of object being manipulated. For example: Pod + // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale) optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 2; - // Resource is the name of the resource being requested. This is not the kind. For example: pods + // Resource is the fully-qualified resource being requested (for example, v1.pods) optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 3; - // SubResource is the name of the subresource being requested. This is a different resource, scoped to the parent - // resource, but it may have a different kind. 
For instance, /pods has the resource "pods" and the kind "Pod", while - // /pods/foo/status has the resource "pods", the sub resource "status", and the kind "Pod" (because status operates on - // pods). The binding resource for a pod though may be /pods/foo/binding, which has resource "pods", subresource - // "binding", and kind "Binding". + // SubResource is the subresource being requested, if any (for example, "status" or "scale") // +optional optional string subResource = 4; + // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). + // If this is specified and differs from the value in "kind", an equivalent match and conversion was performed. + // + // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of + // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, + // an API request to apps/v1beta1 deployments would be converted and sent to the webhook + // with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for), + // and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request). + // + // See documentation for the "matchPolicy" field in the webhook configuration type for more details. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind requestKind = 13; + + // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). + // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed. + // + // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of + // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, + // an API request to apps/v1beta1 deployments would be converted and sent to the webhook + // with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for), + // and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request). + // + // See documentation for the "matchPolicy" field in the webhook configuration type. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource requestResource = 14; + + // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale") + // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed. + // See documentation for the "matchPolicy" field in the webhook configuration type. + // +optional + optional string requestSubResource = 15; + // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and - // rely on the server to generate the name. If that is the case, this method will return the empty string. + // rely on the server to generate the name. If that is the case, this field will contain an empty string. // +optional optional string name = 5; @@ -60,17 +88,18 @@ message AdmissionRequest { // +optional optional string namespace = 6; - // Operation is the operation being performed + // Operation is the operation being performed. This may be different than the operation + // requested. e.g. 
a patch can result in either a CREATE or UPDATE Operation. optional string operation = 7; // UserInfo is information about the requesting user optional k8s.io.api.authentication.v1.UserInfo userInfo = 8; - // Object is the object from the incoming request prior to default values being applied + // Object is the object from the incoming request. // +optional optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 9; - // OldObject is the existing object. Only populated for UPDATE requests. + // OldObject is the existing object. Only populated for DELETE and UPDATE requests. // +optional optional k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 10; @@ -78,6 +107,14 @@ message AdmissionRequest { // Defaults to false. // +optional optional bool dryRun = 11; + + // Options is the operation option structure of the operation being performed. + // e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be + // different than the options the caller provided. e.g. for a patch request the performed + // Operation might be a CREATE, in which case the Options will a + // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension options = 12; } // AdmissionResponse describes an admission response. diff --git a/vendor/k8s.io/api/admission/v1beta1/types.go b/vendor/k8s.io/api/admission/v1beta1/types.go index 653e84710..2cb9ea55a 100644 --- a/vendor/k8s.io/api/admission/v1beta1/types.go +++ b/vendor/k8s.io/api/admission/v1beta1/types.go @@ -43,38 +43,73 @@ type AdmissionRequest struct { // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"` - // Kind is the type of object being manipulated. For example: Pod + // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale) Kind metav1.GroupVersionKind `json:"kind" protobuf:"bytes,2,opt,name=kind"` - // Resource is the name of the resource being requested. This is not the kind. For example: pods + // Resource is the fully-qualified resource being requested (for example, v1.pods) Resource metav1.GroupVersionResource `json:"resource" protobuf:"bytes,3,opt,name=resource"` - // SubResource is the name of the subresource being requested. This is a different resource, scoped to the parent - // resource, but it may have a different kind. For instance, /pods has the resource "pods" and the kind "Pod", while - // /pods/foo/status has the resource "pods", the sub resource "status", and the kind "Pod" (because status operates on - // pods). The binding resource for a pod though may be /pods/foo/binding, which has resource "pods", subresource - // "binding", and kind "Binding". + // SubResource is the subresource being requested, if any (for example, "status" or "scale") // +optional SubResource string `json:"subResource,omitempty" protobuf:"bytes,4,opt,name=subResource"` + + // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). + // If this is specified and differs from the value in "kind", an equivalent match and conversion was performed. 
+ // + // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of + // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, + // an API request to apps/v1beta1 deployments would be converted and sent to the webhook + // with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for), + // and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request). + // + // See documentation for the "matchPolicy" field in the webhook configuration type for more details. + // +optional + RequestKind *metav1.GroupVersionKind `json:"requestKind,omitempty" protobuf:"bytes,13,opt,name=requestKind"` + // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). + // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed. + // + // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of + // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, + // an API request to apps/v1beta1 deployments would be converted and sent to the webhook + // with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for), + // and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request). + // + // See documentation for the "matchPolicy" field in the webhook configuration type. + // +optional + RequestResource *metav1.GroupVersionResource `json:"requestResource,omitempty" protobuf:"bytes,14,opt,name=requestResource"` + // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale") + // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed. + // See documentation for the "matchPolicy" field in the webhook configuration type. + // +optional + RequestSubResource string `json:"requestSubResource,omitempty" protobuf:"bytes,15,opt,name=requestSubResource"` + // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and - // rely on the server to generate the name. If that is the case, this method will return the empty string. + // rely on the server to generate the name. If that is the case, this field will contain an empty string. // +optional Name string `json:"name,omitempty" protobuf:"bytes,5,opt,name=name"` // Namespace is the namespace associated with the request (if any). // +optional Namespace string `json:"namespace,omitempty" protobuf:"bytes,6,opt,name=namespace"` - // Operation is the operation being performed + // Operation is the operation being performed. This may be different than the operation + // requested. e.g. a patch can result in either a CREATE or UPDATE Operation. Operation Operation `json:"operation" protobuf:"bytes,7,opt,name=operation"` // UserInfo is information about the requesting user UserInfo authenticationv1.UserInfo `json:"userInfo" protobuf:"bytes,8,opt,name=userInfo"` - // Object is the object from the incoming request prior to default values being applied + // Object is the object from the incoming request. 
// +optional Object runtime.RawExtension `json:"object,omitempty" protobuf:"bytes,9,opt,name=object"` - // OldObject is the existing object. Only populated for UPDATE requests. + // OldObject is the existing object. Only populated for DELETE and UPDATE requests. // +optional OldObject runtime.RawExtension `json:"oldObject,omitempty" protobuf:"bytes,10,opt,name=oldObject"` // DryRun indicates that modifications will definitely not be persisted for this request. // Defaults to false. // +optional DryRun *bool `json:"dryRun,omitempty" protobuf:"varint,11,opt,name=dryRun"` + // Options is the operation option structure of the operation being performed. + // e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be + // different than the options the caller provided. e.g. for a patch request the performed + // Operation might be a CREATE, in which case the Options will a + // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`. + // +optional + Options runtime.RawExtension `json:"options,omitempty" protobuf:"bytes,12,opt,name=options"` } // AdmissionResponse describes an admission response. diff --git a/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go index 8a938db3b..2ef98db87 100644 --- a/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go @@ -28,18 +28,22 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AdmissionRequest = map[string]string{ - "": "AdmissionRequest describes the admission.Attributes for the admission request.", - "uid": "UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are otherwise identical (parallel requests, requests when earlier requests did not modify etc) The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.", - "kind": "Kind is the type of object being manipulated. For example: Pod", - "resource": "Resource is the name of the resource being requested. This is not the kind. For example: pods", - "subResource": "SubResource is the name of the subresource being requested. This is a different resource, scoped to the parent resource, but it may have a different kind. For instance, /pods has the resource \"pods\" and the kind \"Pod\", while /pods/foo/status has the resource \"pods\", the sub resource \"status\", and the kind \"Pod\" (because status operates on pods). The binding resource for a pod though may be /pods/foo/binding, which has resource \"pods\", subresource \"binding\", and kind \"Binding\".", - "name": "Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and rely on the server to generate the name. If that is the case, this method will return the empty string.", - "namespace": "Namespace is the namespace associated with the request (if any).", - "operation": "Operation is the operation being performed", - "userInfo": "UserInfo is information about the requesting user", - "object": "Object is the object from the incoming request prior to default values being applied", - "oldObject": "OldObject is the existing object. 
Only populated for UPDATE requests.", - "dryRun": "DryRun indicates that modifications will definitely not be persisted for this request. Defaults to false.", + "": "AdmissionRequest describes the admission.Attributes for the admission request.", + "uid": "UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are otherwise identical (parallel requests, requests when earlier requests did not modify etc) The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.", + "kind": "Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)", + "resource": "Resource is the fully-qualified resource being requested (for example, v1.pods)", + "subResource": "SubResource is the subresource being requested, if any (for example, \"status\" or \"scale\")", + "requestKind": "RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). If this is specified and differs from the value in \"kind\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `kind: {group:\"apps\", version:\"v1\", kind:\"Deployment\"}` (matching the rule the webhook registered for), and `requestKind: {group:\"apps\", version:\"v1beta1\", kind:\"Deployment\"}` (indicating the kind of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type for more details.", + "requestResource": "RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). If this is specified and differs from the value in \"resource\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `resource: {group:\"apps\", version:\"v1\", resource:\"deployments\"}` (matching the resource the webhook registered for), and `requestResource: {group:\"apps\", version:\"v1beta1\", resource:\"deployments\"}` (indicating the resource of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type.", + "requestSubResource": "RequestSubResource is the name of the subresource of the original API request, if any (for example, \"status\" or \"scale\") If this is specified and differs from the value in \"subResource\", an equivalent match and conversion was performed. See documentation for the \"matchPolicy\" field in the webhook configuration type.", + "name": "Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and rely on the server to generate the name. If that is the case, this field will contain an empty string.", + "namespace": "Namespace is the namespace associated with the request (if any).", + "operation": "Operation is the operation being performed. 
This may be different than the operation requested. e.g. a patch can result in either a CREATE or UPDATE Operation.", + "userInfo": "UserInfo is information about the requesting user", + "object": "Object is the object from the incoming request.", + "oldObject": "OldObject is the existing object. Only populated for DELETE and UPDATE requests.", + "dryRun": "DryRun indicates that modifications will definitely not be persisted for this request. Defaults to false.", + "options": "Options is the operation option structure of the operation being performed. e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be different than the options the caller provided. e.g. for a patch request the performed Operation might be a CREATE, in which case the Options will a `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.", } func (AdmissionRequest) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go index 2b4352a94..e4704c86d 100644 --- a/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go @@ -30,6 +30,16 @@ func (in *AdmissionRequest) DeepCopyInto(out *AdmissionRequest) { *out = *in out.Kind = in.Kind out.Resource = in.Resource + if in.RequestKind != nil { + in, out := &in.RequestKind, &out.RequestKind + *out = new(v1.GroupVersionKind) + **out = **in + } + if in.RequestResource != nil { + in, out := &in.RequestResource, &out.RequestResource + *out = new(v1.GroupVersionResource) + **out = **in + } in.UserInfo.DeepCopyInto(&out.UserInfo) in.Object.DeepCopyInto(&out.Object) in.OldObject.DeepCopyInto(&out.OldObject) @@ -38,6 +48,7 @@ func (in *AdmissionRequest) DeepCopyInto(out *AdmissionRequest) { *out = new(bool) **out = **in } + in.Options.DeepCopyInto(&out.Options) return } diff --git a/vendor/k8s.io/api/admissionregistration/v1/doc.go b/vendor/k8s.io/api/admissionregistration/v1/doc.go new file mode 100644 index 000000000..c3940f090 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/doc.go @@ -0,0 +1,26 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +groupName=admissionregistration.k8s.io + +// Package v1 is the v1 version of the API. +// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration +// MutatingWebhookConfiguration and ValidatingWebhookConfiguration are for the +// new dynamic admission controller configuration. 
+package v1 // import "k8s.io/api/admissionregistration/v1" diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go new file mode 100644 index 000000000..1acb6345a --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go @@ -0,0 +1,3469 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} } +func (*MutatingWebhook) ProtoMessage() {} +func (*MutatingWebhook) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{0} +} +func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingWebhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingWebhook) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingWebhook.Merge(m, src) +} +func (m *MutatingWebhook) XXX_Size() int { + return m.Size() +} +func (m *MutatingWebhook) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingWebhook.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo + +func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } +func (*MutatingWebhookConfiguration) ProtoMessage() {} +func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{1} +} +func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingWebhookConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingWebhookConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingWebhookConfiguration.Merge(m, src) +} +func (m *MutatingWebhookConfiguration) XXX_Size() int { + return m.Size() +} +func (m *MutatingWebhookConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingWebhookConfiguration.DiscardUnknown(m) +} + +var 
xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo + +func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } +func (*MutatingWebhookConfigurationList) ProtoMessage() {} +func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{2} +} +func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingWebhookConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingWebhookConfigurationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingWebhookConfigurationList.Merge(m, src) +} +func (m *MutatingWebhookConfigurationList) XXX_Size() int { + return m.Size() +} +func (m *MutatingWebhookConfigurationList) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingWebhookConfigurationList.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo + +func (m *Rule) Reset() { *m = Rule{} } +func (*Rule) ProtoMessage() {} +func (*Rule) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{3} +} +func (m *Rule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Rule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Rule) XXX_Merge(src proto.Message) { + xxx_messageInfo_Rule.Merge(m, src) +} +func (m *Rule) XXX_Size() int { + return m.Size() +} +func (m *Rule) XXX_DiscardUnknown() { + xxx_messageInfo_Rule.DiscardUnknown(m) +} + +var xxx_messageInfo_Rule proto.InternalMessageInfo + +func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } +func (*RuleWithOperations) ProtoMessage() {} +func (*RuleWithOperations) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{4} +} +func (m *RuleWithOperations) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuleWithOperations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuleWithOperations) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuleWithOperations.Merge(m, src) +} +func (m *RuleWithOperations) XXX_Size() int { + return m.Size() +} +func (m *RuleWithOperations) XXX_DiscardUnknown() { + xxx_messageInfo_RuleWithOperations.DiscardUnknown(m) +} + +var xxx_messageInfo_RuleWithOperations proto.InternalMessageInfo + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{5} +} +func (m *ServiceReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceReference.Merge(m, src) +} +func (m *ServiceReference) XXX_Size() int { + return m.Size() +} +func (m *ServiceReference) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceReference.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ServiceReference proto.InternalMessageInfo + +func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} } +func (*ValidatingWebhook) ProtoMessage() {} +func (*ValidatingWebhook) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{6} +} +func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingWebhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingWebhook) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingWebhook.Merge(m, src) +} +func (m *ValidatingWebhook) XXX_Size() int { + return m.Size() +} +func (m *ValidatingWebhook) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingWebhook.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo + +func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} } +func (*ValidatingWebhookConfiguration) ProtoMessage() {} +func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{7} +} +func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingWebhookConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingWebhookConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingWebhookConfiguration.Merge(m, src) +} +func (m *ValidatingWebhookConfiguration) XXX_Size() int { + return m.Size() +} +func (m *ValidatingWebhookConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingWebhookConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo + +func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } +func (*ValidatingWebhookConfigurationList) ProtoMessage() {} +func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{8} +} +func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingWebhookConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingWebhookConfigurationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingWebhookConfigurationList.Merge(m, src) +} +func (m *ValidatingWebhookConfigurationList) XXX_Size() int { + return m.Size() +} +func (m *ValidatingWebhookConfigurationList) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingWebhookConfigurationList.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{9} +} +func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + 
} + return b[:n], nil +} +func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookClientConfig.Merge(m, src) +} +func (m *WebhookClientConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookClientConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhook") + proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhookConfiguration") + proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhookConfigurationList") + proto.RegisterType((*Rule)(nil), "k8s.io.api.admissionregistration.v1.Rule") + proto.RegisterType((*RuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1.RuleWithOperations") + proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.admissionregistration.v1.ServiceReference") + proto.RegisterType((*ValidatingWebhook)(nil), "k8s.io.api.admissionregistration.v1.ValidatingWebhook") + proto.RegisterType((*ValidatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1.ValidatingWebhookConfiguration") + proto.RegisterType((*ValidatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1.ValidatingWebhookConfigurationList") + proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.admissionregistration.v1.WebhookClientConfig") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1/generated.proto", fileDescriptor_aaac5994f79683e8) +} + +var fileDescriptor_aaac5994f79683e8 = []byte{ + // 1104 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xcf, 0xc6, 0x76, 0x63, 0x8f, 0xf3, 0xa7, 0x19, 0xa0, 0x35, 0xa1, 0xf2, 0x5a, 0x46, 0x42, + 0x46, 0xc0, 0x6e, 0x13, 0x4a, 0xa9, 0xb8, 0xa0, 0x6c, 0xf8, 0xa3, 0x88, 0xa4, 0x8d, 0x26, 0x6d, + 0x8a, 0x50, 0x0e, 0x1d, 0xaf, 0xc7, 0xf6, 0x10, 0x7b, 0x67, 0x35, 0x33, 0xeb, 0x92, 0x1b, 0x1f, + 0x81, 0xaf, 0x00, 0x9f, 0x82, 0x1b, 0xe2, 0x96, 0x63, 0x8f, 0x39, 0xa0, 0x85, 0x2c, 0x17, 0x0e, + 0x7c, 0x82, 0x9c, 0xd0, 0xcc, 0xae, 0x77, 0xfd, 0x27, 0x09, 0x56, 0x0e, 0x3d, 0xe5, 0xb6, 0xf3, + 0x7b, 0xf3, 0x7e, 0x6f, 0xde, 0xdb, 0xf7, 0xde, 0x0f, 0xec, 0x1c, 0x3d, 0x12, 0x16, 0x65, 0xf6, + 0x51, 0xd0, 0x24, 0xdc, 0x23, 0x92, 0x08, 0x7b, 0x40, 0xbc, 0x16, 0xe3, 0x76, 0x62, 0xc0, 0x3e, + 0xb5, 0x71, 0xab, 0x4f, 0x85, 0xa0, 0xcc, 0xe3, 0xa4, 0x43, 0x85, 0xe4, 0x58, 0x52, 0xe6, 0xd9, + 0x83, 0x75, 0xbb, 0x43, 0x3c, 0xc2, 0xb1, 0x24, 0x2d, 0xcb, 0xe7, 0x4c, 0x32, 0xf8, 0x6e, 0xec, + 0x64, 0x61, 0x9f, 0x5a, 0x17, 0x3a, 0x59, 0x83, 0xf5, 0xb5, 0x8f, 0x3a, 0x54, 0x76, 0x83, 0xa6, + 0xe5, 0xb2, 0xbe, 0xdd, 0x61, 0x1d, 0x66, 0x6b, 0xdf, 0x66, 0xd0, 0xd6, 0x27, 0x7d, 0xd0, 0x5f, + 0x31, 0xe7, 0xda, 0x83, 0xec, 0x21, 0x7d, 0xec, 0x76, 0xa9, 0x47, 0xf8, 0xb1, 0xed, 0x1f, 0x75, + 0x14, 0x20, 0xec, 0x3e, 0x91, 0xf8, 0x82, 0x97, 0xac, 0xd9, 0x97, 0x79, 0xf1, 0xc0, 0x93, 0xb4, + 0x4f, 0xa6, 0x1c, 0x1e, 0xfe, 0x9f, 0x83, 0x70, 0xbb, 0xa4, 0x8f, 0x27, 0xfd, 0xea, 0xbf, 0x2d, + 0x80, 0x95, 0xdd, 0x40, 0x62, 0x49, 0xbd, 0xce, 0x73, 0xd2, 0xec, 0x32, 0x76, 0x04, 0x6b, 0x20, + 0xef, 0xe1, 0x3e, 0xa9, 0x18, 0x35, 0xa3, 0x51, 0x72, 0x16, 0x4f, 0x42, 0x73, 0x2e, 0x0a, 0xcd, + 0xfc, 0x63, 0xdc, 0x27, 0x48, 0x5b, 0x20, 0x07, 0x8b, 0x6e, 0x8f, 0x12, 0x4f, 0x6e, 
0x31, 0xaf, + 0x4d, 0x3b, 0x95, 0xf9, 0x9a, 0xd1, 0x28, 0x6f, 0x3c, 0xb2, 0x66, 0xa8, 0x9f, 0x95, 0x44, 0xd9, + 0x1a, 0xf1, 0x77, 0xde, 0x4c, 0x62, 0x2c, 0x8e, 0xa2, 0x68, 0x2c, 0x06, 0x3c, 0x04, 0x05, 0x1e, + 0xf4, 0x88, 0xa8, 0xe4, 0x6a, 0xb9, 0x46, 0x79, 0xe3, 0xd3, 0x99, 0x82, 0xa1, 0xa0, 0x47, 0x9e, + 0x53, 0xd9, 0x7d, 0xe2, 0x93, 0x18, 0x14, 0xce, 0x52, 0x12, 0xab, 0xa0, 0x6c, 0x02, 0xc5, 0xa4, + 0x70, 0x07, 0x2c, 0xb5, 0x31, 0xed, 0x05, 0x9c, 0xec, 0xb1, 0x1e, 0x75, 0x8f, 0x2b, 0x79, 0x9d, + 0xfc, 0x7b, 0x51, 0x68, 0x2e, 0x7d, 0x35, 0x6a, 0x38, 0x0f, 0xcd, 0xd5, 0x31, 0xe0, 0xe9, 0xb1, + 0x4f, 0xd0, 0xb8, 0x33, 0xfc, 0x02, 0x94, 0xfb, 0x58, 0xba, 0xdd, 0x84, 0xab, 0xa4, 0xb9, 0xea, + 0x51, 0x68, 0x96, 0x77, 0x33, 0xf8, 0x3c, 0x34, 0x57, 0x46, 0x8e, 0x9a, 0x67, 0xd4, 0x0d, 0xfe, + 0x00, 0x56, 0x55, 0xb5, 0x85, 0x8f, 0x5d, 0xb2, 0x4f, 0x7a, 0xc4, 0x95, 0x8c, 0x57, 0x0a, 0xba, + 0xd4, 0x1f, 0x8f, 0x64, 0x9f, 0xfe, 0x6f, 0xcb, 0x3f, 0xea, 0x28, 0x40, 0x58, 0xaa, 0xad, 0x54, + 0xfa, 0x3b, 0xb8, 0x49, 0x7a, 0x43, 0x57, 0xe7, 0xad, 0x28, 0x34, 0x57, 0x1f, 0x4f, 0x32, 0xa2, + 0xe9, 0x20, 0x90, 0x81, 0x65, 0xd6, 0xfc, 0x9e, 0xb8, 0x32, 0x0d, 0x5b, 0xbe, 0x7e, 0x58, 0x18, + 0x85, 0xe6, 0xf2, 0x93, 0x31, 0x3a, 0x34, 0x41, 0xaf, 0x0a, 0x26, 0x68, 0x8b, 0x7c, 0xd9, 0x6e, + 0x13, 0x57, 0x8a, 0xca, 0xad, 0xac, 0x60, 0xfb, 0x19, 0xac, 0x0a, 0x96, 0x1d, 0xb7, 0x7a, 0x58, + 0x08, 0x34, 0xea, 0x06, 0x3f, 0x03, 0xcb, 0xaa, 0xd7, 0x59, 0x20, 0xf7, 0x89, 0xcb, 0xbc, 0x96, + 0xa8, 0x2c, 0xd4, 0x8c, 0x46, 0x21, 0x7e, 0xc1, 0xd3, 0x31, 0x0b, 0x9a, 0xb8, 0x09, 0x9f, 0x81, + 0xbb, 0x69, 0x17, 0x21, 0x32, 0xa0, 0xe4, 0xe5, 0x01, 0xe1, 0xea, 0x20, 0x2a, 0xc5, 0x5a, 0xae, + 0x51, 0x72, 0xde, 0x89, 0x42, 0xf3, 0xee, 0xe6, 0xc5, 0x57, 0xd0, 0x65, 0xbe, 0xf0, 0x05, 0x80, + 0x9c, 0x50, 0x6f, 0xc0, 0x5c, 0xdd, 0x7e, 0x49, 0x43, 0x00, 0x9d, 0xdf, 0xfd, 0x28, 0x34, 0x21, + 0x9a, 0xb2, 0x9e, 0x87, 0xe6, 0x9d, 0x69, 0x54, 0xb7, 0xc7, 0x05, 0x5c, 0xf5, 0x53, 0x03, 0xdc, + 0x9b, 0x98, 0xe0, 0x78, 0x62, 0x82, 0xb8, 0xe3, 0xe1, 0x0b, 0x50, 0x54, 0x3f, 0xa6, 0x85, 0x25, + 0xd6, 0x23, 0x5d, 0xde, 0xb8, 0x3f, 0xdb, 0x6f, 0x8c, 0xff, 0xd9, 0x2e, 0x91, 0xd8, 0x81, 0xc9, + 0xd0, 0x80, 0x0c, 0x43, 0x29, 0x2b, 0x3c, 0x00, 0xc5, 0x24, 0xb2, 0xa8, 0xcc, 0xeb, 0xe9, 0x7c, + 0x30, 0xd3, 0x74, 0x4e, 0x3c, 0xdb, 0xc9, 0xab, 0x28, 0xa8, 0xf8, 0x32, 0xe1, 0xaa, 0xff, 0x63, + 0x80, 0xda, 0x55, 0xa9, 0xed, 0x50, 0x21, 0xe1, 0xe1, 0x54, 0x7a, 0xd6, 0x8c, 0x5d, 0x4a, 0x45, + 0x9c, 0xdc, 0xed, 0x24, 0xb9, 0xe2, 0x10, 0x19, 0x49, 0xad, 0x0d, 0x0a, 0x54, 0x92, 0xfe, 0x30, + 0xaf, 0xcd, 0xeb, 0xe4, 0x35, 0xf6, 0xe6, 0x6c, 0xff, 0x6c, 0x2b, 0x5e, 0x14, 0xd3, 0xd7, 0x7f, + 0x37, 0x40, 0x5e, 0x2d, 0x24, 0xf8, 0x01, 0x28, 0x61, 0x9f, 0x7e, 0xcd, 0x59, 0xe0, 0x8b, 0x8a, + 0xa1, 0x3b, 0x6f, 0x29, 0x0a, 0xcd, 0xd2, 0xe6, 0xde, 0x76, 0x0c, 0xa2, 0xcc, 0x0e, 0xd7, 0x41, + 0x19, 0xfb, 0x34, 0x6d, 0xd4, 0x79, 0x7d, 0x7d, 0x45, 0x8d, 0xcd, 0xe6, 0xde, 0x76, 0xda, 0x9c, + 0xa3, 0x77, 0x14, 0x3f, 0x27, 0x82, 0x05, 0xdc, 0x4d, 0x56, 0x69, 0xc2, 0x8f, 0x86, 0x20, 0xca, + 0xec, 0xf0, 0x43, 0x50, 0x10, 0x2e, 0xf3, 0x49, 0xb2, 0x0d, 0xef, 0xa8, 0x67, 0xef, 0x2b, 0xe0, + 0x3c, 0x34, 0x4b, 0xfa, 0x43, 0xb7, 0x65, 0x7c, 0xa9, 0xfe, 0x8b, 0x01, 0xe0, 0xf4, 0xc2, 0x85, + 0x9f, 0x03, 0xc0, 0xd2, 0x53, 0x92, 0x92, 0xa9, 0x7b, 0x29, 0x45, 0xcf, 0x43, 0x73, 0x29, 0x3d, + 0x69, 0xca, 0x11, 0x17, 0xf8, 0x0d, 0xc8, 0xab, 0x25, 0x9d, 0xa8, 0xcc, 0xfb, 0x33, 0x2f, 0xfe, + 0x4c, 0xba, 0xd4, 0x09, 0x69, 0x92, 0xfa, 0xcf, 0x06, 0xb8, 0xbd, 0x4f, 0xf8, 0x80, 0xba, 0x04, + 0x91, 0x36, 
0xe1, 0xc4, 0x73, 0x09, 0xb4, 0x41, 0x29, 0x5d, 0x82, 0x89, 0xec, 0xad, 0x26, 0xbe, + 0xa5, 0x74, 0x61, 0xa2, 0xec, 0x4e, 0x2a, 0x91, 0xf3, 0x97, 0x4a, 0xe4, 0x3d, 0x90, 0xf7, 0xb1, + 0xec, 0x56, 0x72, 0xfa, 0x46, 0x51, 0x59, 0xf7, 0xb0, 0xec, 0x22, 0x8d, 0x6a, 0x2b, 0xe3, 0x52, + 0xd7, 0xb5, 0x90, 0x58, 0x19, 0x97, 0x48, 0xa3, 0xf5, 0x3f, 0x6f, 0x81, 0xd5, 0x03, 0xdc, 0xa3, + 0xad, 0x1b, 0x59, 0xbe, 0x91, 0xe5, 0x2b, 0x65, 0x19, 0xdc, 0xc8, 0xf2, 0x75, 0x64, 0xb9, 0xfe, + 0x87, 0x01, 0xaa, 0x53, 0x13, 0xf6, 0xba, 0x65, 0xf3, 0xdb, 0x29, 0xd9, 0x7c, 0x38, 0xd3, 0xf4, + 0x4c, 0x3d, 0x7c, 0x4a, 0x38, 0xff, 0x35, 0x40, 0xfd, 0xea, 0xf4, 0x5e, 0x83, 0x74, 0x76, 0xc7, + 0xa5, 0x73, 0xeb, 0x7a, 0xb9, 0xcd, 0x22, 0x9e, 0xbf, 0x1a, 0xe0, 0x8d, 0x0b, 0xf6, 0x17, 0x7c, + 0x1b, 0xe4, 0x02, 0xde, 0x4b, 0x56, 0xf0, 0x42, 0x14, 0x9a, 0xb9, 0x67, 0x68, 0x07, 0x29, 0x0c, + 0x1e, 0x82, 0x05, 0x11, 0xab, 0x40, 0x92, 0xf9, 0x27, 0x33, 0x3d, 0x6f, 0x52, 0x39, 0x9c, 0x72, + 0x14, 0x9a, 0x0b, 0x43, 0x74, 0x48, 0x09, 0x1b, 0xa0, 0xe8, 0x62, 0x27, 0xf0, 0x5a, 0x89, 0x6a, + 0x2d, 0x3a, 0x8b, 0xaa, 0x48, 0x5b, 0x9b, 0x31, 0x86, 0x52, 0xab, 0xd3, 0x38, 0x39, 0xab, 0xce, + 0xbd, 0x3a, 0xab, 0xce, 0x9d, 0x9e, 0x55, 0xe7, 0x7e, 0x8c, 0xaa, 0xc6, 0x49, 0x54, 0x35, 0x5e, + 0x45, 0x55, 0xe3, 0x34, 0xaa, 0x1a, 0x7f, 0x45, 0x55, 0xe3, 0xa7, 0xbf, 0xab, 0x73, 0xdf, 0xcd, + 0x0f, 0xd6, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x57, 0x91, 0x2c, 0x7b, 0xeb, 0x0e, 0x00, 0x00, +} + +func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MutatingWebhook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if m.ReinvocationPolicy != nil { + i -= len(*m.ReinvocationPolicy) + copy(dAtA[i:], *m.ReinvocationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReinvocationPolicy))) + i-- + dAtA[i] = 0x52 + } + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x4a + } + if len(m.AdmissionReviewVersions) > 0 { + for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdmissionReviewVersions[iNdEx]) + copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x38 + } + if m.SideEffects != nil { + i -= len(*m.SideEffects) + copy(dAtA[i:], *m.SideEffects) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + i-- + dAtA[i] = 0x32 + } + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MutatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Webhooks) > 0 { + for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Rule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Rule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Rule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Scope != nil { + i -= len(*m.Scope) + copy(dAtA[i:], *m.Scope) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Scope))) + i-- + dAtA[i] = 0x22 + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.APIVersions) > 0 { + for iNdEx := len(m.APIVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIVersions[iNdEx]) + copy(dAtA[i:], m.APIVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersions[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuleWithOperations) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Rule.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Operations) > 0 { + for iNdEx := len(m.Operations) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Operations[iNdEx]) + copy(dAtA[i:], m.Operations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operations[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ServiceReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x20 + } + if m.Path != nil { + i -= len(*m.Path) + copy(dAtA[i:], *m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x4a + } 
+ if len(m.AdmissionReviewVersions) > 0 { + for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdmissionReviewVersions[iNdEx]) + copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x38 + } + if m.SideEffects != nil { + i -= len(*m.SideEffects) + copy(dAtA[i:], *m.SideEffects) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + i-- + dAtA[i] = 0x32 + } + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Webhooks) > 0 { + for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, 
err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.URL != nil { + i -= len(*m.URL) + copy(dAtA[i:], *m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i-- + dAtA[i] = 0x1a + } + if m.CABundle != nil { + i -= len(m.CABundle) + copy(dAtA[i:], m.CABundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i-- + dAtA[i] = 0x12 + } + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MutatingWebhook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SideEffects != nil { + l = len(*m.SideEffects) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if len(m.AdmissionReviewVersions) > 0 { + for _, s := range m.AdmissionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReinvocationPolicy != nil { + l = len(*m.ReinvocationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *MutatingWebhookConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *MutatingWebhookConfigurationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Rule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if 
len(m.APIVersions) > 0 { + for _, s := range m.APIVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Scope != nil { + l = len(*m.Scope) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RuleWithOperations) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Operations) > 0 { + for _, s := range m.Operations { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Rule.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Path != nil { + l = len(*m.Path) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } + return n +} + +func (m *ValidatingWebhook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SideEffects != nil { + l = len(*m.SideEffects) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if len(m.AdmissionReviewVersions) > 0 { + for _, s := range m.AdmissionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ValidatingWebhookConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingWebhookConfigurationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *WebhookClientConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CABundle != nil { + l = len(m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.URL != nil { + l = len(*m.URL) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *MutatingWebhook) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), 
"RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&MutatingWebhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + repeatedStringForWebhooks := "[]MutatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" + s := strings.Join([]string{`&MutatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]MutatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&MutatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *Rule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Rule{`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `Scope:` + valueToStringGenerated(this.Scope) + `,`, + `}`, + }, "") + return s +} +func (this *RuleWithOperations) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuleWithOperations{`, + `Operations:` + fmt.Sprintf("%v", this.Operations) + `,`, + `Rule:` + strings.Replace(strings.Replace(this.Rule.String(), "Rule", "Rule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceReference{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Path:` + valueToStringGenerated(this.Path) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhook) 
String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&ValidatingWebhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + repeatedStringForWebhooks := "[]ValidatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" + s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *WebhookClientConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookClientConfig{`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, + `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `URL:` + valueToStringGenerated(this.URL) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, RuleWithOperations{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := SideEffectClass(dAtA[iNdEx:postIndex]) + m.SideEffects = &s + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReinvocationPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + s := ReinvocationPolicyType(dAtA[iNdEx:postIndex]) + m.ReinvocationPolicy = &s + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Webhooks = append(m.Webhooks, MutatingWebhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err 
!= nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, MutatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Rule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Rule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ScopeType(dAtA[iNdEx:postIndex]) + m.Scope = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + 
if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, RuleWithOperations{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := 
SideEffectClass(dAtA[iNdEx:postIndex]) + m.SideEffects = &s + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Webhooks = append(m.Webhooks, ValidatingWebhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ValidatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WebhookClientConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebhookClientConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Service == nil { + m.Service = &ServiceReference{} + } + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) 
+ if m.CABundle == nil { + m.CABundle = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.URL = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1/generated.proto new file mode 100644 index 000000000..f102f3a7e --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/generated.proto @@ -0,0 +1,479 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.admissionregistration.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// MutatingWebhook describes an admission webhook and the resources and operations it applies to. +message MutatingWebhook { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + optional string name = 1; + + // ClientConfig defines how to communicate with the hook. + // Required + optional WebhookClientConfig clientConfig = 2; + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + // from putting the cluster in a state which cannot be recovered from without completely + // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called + // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + repeated RuleWithOperations rules = 3; + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Fail. + // +optional + optional string failurePolicy = 4; + + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + // + // Defaults to "Equivalent" + // +optional + optional string matchPolicy = 9; + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. 
If the object is another cluster scoped resource, + // it never skips the webhook. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11; + + // SideEffects states whether this webhook has side effects. + // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). + // Webhooks with side effects MUST implement a reconciliation system, since a request may be + // rejected by a future step in the admission change and the side effects therefore need to be undone. + // Requests with the dryRun attribute will be auto-rejected if they match a webhook with + // sideEffects == Unknown or Some. + optional string sideEffects = 6; + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 10 seconds. + // +optional + optional int32 timeoutSeconds = 7; + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. + // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + repeated string admissionReviewVersions = 8; + + // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. + // Allowed values are "Never" and "IfNeeded". 
+ // + // Never: the webhook will not be called more than once in a single admission evaluation. + // + // IfNeeded: the webhook will be called at least one additional time as part of the admission evaluation + // if the object being admitted is modified by other admission plugins after the initial webhook call. + // Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. + // Note: + // * the number of additional invocations is not guaranteed to be exactly one. + // * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. + // * webhooks that use this option may be reordered to minimize the number of additional invocations. + // * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead. + // + // Defaults to "Never". + // +optional + optional string reinvocationPolicy = 10; +} + +// MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. +message MutatingWebhookConfiguration { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated MutatingWebhook Webhooks = 2; +} + +// MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. +message MutatingWebhookConfigurationList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of MutatingWebhookConfiguration. + repeated MutatingWebhookConfiguration items = 2; +} + +// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended +// to make sure that all the tuple expansions are valid. +message Rule { + // APIGroups is the API groups the resources belong to. '*' is all groups. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string apiGroups = 1; + + // APIVersions is the API versions the resources belong to. '*' is all versions. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string apiVersions = 2; + + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' means pods. + // 'pods/log' means the log subresource of pods. + // '*' means all resources, but not subresources. + // 'pods/*' means all subresources of pods. + // '*/scale' means all scale subresources. + // '*/*' means all resources and their subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // Depending on the enclosing object, subresources might not be allowed. + // Required. + repeated string resources = 3; + + // scope specifies the scope of this rule. + // Valid values are "Cluster", "Namespaced", and "*" + // "Cluster" means that only cluster-scoped resources will match this rule. + // Namespace API objects are cluster-scoped. + // "Namespaced" means that only namespaced resources will match this rule. + // "*" means that there are no scope restrictions. 
+ // Subresources match the scope of their parent resource. + // Default is "*". + // + // +optional + optional string scope = 4; +} + +// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make +// sure that all the tuple expansions are valid. +message RuleWithOperations { + // Operations is the operations the admission hook cares about - CREATE, UPDATE, or * + // for all operations. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string operations = 1; + + // Rule is embedded, it describes other criteria of the rule, like + // APIGroups, APIVersions, Resources, etc. + optional Rule rule = 2; +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +message ServiceReference { + // `namespace` is the namespace of the service. + // Required + optional string namespace = 1; + + // `name` is the name of the service. + // Required + optional string name = 2; + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + optional string path = 3; + + // If specified, the port on the service that hosting webhook. + // Default to 443 for backward compatibility. + // `port` should be a valid port number (1-65535, inclusive). + // +optional + optional int32 port = 4; +} + +// ValidatingWebhook describes an admission webhook and the resources and operations it applies to. +message ValidatingWebhook { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + optional string name = 1; + + // ClientConfig defines how to communicate with the hook. + // Required + optional WebhookClientConfig clientConfig = 2; + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + // from putting the cluster in a state which cannot be recovered from without completely + // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called + // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + repeated RuleWithOperations rules = 3; + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Fail. + // +optional + optional string failurePolicy = 4; + + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. 
+ // + // Defaults to "Equivalent" + // +optional + optional string matchPolicy = 9; + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the webhook. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10; + + // SideEffects states whether this webhook has side effects. + // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). + // Webhooks with side effects MUST implement a reconciliation system, since a request may be + // rejected by a future step in the admission change and the side effects therefore need to be undone. + // Requests with the dryRun attribute will be auto-rejected if they match a webhook with + // sideEffects == Unknown or Some. + optional string sideEffects = 6; + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 10 seconds. + // +optional + optional int32 timeoutSeconds = 7; + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. 
+ // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + repeated string admissionReviewVersions = 8; +} + +// ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. +message ValidatingWebhookConfiguration { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated ValidatingWebhook Webhooks = 2; +} + +// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. +message ValidatingWebhookConfigurationList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ValidatingWebhookConfiguration. + repeated ValidatingWebhookConfiguration items = 2; +} + +// WebhookClientConfig contains the information to make a TLS +// connection with the webhook +message WebhookClientConfig { + // `url` gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + optional string url = 3; + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // +optional + optional ServiceReference service = 1; + + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional + optional bytes caBundle = 2; +} + diff --git a/vendor/k8s.io/api/admissionregistration/v1/register.go b/vendor/k8s.io/api/admissionregistration/v1/register.go new file mode 100644 index 000000000..716ce7fc5 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "admissionregistration.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ValidatingWebhookConfiguration{}, + &ValidatingWebhookConfigurationList{}, + &MutatingWebhookConfiguration{}, + &MutatingWebhookConfigurationList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/admissionregistration/v1/types.go b/vendor/k8s.io/api/admissionregistration/v1/types.go new file mode 100644 index 000000000..114a4c68a --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/types.go @@ -0,0 +1,551 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended +// to make sure that all the tuple expansions are valid. +type Rule struct { + // APIGroups is the API groups the resources belong to. '*' is all groups. + // If '*' is present, the length of the slice must be one. + // Required. + APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,1,rep,name=apiGroups"` + + // APIVersions is the API versions the resources belong to. '*' is all versions. + // If '*' is present, the length of the slice must be one. + // Required. + APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,2,rep,name=apiVersions"` + + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' means pods. + // 'pods/log' means the log subresource of pods. 
+ // '*' means all resources, but not subresources. + // 'pods/*' means all subresources of pods. + // '*/scale' means all scale subresources. + // '*/*' means all resources and their subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // Depending on the enclosing object, subresources might not be allowed. + // Required. + Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` + + // scope specifies the scope of this rule. + // Valid values are "Cluster", "Namespaced", and "*" + // "Cluster" means that only cluster-scoped resources will match this rule. + // Namespace API objects are cluster-scoped. + // "Namespaced" means that only namespaced resources will match this rule. + // "*" means that there are no scope restrictions. + // Subresources match the scope of their parent resource. + // Default is "*". + // + // +optional + Scope *ScopeType `json:"scope,omitempty" protobuf:"bytes,4,rep,name=scope"` +} + +type ScopeType string + +const ( + // ClusterScope means that scope is limited to cluster-scoped objects. + // Namespace objects are cluster-scoped. + ClusterScope ScopeType = "Cluster" + // NamespacedScope means that scope is limited to namespaced objects. + NamespacedScope ScopeType = "Namespaced" + // AllScopes means that all scopes are included. + AllScopes ScopeType = "*" +) + +type FailurePolicyType string + +const ( + // Ignore means that an error calling the webhook is ignored. + Ignore FailurePolicyType = "Ignore" + // Fail means that an error calling the webhook causes the admission to fail. + Fail FailurePolicyType = "Fail" +) + +// MatchPolicyType specifies the type of match policy +type MatchPolicyType string + +const ( + // Exact means requests should only be sent to the webhook if they exactly match a given rule + Exact MatchPolicyType = "Exact" + // Equivalent means requests should be sent to the webhook if they modify a resource listed in rules via another API group or version. + Equivalent MatchPolicyType = "Equivalent" +) + +type SideEffectClass string + +const ( + // SideEffectClassUnknown means that no information is known about the side effects of calling the webhook. + // If a request with the dry-run attribute would trigger a call to this webhook, the request will instead fail. + SideEffectClassUnknown SideEffectClass = "Unknown" + // SideEffectClassNone means that calling the webhook will have no side effects. + SideEffectClassNone SideEffectClass = "None" + // SideEffectClassSome means that calling the webhook will possibly have side effects. + // If a request with the dry-run attribute would trigger a call to this webhook, the request will instead fail. + SideEffectClassSome SideEffectClass = "Some" + // SideEffectClassNoneOnDryRun means that calling the webhook will possibly have side effects, but if the + // request being reviewed has the dry-run attribute, the side effects will be suppressed. + SideEffectClassNoneOnDryRun SideEffectClass = "NoneOnDryRun" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. +type ValidatingWebhookConfiguration struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. 
+ // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Webhooks []ValidatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. +type ValidatingWebhookConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of ValidatingWebhookConfiguration. + Items []ValidatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. +type MutatingWebhookConfiguration struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Webhooks []MutatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. +type MutatingWebhookConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of MutatingWebhookConfiguration. + Items []MutatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ValidatingWebhook describes an admission webhook and the resources and operations it applies to. +type ValidatingWebhook struct { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // ClientConfig defines how to communicate with the hook. + // Required + ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. 
+ // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + // from putting the cluster in a state which cannot be recovered from without completely + // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called + // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Fail. + // +optional + FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + // + // Defaults to "Equivalent" + // +optional + MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,9,opt,name=matchPolicy,casttype=MatchPolicyType"` + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the webhook. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"` + + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. 
objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,10,opt,name=objectSelector"` + + // SideEffects states whether this webhook has side effects. + // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). + // Webhooks with side effects MUST implement a reconciliation system, since a request may be + // rejected by a future step in the admission change and the side effects therefore need to be undone. + // Requests with the dryRun attribute will be auto-rejected if they match a webhook with + // sideEffects == Unknown or Some. + SideEffects *SideEffectClass `json:"sideEffects" protobuf:"bytes,6,opt,name=sideEffects,casttype=SideEffectClass"` + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 10 seconds. + // +optional + TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,7,opt,name=timeoutSeconds"` + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. + // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + AdmissionReviewVersions []string `json:"admissionReviewVersions" protobuf:"bytes,8,rep,name=admissionReviewVersions"` +} + +// MutatingWebhook describes an admission webhook and the resources and operations it applies to. +type MutatingWebhook struct { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // ClientConfig defines how to communicate with the hook. + // Required + ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + // from putting the cluster in a state which cannot be recovered from without completely + // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called + // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. 
+ Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Fail. + // +optional + FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + // + // Defaults to "Equivalent" + // +optional + MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,9,opt,name=matchPolicy,casttype=MatchPolicyType"` + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the webhook. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"` + + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. 
+ // Default to the empty LabelSelector, which matches everything. + // +optional + ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,11,opt,name=objectSelector"` + + // SideEffects states whether this webhook has side effects. + // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). + // Webhooks with side effects MUST implement a reconciliation system, since a request may be + // rejected by a future step in the admission change and the side effects therefore need to be undone. + // Requests with the dryRun attribute will be auto-rejected if they match a webhook with + // sideEffects == Unknown or Some. + SideEffects *SideEffectClass `json:"sideEffects" protobuf:"bytes,6,opt,name=sideEffects,casttype=SideEffectClass"` + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 10 seconds. + // +optional + TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,7,opt,name=timeoutSeconds"` + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. + // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + AdmissionReviewVersions []string `json:"admissionReviewVersions" protobuf:"bytes,8,rep,name=admissionReviewVersions"` + + // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. + // Allowed values are "Never" and "IfNeeded". + // + // Never: the webhook will not be called more than once in a single admission evaluation. + // + // IfNeeded: the webhook will be called at least one additional time as part of the admission evaluation + // if the object being admitted is modified by other admission plugins after the initial webhook call. + // Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. + // Note: + // * the number of additional invocations is not guaranteed to be exactly one. + // * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. + // * webhooks that use this option may be reordered to minimize the number of additional invocations. + // * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead. + // + // Defaults to "Never". + // +optional + ReinvocationPolicy *ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,10,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"` +} + +// ReinvocationPolicyType specifies what type of policy the admission hook uses. +type ReinvocationPolicyType string + +const ( + // NeverReinvocationPolicy indicates that the webhook must not be called more than once in a + // single admission evaluation. 
+ NeverReinvocationPolicy ReinvocationPolicyType = "Never" + // IfNeededReinvocationPolicy indicates that the webhook may be called at least one + // additional time as part of the admission evaluation if the object being admitted is + // modified by other admission plugins after the initial webhook call. + IfNeededReinvocationPolicy ReinvocationPolicyType = "IfNeeded" +) + +// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make +// sure that all the tuple expansions are valid. +type RuleWithOperations struct { + // Operations is the operations the admission hook cares about - CREATE, UPDATE, or * + // for all operations. + // If '*' is present, the length of the slice must be one. + // Required. + Operations []OperationType `json:"operations,omitempty" protobuf:"bytes,1,rep,name=operations,casttype=OperationType"` + // Rule is embedded, it describes other criteria of the rule, like + // APIGroups, APIVersions, Resources, etc. + Rule `json:",inline" protobuf:"bytes,2,opt,name=rule"` +} + +type OperationType string + +// The constants should be kept in sync with those defined in k8s.io/kubernetes/pkg/admission/interface.go. +const ( + OperationAll OperationType = "*" + Create OperationType = "CREATE" + Update OperationType = "UPDATE" + Delete OperationType = "DELETE" + Connect OperationType = "CONNECT" +) + +// WebhookClientConfig contains the information to make a TLS +// connection with the webhook +type WebhookClientConfig struct { + // `url` gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + URL *string `json:"url,omitempty" protobuf:"bytes,3,opt,name=url"` + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // +optional + Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"` + + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. 
+ // +optional + CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"` +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +type ServiceReference struct { + // `namespace` is the namespace of the service. + // Required + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + // `name` is the name of the service. + // Required + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` + + // If specified, the port on the service that hosting webhook. + // Default to 443 for backward compatibility. + // `port` should be a valid port number (1-65535, inclusive). + // +optional + Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` +} diff --git a/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go new file mode 100644 index 000000000..2fde0ce37 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go @@ -0,0 +1,151 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_MutatingWebhook = map[string]string{ + "": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.", + "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + "clientConfig": "ClientConfig defines how to communicate with the hook. Required", + "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. 
However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", + "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Fail.", + "matchPolicy": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Equivalent\"", + "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "objectSelector": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", + "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). 
Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some.", + "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.", + "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.", + "reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".", +} + +func (MutatingWebhook) SwaggerDoc() map[string]string { + return map_MutatingWebhook +} + +var map_MutatingWebhookConfiguration = map[string]string{ + "": "MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", +} + +func (MutatingWebhookConfiguration) SwaggerDoc() map[string]string { + return map_MutatingWebhookConfiguration +} + +var map_MutatingWebhookConfigurationList = map[string]string{ + "": "MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of MutatingWebhookConfiguration.", +} + +func (MutatingWebhookConfigurationList) SwaggerDoc() map[string]string { + return map_MutatingWebhookConfigurationList +} + +var map_Rule = map[string]string{ + "": "Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended to make sure that all the tuple expansions are valid.", + "apiGroups": "APIGroups is the API groups the resources belong to. '*' is all groups. 
If '*' is present, the length of the slice must be one. Required.", + "apiVersions": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.", + "resources": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", + "scope": "scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\".", +} + +func (Rule) SwaggerDoc() map[string]string { + return map_Rule +} + +var map_RuleWithOperations = map[string]string{ + "": "RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid.", + "operations": "Operations is the operations the admission hook cares about - CREATE, UPDATE, or * for all operations. If '*' is present, the length of the slice must be one. Required.", +} + +func (RuleWithOperations) SwaggerDoc() map[string]string { + return map_RuleWithOperations +} + +var map_ServiceReference = map[string]string{ + "": "ServiceReference holds a reference to Service.legacy.k8s.io", + "namespace": "`namespace` is the namespace of the service. Required", + "name": "`name` is the name of the service. Required", + "path": "`path` is an optional URL path which will be sent in any request to this service.", + "port": "If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive).", +} + +func (ServiceReference) SwaggerDoc() map[string]string { + return map_ServiceReference +} + +var map_ValidatingWebhook = map[string]string{ + "": "ValidatingWebhook describes an admission webhook and the resources and operations it applies to.", + "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + "clientConfig": "ClientConfig defines how to communicate with the hook. Required", + "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", + "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. 
Defaults to Fail.", + "matchPolicy": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Equivalent\"", + "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "objectSelector": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", + "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some.", + "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. 
Default to 10 seconds.", + "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.", +} + +func (ValidatingWebhook) SwaggerDoc() map[string]string { + return map_ValidatingWebhook +} + +var map_ValidatingWebhookConfiguration = map[string]string{ + "": "ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", +} + +func (ValidatingWebhookConfiguration) SwaggerDoc() map[string]string { + return map_ValidatingWebhookConfiguration +} + +var map_ValidatingWebhookConfigurationList = map[string]string{ + "": "ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of ValidatingWebhookConfiguration.", +} + +func (ValidatingWebhookConfigurationList) SwaggerDoc() map[string]string { + return map_ValidatingWebhookConfigurationList +} + +var map_WebhookClientConfig = map[string]string{ + "": "WebhookClientConfig contains the information to make a TLS connection with the webhook", + "url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", + "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.", + "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. 
If unspecified, system trust roots on the apiserver are used.", +} + +func (WebhookClientConfig) SwaggerDoc() map[string]string { + return map_WebhookClientConfig +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..3afb74673 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go @@ -0,0 +1,396 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { + *out = *in + in.ClientConfig.DeepCopyInto(&out.ClientConfig) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(FailurePolicyType) + **out = **in + } + if in.MatchPolicy != nil { + in, out := &in.MatchPolicy, &out.MatchPolicy + *out = new(MatchPolicyType) + **out = **in + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ObjectSelector != nil { + in, out := &in.ObjectSelector, &out.ObjectSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.SideEffects != nil { + in, out := &in.SideEffects, &out.SideEffects + *out = new(SideEffectClass) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int32) + **out = **in + } + if in.AdmissionReviewVersions != nil { + in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ReinvocationPolicy != nil { + in, out := &in.ReinvocationPolicy, &out.ReinvocationPolicy + *out = new(ReinvocationPolicyType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhook. +func (in *MutatingWebhook) DeepCopy() *MutatingWebhook { + if in == nil { + return nil + } + out := new(MutatingWebhook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MutatingWebhookConfiguration) DeepCopyInto(out *MutatingWebhookConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Webhooks != nil { + in, out := &in.Webhooks, &out.Webhooks + *out = make([]MutatingWebhook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhookConfiguration. +func (in *MutatingWebhookConfiguration) DeepCopy() *MutatingWebhookConfiguration { + if in == nil { + return nil + } + out := new(MutatingWebhookConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingWebhookConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingWebhookConfigurationList) DeepCopyInto(out *MutatingWebhookConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MutatingWebhookConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhookConfigurationList. +func (in *MutatingWebhookConfigurationList) DeepCopy() *MutatingWebhookConfigurationList { + if in == nil { + return nil + } + out := new(MutatingWebhookConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingWebhookConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Rule) DeepCopyInto(out *Rule) { + *out = *in + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIVersions != nil { + in, out := &in.APIVersions, &out.APIVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(ScopeType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule. +func (in *Rule) DeepCopy() *Rule { + if in == nil { + return nil + } + out := new(Rule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleWithOperations) DeepCopyInto(out *RuleWithOperations) { + *out = *in + if in.Operations != nil { + in, out := &in.Operations, &out.Operations + *out = make([]OperationType, len(*in)) + copy(*out, *in) + } + in.Rule.DeepCopyInto(&out.Rule) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleWithOperations. 
+func (in *RuleWithOperations) DeepCopy() *RuleWithOperations { + if in == nil { + return nil + } + out := new(RuleWithOperations) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. +func (in *ServiceReference) DeepCopy() *ServiceReference { + if in == nil { + return nil + } + out := new(ServiceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingWebhook) DeepCopyInto(out *ValidatingWebhook) { + *out = *in + in.ClientConfig.DeepCopyInto(&out.ClientConfig) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(FailurePolicyType) + **out = **in + } + if in.MatchPolicy != nil { + in, out := &in.MatchPolicy, &out.MatchPolicy + *out = new(MatchPolicyType) + **out = **in + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ObjectSelector != nil { + in, out := &in.ObjectSelector, &out.ObjectSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.SideEffects != nil { + in, out := &in.SideEffects, &out.SideEffects + *out = new(SideEffectClass) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int32) + **out = **in + } + if in.AdmissionReviewVersions != nil { + in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhook. +func (in *ValidatingWebhook) DeepCopy() *ValidatingWebhook { + if in == nil { + return nil + } + out := new(ValidatingWebhook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingWebhookConfiguration) DeepCopyInto(out *ValidatingWebhookConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Webhooks != nil { + in, out := &in.Webhooks, &out.Webhooks + *out = make([]ValidatingWebhook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhookConfiguration. +func (in *ValidatingWebhookConfiguration) DeepCopy() *ValidatingWebhookConfiguration { + if in == nil { + return nil + } + out := new(ValidatingWebhookConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ValidatingWebhookConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingWebhookConfigurationList) DeepCopyInto(out *ValidatingWebhookConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ValidatingWebhookConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhookConfigurationList. +func (in *ValidatingWebhookConfigurationList) DeepCopy() *ValidatingWebhookConfigurationList { + if in == nil { + return nil + } + out := new(ValidatingWebhookConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingWebhookConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + (*in).DeepCopyInto(*out) + } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig. +func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig { + if in == nil { + return nil + } + out := new(WebhookClientConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go index 1114e29a1..d84d8b634 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go @@ -17,35 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto - - It has these top-level messages: - MutatingWebhookConfiguration - MutatingWebhookConfigurationList - Rule - RuleWithOperations - ServiceReference - ValidatingWebhookConfiguration - ValidatingWebhookConfigurationList - Webhook - WebhookClientConfig -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -58,141 +44,593 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} } +func (*MutatingWebhook) ProtoMessage() {} +func (*MutatingWebhook) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{0} +} +func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingWebhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingWebhook) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingWebhook.Merge(m, src) +} +func (m *MutatingWebhook) XXX_Size() int { + return m.Size() +} +func (m *MutatingWebhook) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingWebhook.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo + func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } func (*MutatingWebhookConfiguration) ProtoMessage() {} func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} + return fileDescriptor_abeea74cbc46f55a, []int{1} +} +func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingWebhookConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingWebhookConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingWebhookConfiguration.Merge(m, src) +} +func (m *MutatingWebhookConfiguration) XXX_Size() int { + return m.Size() +} +func (m *MutatingWebhookConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingWebhookConfiguration.DiscardUnknown(m) } +var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo + func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } func (*MutatingWebhookConfigurationList) ProtoMessage() {} func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} + return fileDescriptor_abeea74cbc46f55a, []int{2} +} +func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingWebhookConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingWebhookConfigurationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingWebhookConfigurationList.Merge(m, src) +} +func (m *MutatingWebhookConfigurationList) XXX_Size() int { + return m.Size() +} +func (m *MutatingWebhookConfigurationList) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingWebhookConfigurationList.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo + +func (m *Rule) Reset() { *m = Rule{} } +func (*Rule) ProtoMessage() {} +func (*Rule) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{3} +} +func (m *Rule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Rule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Rule) XXX_Merge(src proto.Message) { + xxx_messageInfo_Rule.Merge(m, src) +} +func (m *Rule) XXX_Size() int { + return m.Size() +} +func (m *Rule) XXX_DiscardUnknown() { + xxx_messageInfo_Rule.DiscardUnknown(m) +} + +var xxx_messageInfo_Rule proto.InternalMessageInfo + +func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } +func (*RuleWithOperations) ProtoMessage() {} +func (*RuleWithOperations) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{4} +} +func (m *RuleWithOperations) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuleWithOperations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuleWithOperations) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuleWithOperations.Merge(m, src) +} +func (m *RuleWithOperations) XXX_Size() int { + return m.Size() +} +func (m *RuleWithOperations) XXX_DiscardUnknown() { + xxx_messageInfo_RuleWithOperations.DiscardUnknown(m) +} + +var xxx_messageInfo_RuleWithOperations proto.InternalMessageInfo + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{5} +} +func (m *ServiceReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceReference.Merge(m, src) +} +func (m *ServiceReference) XXX_Size() int { + return m.Size() +} +func (m *ServiceReference) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceReference.DiscardUnknown(m) } -func (m *Rule) Reset() { *m = Rule{} } -func (*Rule) ProtoMessage() {} -func (*Rule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_ServiceReference proto.InternalMessageInfo -func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } -func (*RuleWithOperations) ProtoMessage() {} -func (*RuleWithOperations) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} } +func (*ValidatingWebhook) ProtoMessage() {} +func (*ValidatingWebhook) Descriptor() ([]byte, []int) { + 
return fileDescriptor_abeea74cbc46f55a, []int{6} +} +func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingWebhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingWebhook) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingWebhook.Merge(m, src) +} +func (m *ValidatingWebhook) XXX_Size() int { + return m.Size() +} +func (m *ValidatingWebhook) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingWebhook.DiscardUnknown(m) +} -func (m *ServiceReference) Reset() { *m = ServiceReference{} } -func (*ServiceReference) ProtoMessage() {} -func (*ServiceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} } func (*ValidatingWebhookConfiguration) ProtoMessage() {} func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} + return fileDescriptor_abeea74cbc46f55a, []int{7} +} +func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingWebhookConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingWebhookConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingWebhookConfiguration.Merge(m, src) +} +func (m *ValidatingWebhookConfiguration) XXX_Size() int { + return m.Size() } +func (m *ValidatingWebhookConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingWebhookConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } func (*ValidatingWebhookConfigurationList) ProtoMessage() {} func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} + return fileDescriptor_abeea74cbc46f55a, []int{8} +} +func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingWebhookConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingWebhookConfigurationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingWebhookConfigurationList.Merge(m, src) +} +func (m *ValidatingWebhookConfigurationList) XXX_Size() int { + return m.Size() +} +func (m *ValidatingWebhookConfigurationList) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingWebhookConfigurationList.DiscardUnknown(m) } -func (m *Webhook) Reset() { *m = Webhook{} } -func (*Webhook) ProtoMessage() {} -func (*Webhook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{9} +} +func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) 
error { + return m.Unmarshal(b) +} +func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookClientConfig.Merge(m, src) +} +func (m *WebhookClientConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookClientConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) +} -func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } -func (*WebhookClientConfig) ProtoMessage() {} -func (*WebhookClientConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo func init() { + proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhook") proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfiguration") proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfigurationList") proto.RegisterType((*Rule)(nil), "k8s.io.api.admissionregistration.v1beta1.Rule") proto.RegisterType((*RuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1beta1.RuleWithOperations") proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.admissionregistration.v1beta1.ServiceReference") + proto.RegisterType((*ValidatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhook") proto.RegisterType((*ValidatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration") proto.RegisterType((*ValidatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfigurationList") - proto.RegisterType((*Webhook)(nil), "k8s.io.api.admissionregistration.v1beta1.Webhook") proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.admissionregistration.v1beta1.WebhookClientConfig") } -func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto", fileDescriptor_abeea74cbc46f55a) +} + +var fileDescriptor_abeea74cbc46f55a = []byte{ + // 1114 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0x4d, 0x6f, 0x23, 0x45, + 0x13, 0xce, 0xc4, 0xf6, 0xda, 0x6e, 0x27, 0xbb, 0x9b, 0x7e, 0x5f, 0x76, 0x4d, 0x58, 0x79, 0x2c, + 0x1f, 0x90, 0x25, 0xd8, 0x99, 0x4d, 0x40, 0x08, 0x16, 0x10, 0x8a, 0x03, 0x0b, 0x91, 0x92, 0xdd, + 0xd0, 0xd9, 0x0f, 0x89, 0x0f, 0x69, 0xdb, 0xe3, 0xb2, 0xdd, 0xd8, 0x9e, 0x1e, 0x4d, 0xf7, 0x38, + 0xe4, 0xc6, 0x4f, 0xe0, 0x2f, 0x70, 0xe2, 0x57, 0x70, 0xe0, 0x16, 0x6e, 0x7b, 0xdc, 0x0b, 0x23, + 0x32, 0x9c, 0x38, 0x70, 0xe0, 0x9a, 0x13, 0x9a, 0x9e, 0xf6, 0xf8, 0x2b, 0x59, 0x4c, 0x90, 0xf6, + 0x94, 0xdb, 0xf4, 0x53, 0x5d, 0x4f, 0x75, 0xd5, 0x54, 0xd5, 0x83, 0x3e, 0xef, 0xbd, 0x2b, 0x2c, + 0xc6, 0xed, 0x5e, 0xd0, 0x04, 0xdf, 0x05, 0x09, 0xc2, 0x1e, 0x82, 0xdb, 0xe2, 0xbe, 0xad, 0x0d, + 0xd4, 0x63, 0x36, 0x6d, 0x0d, 0x98, 0x10, 0x8c, 0xbb, 0x3e, 0x74, 0x98, 0x90, 0x3e, 0x95, 0x8c, + 0xbb, 0xf6, 0x70, 0xa3, 0x09, 0x92, 0x6e, 0xd8, 0x1d, 0x70, 0xc1, 0xa7, 0x12, 0x5a, 0x96, 0xe7, + 0x73, 0xc9, 0x71, 0x3d, 0xf1, 0xb4, 0xa8, 0xc7, 0xac, 0x33, 0x3d, 0x2d, 0xed, 0xb9, 0x7e, 0xbb, + 0xc3, 0x64, 0x37, 0x68, 0x5a, 
0x0e, 0x1f, 0xd8, 0x1d, 0xde, 0xe1, 0xb6, 0x22, 0x68, 0x06, 0x6d, + 0x75, 0x52, 0x07, 0xf5, 0x95, 0x10, 0xaf, 0xbf, 0x3d, 0x7e, 0xd2, 0x80, 0x3a, 0x5d, 0xe6, 0x82, + 0x7f, 0x64, 0x7b, 0xbd, 0x4e, 0x0c, 0x08, 0x7b, 0x00, 0x92, 0xda, 0xc3, 0xb9, 0xe7, 0xac, 0xdb, + 0xe7, 0x79, 0xf9, 0x81, 0x2b, 0xd9, 0x00, 0xe6, 0x1c, 0xde, 0xf9, 0x27, 0x07, 0xe1, 0x74, 0x61, + 0x40, 0x67, 0xfd, 0x6a, 0xbf, 0xe4, 0xd1, 0xb5, 0xbd, 0x40, 0x52, 0xc9, 0xdc, 0xce, 0x13, 0x68, + 0x76, 0x39, 0xef, 0xe1, 0x2a, 0xca, 0xba, 0x74, 0x00, 0x65, 0xa3, 0x6a, 0xd4, 0x8b, 0x8d, 0x95, + 0xe3, 0xd0, 0x5c, 0x8a, 0x42, 0x33, 0x7b, 0x9f, 0x0e, 0x80, 0x28, 0x0b, 0x3e, 0x44, 0x2b, 0x4e, + 0x9f, 0x81, 0x2b, 0xb7, 0xb9, 0xdb, 0x66, 0x9d, 0xf2, 0x72, 0xd5, 0xa8, 0x97, 0x36, 0x3f, 0xb4, + 0x16, 0x2d, 0xa2, 0xa5, 0x43, 0x6d, 0x4f, 0x90, 0x34, 0xfe, 0xaf, 0x03, 0xad, 0x4c, 0xa2, 0x64, + 0x2a, 0x10, 0xa6, 0x28, 0xe7, 0x07, 0x7d, 0x10, 0xe5, 0x4c, 0x35, 0x53, 0x2f, 0x6d, 0x7e, 0xb0, + 0x78, 0x44, 0x12, 0xf4, 0xe1, 0x09, 0x93, 0xdd, 0x07, 0x1e, 0x24, 0x16, 0xd1, 0x58, 0xd5, 0x01, + 0x73, 0xb1, 0x4d, 0x90, 0x84, 0x19, 0xef, 0xa2, 0xd5, 0x36, 0x65, 0xfd, 0xc0, 0x87, 0x7d, 0xde, + 0x67, 0xce, 0x51, 0x39, 0xab, 0xca, 0xf0, 0x7a, 0x14, 0x9a, 0xab, 0xf7, 0x26, 0x0d, 0xa7, 0xa1, + 0xb9, 0x36, 0x05, 0x3c, 0x3c, 0xf2, 0x80, 0x4c, 0x3b, 0xe3, 0x8f, 0x51, 0x69, 0x40, 0xa5, 0xd3, + 0xd5, 0x5c, 0x45, 0xc5, 0x55, 0x8b, 0x42, 0xb3, 0xb4, 0x37, 0x86, 0x4f, 0x43, 0xf3, 0xda, 0xc4, + 0x51, 0xf1, 0x4c, 0xba, 0xe1, 0x6f, 0xd1, 0x5a, 0x5c, 0x77, 0xe1, 0x51, 0x07, 0x0e, 0xa0, 0x0f, + 0x8e, 0xe4, 0x7e, 0x39, 0xa7, 0x8a, 0xfe, 0xd6, 0x44, 0x09, 0xd2, 0x3f, 0x6f, 0x79, 0xbd, 0x4e, + 0x0c, 0x08, 0x2b, 0x6e, 0x30, 0x6b, 0xb8, 0x61, 0xed, 0xd2, 0x26, 0xf4, 0x47, 0xae, 0x8d, 0x57, + 0xa2, 0xd0, 0x5c, 0xbb, 0x3f, 0xcb, 0x48, 0xe6, 0x83, 0x60, 0x8e, 0xae, 0xf2, 0xe6, 0x37, 0xe0, + 0xc8, 0x34, 0x6c, 0xe9, 0xe2, 0x61, 0x71, 0x14, 0x9a, 0x57, 0x1f, 0x4c, 0xd1, 0x91, 0x19, 0xfa, + 0xb8, 0x60, 0x82, 0xb5, 0xe0, 0x93, 0x76, 0x1b, 0x1c, 0x29, 0xca, 0x57, 0xc6, 0x05, 0x3b, 0x18, + 0xc3, 0x71, 0xc1, 0xc6, 0xc7, 0xed, 0x3e, 0x15, 0x82, 0x4c, 0xba, 0xe1, 0xbb, 0xe8, 0x6a, 0xdc, + 0xf5, 0x3c, 0x90, 0x07, 0xe0, 0x70, 0xb7, 0x25, 0xca, 0xf9, 0xaa, 0x51, 0xcf, 0x25, 0x2f, 0x78, + 0x38, 0x65, 0x21, 0x33, 0x37, 0xf1, 0x23, 0x74, 0x33, 0x6d, 0x25, 0x02, 0x43, 0x06, 0x87, 0x8f, + 0xc1, 0x8f, 0x0f, 0xa2, 0x5c, 0xa8, 0x66, 0xea, 0xc5, 0xc6, 0x6b, 0x51, 0x68, 0xde, 0xdc, 0x3a, + 0xfb, 0x0a, 0x39, 0xcf, 0x17, 0x3f, 0x45, 0xd8, 0x07, 0xe6, 0x0e, 0xb9, 0xa3, 0xda, 0x4f, 0x37, + 0x04, 0x52, 0xf9, 0xdd, 0x89, 0x42, 0x13, 0x93, 0x39, 0xeb, 0x69, 0x68, 0xde, 0x98, 0x47, 0x55, + 0x7b, 0x9c, 0xc1, 0x55, 0xfb, 0xd5, 0x40, 0xb7, 0x66, 0x66, 0x39, 0x19, 0x9b, 0x20, 0xe9, 0x78, + 0xfc, 0x14, 0x15, 0xe2, 0x1f, 0xd3, 0xa2, 0x92, 0xaa, 0xe1, 0x2e, 0x6d, 0xde, 0x59, 0xec, 0x37, + 0x26, 0xff, 0x6c, 0x0f, 0x24, 0x6d, 0x60, 0x3d, 0x34, 0x68, 0x8c, 0x91, 0x94, 0x15, 0x7f, 0x89, + 0x0a, 0x3a, 0xb2, 0x28, 0x2f, 0xab, 0x11, 0x7d, 0x6f, 0xf1, 0x11, 0x9d, 0x79, 0x7b, 0x23, 0x1b, + 0x87, 0x22, 0x85, 0x43, 0x4d, 0x58, 0xfb, 0xd3, 0x40, 0xd5, 0x17, 0xe5, 0xb7, 0xcb, 0x84, 0xc4, + 0x5f, 0xcd, 0xe5, 0x68, 0x2d, 0xd8, 0xaa, 0x4c, 0x24, 0x19, 0x5e, 0xd7, 0x19, 0x16, 0x46, 0xc8, + 0x44, 0x7e, 0x3d, 0x94, 0x63, 0x12, 0x06, 0xa3, 0xe4, 0xee, 0x5d, 0x38, 0xb9, 0xa9, 0x87, 0x8f, + 0x37, 0xd1, 0x4e, 0x4c, 0x4e, 0x92, 0x18, 0xb5, 0x9f, 0x0d, 0x94, 0x8d, 0x57, 0x13, 0x7e, 0x03, + 0x15, 0xa9, 0xc7, 0x3e, 0xf5, 0x79, 0xe0, 0x89, 0xb2, 0xa1, 0x7a, 0x70, 0x35, 0x0a, 0xcd, 0xe2, + 0xd6, 0xfe, 0x4e, 0x02, 0x92, 0xb1, 0x1d, 0x6f, 0xa0, 
0x12, 0xf5, 0x58, 0xda, 0xb2, 0xcb, 0xea, + 0xfa, 0xb5, 0x78, 0x80, 0xb6, 0xf6, 0x77, 0xd2, 0x36, 0x9d, 0xbc, 0x13, 0xf3, 0xfb, 0x20, 0x78, + 0xe0, 0x3b, 0x7a, 0xb3, 0x6a, 0x7e, 0x32, 0x02, 0xc9, 0xd8, 0x8e, 0xdf, 0x44, 0x39, 0xe1, 0x70, + 0x0f, 0xf4, 0x5e, 0xbc, 0x11, 0x3f, 0xfb, 0x20, 0x06, 0x4e, 0x43, 0xb3, 0xa8, 0x3e, 0x54, 0x83, + 0x26, 0x97, 0x6a, 0x3f, 0x1a, 0x08, 0xcf, 0xaf, 0x5e, 0xfc, 0x11, 0x42, 0x3c, 0x3d, 0xe9, 0x94, + 0x4c, 0xd5, 0x55, 0x29, 0x7a, 0x1a, 0x9a, 0xab, 0xe9, 0x49, 0x51, 0x4e, 0xb8, 0xe0, 0x7d, 0x94, + 0x8d, 0xd7, 0xb5, 0x56, 0x1e, 0xeb, 0xdf, 0xe9, 0xc0, 0x58, 0xd3, 0xe2, 0x13, 0x51, 0x4c, 0xb5, + 0x1f, 0x0c, 0x74, 0xfd, 0x00, 0xfc, 0x21, 0x73, 0x80, 0x40, 0x1b, 0x7c, 0x70, 0x1d, 0xc0, 0x36, + 0x2a, 0xa6, 0x3b, 0x51, 0xeb, 0xe1, 0x9a, 0xf6, 0x2d, 0xa6, 0xfb, 0x93, 0x8c, 0xef, 0xa4, 0xda, + 0xb9, 0x7c, 0xae, 0x76, 0xde, 0x42, 0x59, 0x8f, 0xca, 0x6e, 0x39, 0xa3, 0x6e, 0x14, 0x62, 0xeb, + 0x3e, 0x95, 0x5d, 0xa2, 0x50, 0x65, 0xe5, 0xbe, 0x54, 0xc5, 0xcd, 0x69, 0x2b, 0xf7, 0x25, 0x51, + 0x68, 0xed, 0x8f, 0x2b, 0x68, 0xed, 0x31, 0xed, 0xb3, 0xd6, 0xa5, 0x5e, 0x5f, 0xea, 0xf5, 0x82, + 0x7a, 0x8d, 0x2e, 0xf5, 0xfa, 0x22, 0x7a, 0x5d, 0x3b, 0x31, 0x50, 0x65, 0x6e, 0xd6, 0x5e, 0xb6, + 0x9e, 0x7e, 0x3d, 0xa7, 0xa7, 0xef, 0x2f, 0x3e, 0x42, 0x73, 0xaf, 0x9f, 0x53, 0xd4, 0xbf, 0x0c, + 0x54, 0x7b, 0x71, 0x8e, 0x2f, 0x41, 0x53, 0x07, 0xd3, 0x9a, 0xfa, 0xd9, 0x7f, 0x48, 0x70, 0x11, + 0x55, 0xfd, 0xc9, 0x40, 0xff, 0x3b, 0x63, 0x9d, 0xe1, 0x57, 0x51, 0x26, 0xf0, 0xfb, 0x7a, 0x2d, + 0xe7, 0xa3, 0xd0, 0xcc, 0x3c, 0x22, 0xbb, 0x24, 0xc6, 0x30, 0x45, 0x79, 0x91, 0x28, 0x83, 0x4e, + 0xff, 0xee, 0xe2, 0x6f, 0x9c, 0x95, 0x94, 0x46, 0x29, 0x0a, 0xcd, 0xfc, 0x08, 0x1d, 0xf1, 0xe2, + 0x3a, 0x2a, 0x38, 0xb4, 0x11, 0xb8, 0x2d, 0xad, 0x69, 0x2b, 0x8d, 0x95, 0xb8, 0x5c, 0xdb, 0x5b, + 0x09, 0x46, 0x52, 0x6b, 0xe3, 0xf6, 0xf1, 0x49, 0x65, 0xe9, 0xd9, 0x49, 0x65, 0xe9, 0xf9, 0x49, + 0x65, 0xe9, 0xbb, 0xa8, 0x62, 0x1c, 0x47, 0x15, 0xe3, 0x59, 0x54, 0x31, 0x9e, 0x47, 0x15, 0xe3, + 0xb7, 0xa8, 0x62, 0x7c, 0xff, 0x7b, 0x65, 0xe9, 0x8b, 0xbc, 0x8e, 0xff, 0x77, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xae, 0x27, 0x2b, 0xcb, 0x2c, 0x0f, 0x00, 0x00, +} + +func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *MutatingWebhook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if m.ReinvocationPolicy != nil { + i -= len(*m.ReinvocationPolicy) + copy(dAtA[i:], *m.ReinvocationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReinvocationPolicy))) + i-- + dAtA[i] = 0x52 + } + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x4a } - i += n1 - if 
len(m.Webhooks) > 0 { - for _, msg := range m.Webhooks { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if len(m.AdmissionReviewVersions) > 0 { + for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdmissionReviewVersions[iNdEx]) + copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x38 + } + if m.SideEffects != nil { + i -= len(*m.SideEffects) + copy(dAtA[i:], *m.SideEffects) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + i-- + dAtA[i] = 0x32 + } + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { +func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MutatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if len(m.Webhooks) > 0 { + for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) + return len(dAtA) - i, nil +} + +func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { - return 0, err + return nil, err } - i += n2 + return dAtA[:n], nil +} + +func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size 
:= m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Rule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -200,68 +638,56 @@ func (m *Rule) Marshal() (dAtA []byte, err error) { } func (m *Rule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Rule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if m.Scope != nil { + i -= len(*m.Scope) + copy(dAtA[i:], *m.Scope) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Scope))) + i-- + dAtA[i] = 0x22 + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- + dAtA[i] = 0x1a } } if len(m.APIVersions) > 0 { - for _, s := range m.APIVersions { + for iNdEx := len(m.APIVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIVersions[iNdEx]) + copy(dAtA[i:], m.APIVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersions[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.Resources) > 0 { - for _, s := range m.Resources { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0xa } } - if m.Scope != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Scope))) - i += copy(dAtA[i:], *m.Scope) - } - return i, nil + return len(dAtA) - i, nil } func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -269,40 +695,41 @@ func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { } func (m *RuleWithOperations) MarshalTo(dAtA []byte) (int, error) { - var i 
int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Operations) > 0 { - for _, s := range m.Operations { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + { + size, err := m.Rule.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Rule.Size())) - n3, err := m.Rule.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Operations) > 0 { + for iNdEx := len(m.Operations) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Operations[iNdEx]) + copy(dAtA[i:], m.Operations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operations[iNdEx]))) + i-- + dAtA[i] = 0xa + } } - i += n3 - return i, nil + return len(dAtA) - i, nil } func (m *ServiceReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -310,191 +737,249 @@ func (m *ServiceReference) Marshal() (dAtA []byte, err error) { } func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x20 + } if m.Path != nil { - dAtA[i] = 0x1a - i++ + i -= len(*m.Path) + copy(dAtA[i:], *m.Path) i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) - i += copy(dAtA[i:], *m.Path) + i-- + dAtA[i] = 0x1a } - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { +func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 } - i += n4 - if 
len(m.Webhooks) > 0 { - for _, msg := range m.Webhooks { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x4a + } + if len(m.AdmissionReviewVersions) > 0 { + for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdmissionReviewVersions[iNdEx]) + copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x38 + } + if m.SideEffects != nil { + i -= len(*m.SideEffects) + copy(dAtA[i:], *m.SideEffects) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + i-- + dAtA[i] = 0x32 + } + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { +func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Webhooks) > 0 { + for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= 
size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Webhook) Marshal() (dAtA []byte, err error) { +func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Webhook) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ClientConfig.Size())) - n6, err := m.ClientConfig.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if m.FailurePolicy != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) - i += copy(dAtA[i:], *m.FailurePolicy) - } - if m.NamespaceSelector != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) - n7, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n7 - } - if m.SideEffects != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) - i += copy(dAtA[i:], *m.SideEffects) - } - if m.TimeoutSeconds != nil { - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) - } - if len(m.AdmissionReviewVersions) > 0 { - for _, s := range m.AdmissionReviewVersions { - dAtA[i] = 0x42 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -502,45 +987,111 @@ func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { } func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Service != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Service.Size())) - n8, err := m.Service.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 + if m.URL != nil { + i -= len(*m.URL) + copy(dAtA[i:], 
*m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i-- + dAtA[i] = 0x1a } if m.CABundle != nil { - dAtA[i] = 0x12 - i++ + i -= len(m.CABundle) + copy(dAtA[i:], m.CABundle) i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) - i += copy(dAtA[i:], m.CABundle) + i-- + dAtA[i] = 0x12 } - if m.URL != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) - i += copy(dAtA[i:], *m.URL) + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base +} +func (m *MutatingWebhook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SideEffects != nil { + l = len(*m.SideEffects) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if len(m.AdmissionReviewVersions) > 0 { + for _, s := range m.AdmissionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReinvocationPolicy != nil { + l = len(*m.ReinvocationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } + func (m *MutatingWebhookConfiguration) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -555,6 +1106,9 @@ func (m *MutatingWebhookConfiguration) Size() (n int) { } func (m *MutatingWebhookConfigurationList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -569,6 +1123,9 @@ func (m *MutatingWebhookConfigurationList) Size() (n int) { } func (m *Rule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.APIGroups) > 0 { @@ -597,6 +1154,9 @@ func (m *Rule) Size() (n int) { } func (m *RuleWithOperations) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Operations) > 0 { @@ -611,6 +1171,9 @@ func (m *RuleWithOperations) Size() (n int) { } func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Namespace) @@ -621,38 +1184,16 @@ func (m *ServiceReference) Size() (n int) { l = len(*m.Path) n += 1 + l + sovGenerated(uint64(l)) } - return n -} - -func (m *ValidatingWebhookConfiguration) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Webhooks) > 0 { - for _, e := range m.Webhooks { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) } return n } -func (m *ValidatingWebhookConfigurationList) 
Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *ValidatingWebhook) Size() (n int) { + if m == nil { + return 0 } - return n -} - -func (m *Webhook) Size() (n int) { var l int _ = l l = len(m.Name) @@ -686,10 +1227,55 @@ func (m *Webhook) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ValidatingWebhookConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingWebhookConfigurationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } func (m *WebhookClientConfig) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Service != nil { @@ -708,36 +1294,64 @@ func (m *WebhookClientConfig) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *MutatingWebhookConfiguration) String() string { +func (this *MutatingWebhook) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&MutatingWebhookConfiguration{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Webhooks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Webhooks), "Webhook", "Webhook", 1), `&`, ``, 1) + `,`, + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&MutatingWebhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `}`, }, "") return s } -func (this 
*MutatingWebhookConfigurationList) String() string { +func (this *MutatingWebhookConfiguration) String() string { if this == nil { return "nil" } + repeatedStringForWebhooks := "[]MutatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" + s := strings.Join([]string{`&MutatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]MutatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&MutatingWebhookConfigurationList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -774,45 +1388,63 @@ func (this *ServiceReference) String() string { `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Path:` + valueToStringGenerated(this.Path) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, `}`, }, "") return s } -func (this *ValidatingWebhookConfiguration) String() string { +func (this *ValidatingWebhook) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Webhooks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Webhooks), "Webhook", "Webhook", 1), `&`, ``, 1) + `,`, + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&ValidatingWebhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `ObjectSelector:` + 
strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `}`, }, "") return s } -func (this *ValidatingWebhookConfigurationList) String() string { +func (this *ValidatingWebhookConfiguration) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + `,`, + repeatedStringForWebhooks := "[]ValidatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" + s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, `}`, }, "") return s } -func (this *Webhook) String() string { +func (this *ValidatingWebhookConfigurationList) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&Webhook{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, - `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + repeatedStringForItems := "[]ValidatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -822,7 +1454,7 @@ func (this *WebhookClientConfig) String() string { return "nil" } s := strings.Join([]string{`&WebhookClientConfig{`, - `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "ServiceReference", "ServiceReference", 1) + `,`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, `URL:` + valueToStringGenerated(this.URL) + `,`, `}`, @@ -837,7 +1469,7 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { +func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { 
@@ -852,7 +1484,7 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -860,15 +1492,47 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group") + return fmt.Errorf("proto: MutatingWebhook: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MutatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -880,7 +1544,7 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -889,16 +1553,19 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -910,7 +1577,7 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -919,69 +1586,144 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Webhooks = append(m.Webhooks, Webhook{}) - if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Rules = append(m.Rules, RuleWithOperations{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 4: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) } - if skippy < 0 { + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) } - if iNdEx >= l { + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) } - var msglen int + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := SideEffectClass(dAtA[iNdEx:postIndex]) + m.SideEffects = &s + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 8: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -991,25 +1733,636 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReinvocationPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ReinvocationPolicyType(dAtA[iNdEx:postIndex]) + m.ReinvocationPolicy = &s + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := 
iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Webhooks = append(m.Webhooks, MutatingWebhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + 
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, MutatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Rule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Rule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ScopeType(dAtA[iNdEx:postIndex]) + m.Scope = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1021,7 +2374,7 
@@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1030,11 +2383,13 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, MutatingWebhookConfiguration{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1047,6 +2402,9 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1059,7 +2417,7 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } return nil } -func (m *Rule) Unmarshal(dAtA []byte) error { +func (m *ServiceReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1074,7 +2432,7 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1082,15 +2440,15 @@ func (m *Rule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Rule: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1102,7 +2460,7 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1112,14 +2470,17 @@ func (m *Rule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1131,7 +2492,7 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1141,14 +2502,17 @@ func (m *Rule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) + m.Name = 
string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1160,7 +2524,7 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1170,16 +2534,20 @@ func (m *Rule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s iNdEx = postIndex case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) } - var stringLen uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1189,22 +2557,12 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := ScopeType(dAtA[iNdEx:postIndex]) - m.Scope = &s - iNdEx = postIndex + m.Port = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1214,6 +2572,9 @@ func (m *Rule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1226,7 +2587,7 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } return nil } -func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { +func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1241,7 +2602,7 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1249,15 +2610,15 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatingWebhook: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1269,7 +2630,7 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1279,14 +2640,17 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated 
} postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1298,7 +2662,7 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1307,66 +2671,53 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) } - if skippy < 0 { - return ErrInvalidLengthGenerated + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if msglen < 0 { + return ErrInvalidLengthGenerated } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + m.Rules = append(m.Rules, RuleWithOperations{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1378,7 +2729,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1388,16 +2739,20 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } - m.Namespace = string(dAtA[iNdEx:postIndex]) + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1407,24 +2762,31 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1436,7 +2798,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1446,67 +2808,72 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Path = &s + s := SideEffectClass(dAtA[iNdEx:postIndex]) + m.SideEffects = &s iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) } - if skippy < 0 { - return ErrInvalidLengthGenerated + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + m.TimeoutSeconds = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1516,25 +2883,28 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s iNdEx = postIndex - case 2: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1546,7 +2916,7 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1555,11 +2925,16 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Webhooks = append(m.Webhooks, Webhook{}) - if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1572,6 +2947,9 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1584,7 +2962,7 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } return nil } -func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { +func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1599,7 +2977,7 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b 
< 0x80 { break } @@ -1607,15 +2985,15 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1627,7 +3005,7 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1636,16 +3014,19 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1657,7 +3038,7 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1666,11 +3047,14 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ValidatingWebhookConfiguration{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Webhooks = append(m.Webhooks, ValidatingWebhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1683,6 +3067,9 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1695,7 +3082,7 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } return nil } -func (m *Webhook) Unmarshal(dAtA []byte) error { +func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1710,7 +3097,7 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1718,44 +3105,15 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Webhook: wiretype end group for 
non-group") + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Webhook: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1767,7 +3125,7 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1776,77 +3134,19 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Rules = append(m.Rules, RuleWithOperations{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := FailurePolicyType(dAtA[iNdEx:postIndex]) - m.FailurePolicy = &s - iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1858,7 +3158,7 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= 
int(b&0x7F) << shift if b < 0x80 { break } @@ -1867,94 +3167,16 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NamespaceSelector == nil { - m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - s := SideEffectClass(dAtA[iNdEx:postIndex]) - m.SideEffects = &s - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TimeoutSeconds = &v - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF + m.Items = append(m.Items, ValidatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -1965,6 +3187,9 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1992,7 +3217,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2020,7 +3245,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2029,6 +3254,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2053,7 +3281,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2062,6 +3290,9 @@ 
func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2084,7 +3315,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2094,6 +3325,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2109,6 +3343,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2175,10 +3412,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -2207,6 +3447,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -2225,73 +3468,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 989 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x55, 0x4f, 0x6f, 0xe3, 0x44, - 0x14, 0xaf, 0x37, 0x09, 0x49, 0x26, 0xed, 0xee, 0x76, 0xf8, 0xb3, 0xa1, 0xac, 0xec, 0x28, 0x07, - 0x14, 0x09, 0xd6, 0xa6, 0x05, 0x21, 0xb4, 0x02, 0xa1, 0xba, 0xb0, 0x50, 0xa9, 0xbb, 0x5b, 0x26, - 0xfb, 0x47, 0x42, 0x1c, 0x98, 0x38, 0x2f, 0xc9, 0x10, 0xc7, 0x63, 0x79, 0xc6, 0x29, 0xbd, 0x21, - 0xf1, 0x05, 0xf8, 0x16, 0xf0, 0x25, 0x38, 0x70, 0xeb, 0x71, 0x2f, 0x88, 0x3d, 0x59, 0xd4, 0x9c, - 0x39, 0x70, 0xed, 0x09, 0x8d, 0xed, 0xd8, 0x49, 0xd3, 0x76, 0xb3, 0x17, 0x0e, 0xdc, 0x3c, 0xbf, - 0xf7, 0x7e, 0xef, 0xbd, 0xdf, 0xcc, 0x7b, 0xcf, 0xe8, 0xab, 0xf1, 0x47, 0xc2, 0x64, 0xdc, 0x1a, - 0x87, 0x3d, 0x08, 0x3c, 0x90, 0x20, 0xac, 0x29, 0x78, 0x7d, 0x1e, 0x58, 0x99, 0x81, 0xfa, 0xcc, - 0xa2, 0xfd, 0x09, 0x13, 0x82, 0x71, 0x2f, 0x80, 0x21, 0x13, 0x32, 0xa0, 0x92, 0x71, 0xcf, 0x9a, - 0x6e, 0xf7, 0x40, 0xd2, 0x6d, 0x6b, 0x08, 0x1e, 0x04, 0x54, 0x42, 0xdf, 0xf4, 0x03, 0x2e, 0x39, - 0xee, 0xa4, 0x4c, 0x93, 0xfa, 0xcc, 0xbc, 0x90, 0x69, 0x66, 0xcc, 0xad, 0x3b, 0x43, 0x26, 0x47, - 0x61, 0xcf, 0x74, 0xf8, 0xc4, 0x1a, 0xf2, 0x21, 0xb7, 0x92, 0x00, 0xbd, 0x70, 0x90, 0x9c, 0x92, - 0x43, 0xf2, 0x95, 0x06, 0xde, 0xfa, 0xa0, 0x28, 0x69, 0x42, 0x9d, 0x11, 0xf3, 0x20, 0x38, 0xb6, - 0xfc, 0xf1, 0x50, 0x01, 0xc2, 0x9a, 0x80, 0xa4, 0xd6, 0x74, 0xa9, 0x9c, 0x2d, 0xeb, 0x32, 0x56, - 0x10, 0x7a, 0x92, 0x4d, 0x60, 0x89, 0xf0, 0xe1, 0x8b, 0x08, 0xc2, 0x19, 0xc1, 0x84, 0x9e, 0xe7, - 0xb5, 0x7f, 0xd7, 0xd0, 0xed, 0xfb, 0xa1, 0xa4, 0x92, 0x79, 0xc3, 0xa7, 0xd0, 0x1b, 0x71, 0x3e, - 0xde, 0xe3, 0xde, 0x80, 0x0d, 0xc3, 0x54, 0x36, 0xfe, 0x16, 0xd5, 0x54, 0x91, 0x7d, 0x2a, 0x69, - 0x53, 0x6b, 0x69, 0x9d, 0xc6, 
0xce, 0x7b, 0x66, 0x71, 0x57, 0x79, 0x2e, 0xd3, 0x1f, 0x0f, 0x15, - 0x20, 0x4c, 0xe5, 0x6d, 0x4e, 0xb7, 0xcd, 0x87, 0xbd, 0xef, 0xc0, 0x91, 0xf7, 0x41, 0x52, 0x1b, - 0x9f, 0x44, 0xc6, 0x5a, 0x1c, 0x19, 0xa8, 0xc0, 0x48, 0x1e, 0x15, 0x77, 0x51, 0x2d, 0xcb, 0x2c, - 0x9a, 0xd7, 0x5a, 0xa5, 0x4e, 0x63, 0x67, 0xdb, 0x5c, 0xf5, 0x35, 0xcc, 0x8c, 0x69, 0x97, 0x55, - 0x0a, 0x52, 0x3b, 0xca, 0x02, 0xb5, 0xff, 0xd6, 0x50, 0xeb, 0x2a, 0x5d, 0x07, 0x4c, 0x48, 0xfc, - 0xcd, 0x92, 0x36, 0x73, 0x35, 0x6d, 0x8a, 0x9d, 0x28, 0xbb, 0x99, 0x29, 0xab, 0xcd, 0x90, 0x39, - 0x5d, 0x63, 0x54, 0x61, 0x12, 0x26, 0x33, 0x51, 0xf7, 0x56, 0x17, 0x75, 0x55, 0xe1, 0xf6, 0x46, - 0x96, 0xb2, 0xb2, 0xaf, 0x82, 0x93, 0x34, 0x47, 0xfb, 0x37, 0x0d, 0x95, 0x49, 0xe8, 0x02, 0x7e, - 0x07, 0xd5, 0xa9, 0xcf, 0xbe, 0x08, 0x78, 0xe8, 0x8b, 0xa6, 0xd6, 0x2a, 0x75, 0xea, 0xf6, 0x46, - 0x1c, 0x19, 0xf5, 0xdd, 0xc3, 0xfd, 0x14, 0x24, 0x85, 0x1d, 0x6f, 0xa3, 0x06, 0xf5, 0xd9, 0x13, - 0x08, 0x54, 0x29, 0x69, 0xa1, 0x75, 0xfb, 0x46, 0x1c, 0x19, 0x8d, 0xdd, 0xc3, 0xfd, 0x19, 0x4c, - 0xe6, 0x7d, 0x54, 0xfc, 0x00, 0x04, 0x0f, 0x03, 0x07, 0x44, 0xb3, 0x54, 0xc4, 0x27, 0x33, 0x90, - 0x14, 0x76, 0xfc, 0x2e, 0xaa, 0x08, 0x87, 0xfb, 0xd0, 0x2c, 0xb7, 0xb4, 0x4e, 0xdd, 0x7e, 0x43, - 0x95, 0xdd, 0x55, 0xc0, 0x59, 0x64, 0xd4, 0x93, 0x8f, 0x47, 0xc7, 0x3e, 0x90, 0xd4, 0xa9, 0xfd, - 0xb3, 0x86, 0xb0, 0xd2, 0xf0, 0x94, 0xc9, 0xd1, 0x43, 0x1f, 0x52, 0xbd, 0x02, 0x7f, 0x8a, 0x10, - 0xcf, 0x4f, 0x99, 0x24, 0x23, 0xe9, 0xa6, 0x1c, 0x3d, 0x8b, 0x8c, 0x8d, 0xfc, 0x94, 0x84, 0x9c, - 0xa3, 0xe0, 0x43, 0x54, 0x0e, 0x42, 0x17, 0x9a, 0xd7, 0x96, 0x9e, 0xf8, 0x05, 0xef, 0xa0, 0x8a, - 0xb1, 0xd7, 0xb3, 0xfb, 0x4e, 0xae, 0x97, 0x24, 0x91, 0xda, 0x3f, 0x6a, 0xe8, 0x66, 0x17, 0x82, - 0x29, 0x73, 0x80, 0xc0, 0x00, 0x02, 0xf0, 0x1c, 0xc0, 0x16, 0xaa, 0x7b, 0x74, 0x02, 0xc2, 0xa7, - 0x0e, 0x24, 0xed, 0x54, 0xb7, 0x37, 0x33, 0x6e, 0xfd, 0xc1, 0xcc, 0x40, 0x0a, 0x1f, 0xdc, 0x42, - 0x65, 0x75, 0x48, 0xea, 0xaa, 0x17, 0x79, 0x94, 0x2f, 0x49, 0x2c, 0xf8, 0x36, 0x2a, 0xfb, 0x54, - 0x8e, 0x9a, 0xa5, 0xc4, 0xa3, 0xa6, 0xac, 0x87, 0x54, 0x8e, 0x48, 0x82, 0xb6, 0xff, 0xd0, 0x90, - 0xfe, 0x84, 0xba, 0xac, 0xff, 0xbf, 0x9b, 0xde, 0x7f, 0x34, 0xd4, 0xbe, 0x5a, 0xd9, 0x7f, 0x30, - 0xbf, 0x93, 0xc5, 0xf9, 0xfd, 0x72, 0x75, 0x59, 0x57, 0x97, 0x7e, 0xc9, 0x04, 0xff, 0x52, 0x41, - 0xd5, 0xcc, 0x3d, 0xef, 0x0c, 0xed, 0xd2, 0xce, 0x38, 0x42, 0xeb, 0x8e, 0xcb, 0xc0, 0x93, 0x69, - 0xe8, 0xac, 0xb7, 0x3f, 0x79, 0xe9, 0xab, 0xdf, 0x9b, 0x0b, 0x62, 0xbf, 0x96, 0x25, 0x5a, 0x9f, - 0x47, 0xc9, 0x42, 0x22, 0x4c, 0x51, 0x45, 0x8d, 0x40, 0x3a, 0xfb, 0x8d, 0x9d, 0x8f, 0x5f, 0x6e, - 0x9a, 0x16, 0x47, 0xbb, 0xb8, 0x09, 0x65, 0x13, 0x24, 0x8d, 0x8c, 0x0f, 0xd0, 0xc6, 0x80, 0x32, - 0x37, 0x0c, 0xe0, 0x90, 0xbb, 0xcc, 0x39, 0xce, 0xb6, 0xc7, 0xdb, 0x71, 0x64, 0x6c, 0xdc, 0x9b, - 0x37, 0x9c, 0x45, 0xc6, 0xe6, 0x02, 0x90, 0x8c, 0xfe, 0x22, 0x19, 0x7f, 0x8f, 0x36, 0xf3, 0x91, - 0xeb, 0x82, 0x0b, 0x8e, 0xe4, 0x41, 0xb3, 0x92, 0x5c, 0xd7, 0xfb, 0x2b, 0x76, 0x0b, 0xed, 0x81, - 0x3b, 0xa3, 0xda, 0xaf, 0xc7, 0x91, 0xb1, 0xf9, 0xe0, 0x7c, 0x44, 0xb2, 0x9c, 0x04, 0x7f, 0x86, - 0x1a, 0x82, 0xf5, 0xe1, 0xf3, 0xc1, 0x00, 0x1c, 0x29, 0x9a, 0xaf, 0x24, 0x2a, 0xda, 0x6a, 0xbb, - 0x76, 0x0b, 0xf8, 0x2c, 0x32, 0x6e, 0x14, 0xc7, 0x3d, 0x97, 0x0a, 0x41, 0xe6, 0x69, 0xf8, 0x2e, - 0xba, 0xae, 0x7e, 0xe0, 0x3c, 0x94, 0x5d, 0x70, 0xb8, 0xd7, 0x17, 0xcd, 0x6a, 0x4b, 0xeb, 0x54, - 0x6c, 0x1c, 0x47, 0xc6, 0xf5, 0x47, 0x0b, 0x16, 0x72, 0xce, 0x13, 0x3f, 0x46, 0xb7, 0xf2, 0x37, - 0x21, 0x30, 0x65, 0x70, 0x94, 0xef, 0xfa, 0x5a, 0xb2, 
0x47, 0xdf, 0x8a, 0x23, 0xe3, 0xd6, 0xee, - 0xc5, 0x2e, 0xe4, 0x32, 0x6e, 0xfb, 0x57, 0x0d, 0xbd, 0x7a, 0x41, 0xff, 0x60, 0x8a, 0xaa, 0x22, - 0xdd, 0x8a, 0xd9, 0x38, 0xde, 0x5d, 0xbd, 0x3b, 0xce, 0xaf, 0x53, 0xbb, 0x11, 0x47, 0x46, 0x75, - 0x86, 0xce, 0xe2, 0xe2, 0x0e, 0xaa, 0x39, 0xd4, 0x0e, 0xbd, 0x7e, 0xb6, 0xcf, 0xd7, 0xed, 0x75, - 0x35, 0xbe, 0x7b, 0xbb, 0x29, 0x46, 0x72, 0x2b, 0x7e, 0x13, 0x95, 0xc2, 0xc0, 0xcd, 0x56, 0x67, - 0x35, 0x8e, 0x8c, 0xd2, 0x63, 0x72, 0x40, 0x14, 0x66, 0xdf, 0x39, 0x39, 0xd5, 0xd7, 0x9e, 0x9d, - 0xea, 0x6b, 0xcf, 0x4f, 0xf5, 0xb5, 0x1f, 0x62, 0x5d, 0x3b, 0x89, 0x75, 0xed, 0x59, 0xac, 0x6b, - 0xcf, 0x63, 0x5d, 0xfb, 0x33, 0xd6, 0xb5, 0x9f, 0xfe, 0xd2, 0xd7, 0xbe, 0xae, 0x66, 0xa5, 0xfd, - 0x1b, 0x00, 0x00, 0xff, 0xff, 0xbb, 0xc0, 0x7c, 0xc4, 0x6f, 0x0a, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto index a5fa0f30d..17de1a587 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto @@ -28,9 +28,160 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; +// MutatingWebhook describes an admission webhook and the resources and operations it applies to. +message MutatingWebhook { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + optional string name = 1; + + // ClientConfig defines how to communicate with the hook. + // Required + optional WebhookClientConfig clientConfig = 2; + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + // from putting the cluster in a state which cannot be recovered from without completely + // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called + // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + repeated RuleWithOperations rules = 3; + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Ignore. + // +optional + optional string failurePolicy = 4; + + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. 
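As a rough illustration of the matchPolicy semantics documented above, a hypothetical MutatingWebhook could be populated from these v1beta1 types roughly as follows (the webhook name, rules, and values below are placeholders for illustration, not part of the vendored change):

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

func main() {
	// Values mirror the defaults described in the comments above:
	// matchPolicy "Exact"/"Equivalent", failurePolicy "Ignore"/"Fail",
	// timeoutSeconds between 1 and 30, admissionReviewVersions ["v1beta1"].
	matchPolicy := admissionregistrationv1beta1.Equivalent
	failurePolicy := admissionregistrationv1beta1.Fail
	timeout := int32(10)

	webhook := admissionregistrationv1beta1.MutatingWebhook{
		Name:                    "example.webhook.invalid", // placeholder name
		FailurePolicy:           &failurePolicy,
		MatchPolicy:             &matchPolicy,
		TimeoutSeconds:          &timeout,
		AdmissionReviewVersions: []string{"v1beta1"},
		Rules: []admissionregistrationv1beta1.RuleWithOperations{{
			Operations: []admissionregistrationv1beta1.OperationType{"CREATE", "UPDATE"},
			Rule: admissionregistrationv1beta1.Rule{
				APIGroups:   []string{"apps"},
				APIVersions: []string{"v1"},
				Resources:   []string{"deployments"},
			},
		}},
	}

	// With MatchPolicy Equivalent, a request against apps/v1beta1 deployments
	// would still be matched and converted to apps/v1 before being sent.
	fmt.Println(webhook.Name, *webhook.MatchPolicy)
}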
+ // + // Defaults to "Exact" + // +optional + optional string matchPolicy = 9; + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the webhook. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11; + + // SideEffects states whether this webhookk has side effects. + // Acceptable values are: Unknown, None, Some, NoneOnDryRun + // Webhooks with side effects MUST implement a reconciliation system, since a request may be + // rejected by a future step in the admission change and the side effects therefore need to be undone. + // Requests with the dryRun attribute will be auto-rejected if they match a webhook with + // sideEffects == Unknown or Some. Defaults to Unknown. + // +optional + optional string sideEffects = 6; + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 30 seconds. + // +optional + optional int32 timeoutSeconds = 7; + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. 
+ // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + // Default to `['v1beta1']`. + // +optional + repeated string admissionReviewVersions = 8; + + // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. + // Allowed values are "Never" and "IfNeeded". + // + // Never: the webhook will not be called more than once in a single admission evaluation. + // + // IfNeeded: the webhook will be called at least one additional time as part of the admission evaluation + // if the object being admitted is modified by other admission plugins after the initial webhook call. + // Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. + // Note: + // * the number of additional invocations is not guaranteed to be exactly one. + // * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. + // * webhooks that use this option may be reordered to minimize the number of additional invocations. + // * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead. + // + // Defaults to "Never". + // +optional + optional string reinvocationPolicy = 10; +} + // MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. +// Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 MutatingWebhookConfiguration instead. message MutatingWebhookConfiguration { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -38,13 +189,13 @@ message MutatingWebhookConfiguration { // +optional // +patchMergeKey=name // +patchStrategy=merge - repeated Webhook Webhooks = 2; + repeated MutatingWebhook Webhooks = 2; } // MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. message MutatingWebhookConfigurationList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -123,34 +274,16 @@ message ServiceReference { // this service. // +optional optional string path = 3; -} - -// ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. -message ValidatingWebhookConfiguration { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Webhooks is a list of webhooks and the affected resources and operations. + // If specified, the port on the service that hosting webhook. + // Default to 443 for backward compatibility. + // `port` should be a valid port number (1-65535, inclusive). 
// +optional - // +patchMergeKey=name - // +patchStrategy=merge - repeated Webhook Webhooks = 2; + optional int32 port = 4; } -// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. -message ValidatingWebhookConfigurationList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of ValidatingWebhookConfiguration. - repeated ValidatingWebhookConfiguration items = 2; -} - -// Webhook describes an admission webhook and the resources and operations it applies to. -message Webhook { +// ValidatingWebhook describes an admission webhook and the resources and operations it applies to. +message ValidatingWebhook { // The name of the admission webhook. // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where // "imagepolicy" is the name of the webhook, and kubernetes.io is the name @@ -175,6 +308,23 @@ message Webhook { // +optional optional string failurePolicy = 4; + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + // + // Defaults to "Exact" + // +optional + optional string matchPolicy = 9; + // NamespaceSelector decides whether to run the webhook on an object based // on whether the namespace for that object matches the selector. If the // object itself is a namespace, the matching is performed on @@ -214,13 +364,27 @@ message Webhook { // } // // See - // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels // for more examples of label selectors. // // Default to the empty LabelSelector, which matches everything. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. 
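The namespaceSelector example spelled out in the comment above maps directly onto a metav1.LabelSelector; a small illustrative sketch (not part of the vendored change) of the "runlevel not in 0 or 1" selector it describes:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Run the webhook only for objects whose namespace is NOT labeled
	// runlevel=0 or runlevel=1, as in the documented example.
	nsSelector := &metav1.LabelSelector{
		MatchExpressions: []metav1.LabelSelectorRequirement{{
			Key:      "runlevel",
			Operator: metav1.LabelSelectorOpNotIn,
			Values:   []string{"0", "1"},
		}},
	}
	fmt.Printf("%+v\n", nsSelector)
}

The same LabelSelector type backs the objectSelector field added in this change; leaving either selector empty matches everything, as the comments note.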
+ // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10; + // SideEffects states whether this webhookk has side effects. // Acceptable values are: Unknown, None, Some, NoneOnDryRun // Webhooks with side effects MUST implement a reconciliation system, since a request may be @@ -250,6 +414,31 @@ message Webhook { repeated string admissionReviewVersions = 8; } +// ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. +// Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 ValidatingWebhookConfiguration instead. +message ValidatingWebhookConfiguration { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated ValidatingWebhook Webhooks = 2; +} + +// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. +message ValidatingWebhookConfigurationList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ValidatingWebhookConfiguration. + repeated ValidatingWebhookConfiguration items = 2; +} + // WebhookClientConfig contains the information to make a TLS // connection with the webhook message WebhookClientConfig { @@ -287,8 +476,6 @@ message WebhookClientConfig { // // If the webhook is running within the cluster, then you should use `service`. // - // Port 443 will be used if it is open, otherwise it is an error. - // // +optional optional ServiceReference service = 1; diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go index f7025f608..cf065a286 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -84,6 +84,16 @@ const ( Fail FailurePolicyType = "Fail" ) +// MatchPolicyType specifies the type of match policy +type MatchPolicyType string + +const ( + // Exact means requests should only be sent to the webhook if they exactly match a given rule + Exact MatchPolicyType = "Exact" + // Equivalent means requests should be sent to the webhook if they modify a resource listed in rules via another API group or version. + Equivalent MatchPolicyType = "Equivalent" +) + type SideEffectClass string const ( @@ -105,16 +115,17 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. +// Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 ValidatingWebhookConfiguration instead. type ValidatingWebhookConfiguration struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. 
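// Editorial sketch, not part of the vendored diff: the new matchPolicy and
// objectSelector fields described above, populated on a ValidatingWebhook. With
// MatchPolicy Equivalent, a rule written against apps/v1 deployments also matches
// requests arriving via apps/v1beta1 or extensions/v1beta1. Names and labels here
// are hypothetical.
package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	matchPolicy := admissionregistrationv1beta1.Equivalent

	webhook := admissionregistrationv1beta1.ValidatingWebhook{
		Name: "deployments.policy.example.com",
		Rules: []admissionregistrationv1beta1.RuleWithOperations{{
			Operations: []admissionregistrationv1beta1.OperationType{
				admissionregistrationv1beta1.Create,
				admissionregistrationv1beta1.Update,
			},
			Rule: admissionregistrationv1beta1.Rule{
				APIGroups:   []string{"apps"},
				APIVersions: []string{"v1"},
				Resources:   []string{"deployments"},
			},
		}},
		MatchPolicy: &matchPolicy,
		// Opt-in object selector: only objects labelled policy=enforced reach
		// the webhook, so users can bypass it by leaving the label off.
		ObjectSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"policy": "enforced"},
		},
	}
	fmt.Println(webhook.Name, *webhook.MatchPolicy)
}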
// +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Webhooks is a list of webhooks and the affected resources and operations. // +optional // +patchMergeKey=name // +patchStrategy=merge - Webhooks []Webhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` + Webhooks []ValidatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -123,7 +134,7 @@ type ValidatingWebhookConfiguration struct { type ValidatingWebhookConfigurationList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of ValidatingWebhookConfiguration. @@ -135,16 +146,17 @@ type ValidatingWebhookConfigurationList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. +// Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 MutatingWebhookConfiguration instead. type MutatingWebhookConfiguration struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Webhooks is a list of webhooks and the affected resources and operations. // +optional // +patchMergeKey=name // +patchStrategy=merge - Webhooks []Webhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` + Webhooks []MutatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -153,15 +165,147 @@ type MutatingWebhookConfiguration struct { type MutatingWebhookConfigurationList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of MutatingWebhookConfiguration. Items []MutatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` } -// Webhook describes an admission webhook and the resources and operations it applies to. -type Webhook struct { +// ValidatingWebhook describes an admission webhook and the resources and operations it applies to. +type ValidatingWebhook struct { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. 
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // ClientConfig defines how to communicate with the hook. + // Required + ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + // from putting the cluster in a state which cannot be recovered from without completely + // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called + // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Ignore. + // +optional + FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + // + // Defaults to "Exact" + // +optional + MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,9,opt,name=matchPolicy,casttype=MatchPolicyType"` + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the webhook. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. 
+ // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"` + + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,10,opt,name=objectSelector"` + + // SideEffects states whether this webhookk has side effects. + // Acceptable values are: Unknown, None, Some, NoneOnDryRun + // Webhooks with side effects MUST implement a reconciliation system, since a request may be + // rejected by a future step in the admission change and the side effects therefore need to be undone. + // Requests with the dryRun attribute will be auto-rejected if they match a webhook with + // sideEffects == Unknown or Some. Defaults to Unknown. + // +optional + SideEffects *SideEffectClass `json:"sideEffects,omitempty" protobuf:"bytes,6,opt,name=sideEffects,casttype=SideEffectClass"` + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 30 seconds. + // +optional + TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,7,opt,name=timeoutSeconds"` + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. + // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + // Default to `['v1beta1']`. + // +optional + AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty" protobuf:"bytes,8,rep,name=admissionReviewVersions"` +} + +// MutatingWebhook describes an admission webhook and the resources and operations it applies to. +type MutatingWebhook struct { // The name of the admission webhook. // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where // "imagepolicy" is the name of the webhook, and kubernetes.io is the name @@ -186,6 +330,23 @@ type Webhook struct { // +optional FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. 
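// Editorial sketch, not part of the vendored diff: the "runlevel" example from the
// NamespaceSelector comment above, written out as a metav1.LabelSelector. Objects
// in namespaces labelled runlevel=0 or runlevel=1 would not be sent to the webhook.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	namespaceSelector := &metav1.LabelSelector{
		MatchExpressions: []metav1.LabelSelectorRequirement{{
			Key:      "runlevel",
			Operator: metav1.LabelSelectorOpNotIn,
			Values:   []string{"0", "1"},
		}},
	}
	fmt.Printf("%+v\n", namespaceSelector)
}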
+ // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + // + // Defaults to "Exact" + // +optional + MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,9,opt,name=matchPolicy,casttype=MatchPolicyType"` + // NamespaceSelector decides whether to run the webhook on an object based // on whether the namespace for that object matches the selector. If the // object itself is a namespace, the matching is performed on @@ -232,6 +393,20 @@ type Webhook struct { // +optional NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"` + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,11,opt,name=objectSelector"` + // SideEffects states whether this webhookk has side effects. // Acceptable values are: Unknown, None, Some, NoneOnDryRun // Webhooks with side effects MUST implement a reconciliation system, since a request may be @@ -259,8 +434,39 @@ type Webhook struct { // Default to `['v1beta1']`. // +optional AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty" protobuf:"bytes,8,rep,name=admissionReviewVersions"` + + // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. + // Allowed values are "Never" and "IfNeeded". + // + // Never: the webhook will not be called more than once in a single admission evaluation. + // + // IfNeeded: the webhook will be called at least one additional time as part of the admission evaluation + // if the object being admitted is modified by other admission plugins after the initial webhook call. + // Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. + // Note: + // * the number of additional invocations is not guaranteed to be exactly one. + // * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. + // * webhooks that use this option may be reordered to minimize the number of additional invocations. 
+ // * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead. + // + // Defaults to "Never". + // +optional + ReinvocationPolicy *ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,10,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"` } +// ReinvocationPolicyType specifies what type of policy the admission hook uses. +type ReinvocationPolicyType string + +const ( + // NeverReinvocationPolicy indicates that the webhook must not be called more than once in a + // single admission evaluation. + NeverReinvocationPolicy ReinvocationPolicyType = "Never" + // IfNeededReinvocationPolicy indicates that the webhook may be called at least one + // additional time as part of the admission evaluation if the object being admitted is + // modified by other admission plugins after the initial webhook call. + IfNeededReinvocationPolicy ReinvocationPolicyType = "IfNeeded" +) + // RuleWithOperations is a tuple of Operations and Resources. It is recommended to make // sure that all the tuple expansions are valid. type RuleWithOperations struct { @@ -322,8 +528,6 @@ type WebhookClientConfig struct { // // If the webhook is running within the cluster, then you should use `service`. // - // Port 443 will be used if it is open, otherwise it is an error. - // // +optional Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"` @@ -346,4 +550,10 @@ type ServiceReference struct { // this service. // +optional Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` + + // If specified, the port on the service that hosting webhook. + // Default to 443 for backward compatibility. + // `port` should be a valid port number (1-65535, inclusive). + // +optional + Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` } diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go index f400f9f42..f40a15d50 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go @@ -27,9 +27,28 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_MutatingWebhook = map[string]string{ + "": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.", + "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + "clientConfig": "ClientConfig defines how to communicate with the hook. Required", + "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", + "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. 
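// Editorial sketch, not part of the vendored diff: combining the failure-handling
// knobs documented in the webhook types above. SideEffectClassNoneOnDryRun keeps
// dry-run requests from being auto-rejected, TimeoutSeconds must stay within the
// documented 1-30 second range, and FailurePolicy Fail rejects requests when the
// webhook cannot be reached. The webhook name is hypothetical.
package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

func main() {
	sideEffects := admissionregistrationv1beta1.SideEffectClassNoneOnDryRun
	timeout := int32(10) // within the documented 1-30 second range
	failurePolicy := admissionregistrationv1beta1.Fail

	webhook := admissionregistrationv1beta1.ValidatingWebhook{
		Name:           "strict.example.com",
		SideEffects:    &sideEffects,
		TimeoutSeconds: &timeout,
		FailurePolicy:  &failurePolicy,
	}
	fmt.Println(webhook.Name, *webhook.TimeoutSeconds)
}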
Defaults to Ignore.", + "matchPolicy": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Exact\"", + "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "objectSelector": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", + "sideEffects": "SideEffects states whether this webhookk has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", + "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. 
Default to 30 seconds.", + "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.", + "reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".", +} + +func (MutatingWebhook) SwaggerDoc() map[string]string { + return map_MutatingWebhook +} + var map_MutatingWebhookConfiguration = map[string]string{ - "": "MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + "": "MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 MutatingWebhookConfiguration instead.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", } @@ -39,7 +58,7 @@ func (MutatingWebhookConfiguration) SwaggerDoc() map[string]string { var map_MutatingWebhookConfigurationList = map[string]string{ "": "MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of MutatingWebhookConfiguration.", } @@ -73,15 +92,34 @@ var map_ServiceReference = map[string]string{ "namespace": "`namespace` is the namespace of the service. Required", "name": "`name` is the name of the service. Required", "path": "`path` is an optional URL path which will be sent in any request to this service.", + "port": "If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. 
`port` should be a valid port number (1-65535, inclusive).", } func (ServiceReference) SwaggerDoc() map[string]string { return map_ServiceReference } +var map_ValidatingWebhook = map[string]string{ + "": "ValidatingWebhook describes an admission webhook and the resources and operations it applies to.", + "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + "clientConfig": "ClientConfig defines how to communicate with the hook. Required", + "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", + "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.", + "matchPolicy": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Exact\"", + "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "objectSelector": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. 
objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", + "sideEffects": "SideEffects states whether this webhookk has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", + "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.", + "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.", +} + +func (ValidatingWebhook) SwaggerDoc() map[string]string { + return map_ValidatingWebhook +} + var map_ValidatingWebhookConfiguration = map[string]string{ - "": "ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + "": "ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 ValidatingWebhookConfiguration instead.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", } @@ -91,7 +129,7 @@ func (ValidatingWebhookConfiguration) SwaggerDoc() map[string]string { var map_ValidatingWebhookConfigurationList = map[string]string{ "": "ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. 
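// Editorial sketch, not part of the vendored diff: the generated SwaggerDoc maps
// shown above are how documentation tooling looks up per-field API descriptions.
// This simply prints the description of the new matchPolicy field.
package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

func main() {
	docs := admissionregistrationv1beta1.ValidatingWebhook{}.SwaggerDoc()
	fmt.Println(docs["matchPolicy"])
}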
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of ValidatingWebhookConfiguration.", } @@ -99,26 +137,10 @@ func (ValidatingWebhookConfigurationList) SwaggerDoc() map[string]string { return map_ValidatingWebhookConfigurationList } -var map_Webhook = map[string]string{ - "": "Webhook describes an admission webhook and the resources and operations it applies to.", - "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", - "clientConfig": "ClientConfig defines how to communicate with the hook. Required", - "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", - "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.", - "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", - "sideEffects": "SideEffects states whether this webhookk has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", - "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.", - "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. 
API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.", -} - -func (Webhook) SwaggerDoc() map[string]string { - return map_Webhook -} - var map_WebhookClientConfig = map[string]string{ "": "WebhookClientConfig contains the information to make a TLS connection with the webhook", "url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", - "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.\n\nPort 443 will be used if it is open, otherwise it is an error.", + "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.", "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.", } diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go index ab79ee6aa..c4570d031 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go @@ -25,6 +25,70 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { + *out = *in + in.ClientConfig.DeepCopyInto(&out.ClientConfig) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(FailurePolicyType) + **out = **in + } + if in.MatchPolicy != nil { + in, out := &in.MatchPolicy, &out.MatchPolicy + *out = new(MatchPolicyType) + **out = **in + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ObjectSelector != nil { + in, out := &in.ObjectSelector, &out.ObjectSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.SideEffects != nil { + in, out := &in.SideEffects, &out.SideEffects + *out = new(SideEffectClass) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int32) + **out = **in + } + if in.AdmissionReviewVersions != nil { + in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ReinvocationPolicy != nil { + in, out := &in.ReinvocationPolicy, &out.ReinvocationPolicy + *out = new(ReinvocationPolicyType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhook. +func (in *MutatingWebhook) DeepCopy() *MutatingWebhook { + if in == nil { + return nil + } + out := new(MutatingWebhook) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MutatingWebhookConfiguration) DeepCopyInto(out *MutatingWebhookConfiguration) { *out = *in @@ -32,7 +96,7 @@ func (in *MutatingWebhookConfiguration) DeepCopyInto(out *MutatingWebhookConfigu in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.Webhooks != nil { in, out := &in.Webhooks, &out.Webhooks - *out = make([]Webhook, len(*in)) + *out = make([]MutatingWebhook, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -62,7 +126,7 @@ func (in *MutatingWebhookConfiguration) DeepCopyObject() runtime.Object { func (in *MutatingWebhookConfigurationList) DeepCopyInto(out *MutatingWebhookConfigurationList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]MutatingWebhookConfiguration, len(*in)) @@ -157,6 +221,11 @@ func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { *out = new(string) **out = **in } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } return } @@ -170,6 +239,65 @@ func (in *ServiceReference) DeepCopy() *ServiceReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValidatingWebhook) DeepCopyInto(out *ValidatingWebhook) { + *out = *in + in.ClientConfig.DeepCopyInto(&out.ClientConfig) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(FailurePolicyType) + **out = **in + } + if in.MatchPolicy != nil { + in, out := &in.MatchPolicy, &out.MatchPolicy + *out = new(MatchPolicyType) + **out = **in + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ObjectSelector != nil { + in, out := &in.ObjectSelector, &out.ObjectSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.SideEffects != nil { + in, out := &in.SideEffects, &out.SideEffects + *out = new(SideEffectClass) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int32) + **out = **in + } + if in.AdmissionReviewVersions != nil { + in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhook. +func (in *ValidatingWebhook) DeepCopy() *ValidatingWebhook { + if in == nil { + return nil + } + out := new(ValidatingWebhook) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ValidatingWebhookConfiguration) DeepCopyInto(out *ValidatingWebhookConfiguration) { *out = *in @@ -177,7 +305,7 @@ func (in *ValidatingWebhookConfiguration) DeepCopyInto(out *ValidatingWebhookCon in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.Webhooks != nil { in, out := &in.Webhooks, &out.Webhooks - *out = make([]Webhook, len(*in)) + *out = make([]ValidatingWebhook, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -207,7 +335,7 @@ func (in *ValidatingWebhookConfiguration) DeepCopyObject() runtime.Object { func (in *ValidatingWebhookConfigurationList) DeepCopyInto(out *ValidatingWebhookConfigurationList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ValidatingWebhookConfiguration, len(*in)) @@ -236,55 +364,6 @@ func (in *ValidatingWebhookConfigurationList) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
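// Editorial sketch, not part of the vendored diff: the regenerated DeepCopy
// helpers above let callers mutate a configuration without aliasing the original
// (for example an object obtained from a shared informer cache). The webhook
// names are hypothetical.
package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

func main() {
	original := &admissionregistrationv1beta1.ValidatingWebhookConfiguration{
		Webhooks: []admissionregistrationv1beta1.ValidatingWebhook{{Name: "a.example.com"}},
	}

	copied := original.DeepCopy()
	copied.Webhooks[0].Name = "b.example.com"

	// The original is untouched because the Webhooks slice was deep-copied,
	// not aliased.
	fmt.Println(original.Webhooks[0].Name, copied.Webhooks[0].Name)
}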
-func (in *Webhook) DeepCopyInto(out *Webhook) { - *out = *in - in.ClientConfig.DeepCopyInto(&out.ClientConfig) - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]RuleWithOperations, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.FailurePolicy != nil { - in, out := &in.FailurePolicy, &out.FailurePolicy - *out = new(FailurePolicyType) - **out = **in - } - if in.NamespaceSelector != nil { - in, out := &in.NamespaceSelector, &out.NamespaceSelector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.SideEffects != nil { - in, out := &in.SideEffects, &out.SideEffects - *out = new(SideEffectClass) - **out = **in - } - if in.TimeoutSeconds != nil { - in, out := &in.TimeoutSeconds, &out.TimeoutSeconds - *out = new(int32) - **out = **in - } - if in.AdmissionReviewVersions != nil { - in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Webhook. -func (in *Webhook) DeepCopy() *Webhook { - if in == nil { - return nil - } - out := new(Webhook) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { *out = *in diff --git a/vendor/k8s.io/api/apps/v1/generated.pb.go b/vendor/k8s.io/api/apps/v1/generated.pb.go index 5b29f4320..425144d85 100644 --- a/vendor/k8s.io/api/apps/v1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1/generated.pb.go @@ -17,57 +17,25 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto - - It has these top-level messages: - ControllerRevision - ControllerRevisionList - DaemonSet - DaemonSetCondition - DaemonSetList - DaemonSetSpec - DaemonSetStatus - DaemonSetUpdateStrategy - Deployment - DeploymentCondition - DeploymentList - DeploymentSpec - DeploymentStatus - DeploymentStrategy - ReplicaSet - ReplicaSetCondition - ReplicaSetList - ReplicaSetSpec - ReplicaSetStatus - RollingUpdateDaemonSet - RollingUpdateDeployment - RollingUpdateStatefulSetStrategy - StatefulSet - StatefulSetCondition - StatefulSetList - StatefulSetSpec - StatefulSetStatus - StatefulSetUpdateStrategy -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + proto "github.com/gogo/protobuf/proto" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import strings "strings" -import reflect "reflect" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import io "io" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -80,124 +48,790 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } -func (*ControllerRevision) ProtoMessage() {} -func (*ControllerRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } +func (*ControllerRevision) ProtoMessage() {} +func (*ControllerRevision) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{0} +} +func (m *ControllerRevision) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevision) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevision.Merge(m, src) +} +func (m *ControllerRevision) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevision) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRevision.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo + +func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } +func (*ControllerRevisionList) ProtoMessage() {} +func (*ControllerRevisionList) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{1} +} +func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevisionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevisionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevisionList.Merge(m, src) +} +func (m *ControllerRevisionList) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevisionList) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRevisionList.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo + +func (m *DaemonSet) Reset() { *m = DaemonSet{} } +func (*DaemonSet) ProtoMessage() {} +func (*DaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{2} +} +func (m *DaemonSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSet.Merge(m, src) +} +func (m *DaemonSet) XXX_Size() int { + return m.Size() +} +func (m *DaemonSet) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSet.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSet proto.InternalMessageInfo + +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{3} +} +func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetCondition.Merge(m, src) +} +func (m 
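// Editorial sketch, not part of the vendored diff: the new XXX_* wrappers above
// are thin adapters around the Marshal/Unmarshal/Size methods that gogo-protobuf
// already generates for these types, so a wire round trip still looks like this.
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	in := appsv1.ControllerRevision{
		ObjectMeta: metav1.ObjectMeta{Name: "demo"},
		Revision:   1,
	}

	raw, err := in.Marshal()
	if err != nil {
		panic(err)
	}

	var out appsv1.ControllerRevision
	if err := out.Unmarshal(raw); err != nil {
		panic(err)
	}
	fmt.Println(out.Name, out.Revision)
}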
*DaemonSetCondition) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetCondition.DiscardUnknown(m) +} -func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } -func (*ControllerRevisionList) ProtoMessage() {} -func (*ControllerRevisionList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo -func (m *DaemonSet) Reset() { *m = DaemonSet{} } -func (*DaemonSet) ProtoMessage() {} -func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } +func (*DaemonSetList) ProtoMessage() {} +func (*DaemonSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{4} +} +func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetList.Merge(m, src) +} +func (m *DaemonSetList) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetList) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetList.DiscardUnknown(m) +} -func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } -func (*DaemonSetCondition) ProtoMessage() {} -func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo -func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } -func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } +func (*DaemonSetSpec) ProtoMessage() {} +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{5} +} +func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetSpec.Merge(m, src) +} +func (m *DaemonSetSpec) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetSpec.DiscardUnknown(m) +} -func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } -func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo -func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } -func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } +func (*DaemonSetStatus) ProtoMessage() {} +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{6} +} +func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + 
if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetStatus.Merge(m, src) +} +func (m *DaemonSetStatus) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetStatus.DiscardUnknown(m) +} -func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } -func (*DaemonSetUpdateStrategy) ProtoMessage() {} -func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo -func (m *Deployment) Reset() { *m = Deployment{} } -func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } +func (*DaemonSetUpdateStrategy) ProtoMessage() {} +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{7} +} +func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetUpdateStrategy.Merge(m, src) +} +func (m *DaemonSetUpdateStrategy) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetUpdateStrategy.DiscardUnknown(m) +} -func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } -func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo -func (m *DeploymentList) Reset() { *m = DeploymentList{} } -func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{8} +} +func (m *Deployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Deployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_Deployment.Merge(m, src) +} +func (m *Deployment) XXX_Size() int { + return m.Size() +} +func (m *Deployment) XXX_DiscardUnknown() { + xxx_messageInfo_Deployment.DiscardUnknown(m) +} -func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } -func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_Deployment proto.InternalMessageInfo -func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } -func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, 
[]int) { + return fileDescriptor_e1014cab6f31e43b, []int{9} +} +func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentCondition.Merge(m, src) +} +func (m *DeploymentCondition) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) +} -func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } -func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo -func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } -func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{10} +} +func (m *DeploymentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentList.Merge(m, src) +} +func (m *DeploymentList) XXX_Size() int { + return m.Size() +} +func (m *DeploymentList) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentList.DiscardUnknown(m) +} -func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } -func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +var xxx_messageInfo_DeploymentList proto.InternalMessageInfo -func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } -func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{11} +} +func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentSpec.Merge(m, src) +} +func (m *DeploymentSpec) XXX_Size() int { + return m.Size() +} +func (m *DeploymentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentSpec.DiscardUnknown(m) +} -func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } -func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo -func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } -func (*ReplicaSetStatus) ProtoMessage() {} -func 
(*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{12} +} +func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStatus.Merge(m, src) +} +func (m *DeploymentStatus) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStatus.DiscardUnknown(m) +} -func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } -func (*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo + +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{13} +} +func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStrategy.Merge(m, src) +} +func (m *DeploymentStrategy) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo + +func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } +func (*ReplicaSet) ProtoMessage() {} +func (*ReplicaSet) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{14} +} +func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSet.Merge(m, src) +} +func (m *ReplicaSet) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSet) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSet.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo + +func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } +func (*ReplicaSetCondition) ProtoMessage() {} +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{15} +} +func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetCondition.Merge(m, src) +} +func (m *ReplicaSetCondition) 
XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo + +func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } +func (*ReplicaSetList) ProtoMessage() {} +func (*ReplicaSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{16} +} +func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetList.Merge(m, src) +} +func (m *ReplicaSetList) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetList) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo + +func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } +func (*ReplicaSetSpec) ProtoMessage() {} +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{17} +} +func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetSpec.Merge(m, src) +} +func (m *ReplicaSetSpec) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo + +func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } +func (*ReplicaSetStatus) ProtoMessage() {} +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{18} +} +func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetStatus.Merge(m, src) +} +func (m *ReplicaSetStatus) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo + +func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } +func (*RollingUpdateDaemonSet) ProtoMessage() {} +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{19} +} +func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDaemonSet.Merge(m, src) +} +func (m *RollingUpdateDaemonSet) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDaemonSet) 
XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDaemonSet.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{20} + return fileDescriptor_e1014cab6f31e43b, []int{20} +} +func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } +func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDeployment.Merge(m, src) +} +func (m *RollingUpdateDeployment) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{21} + return fileDescriptor_e1014cab6f31e43b, []int{21} +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.Merge(m, src) +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateStatefulSetStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.DiscardUnknown(m) } -func (m *StatefulSet) Reset() { *m = StatefulSet{} } -func (*StatefulSet) ProtoMessage() {} -func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo -func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } -func (*StatefulSetCondition) ProtoMessage() {} -func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (m *StatefulSet) Reset() { *m = StatefulSet{} } +func (*StatefulSet) ProtoMessage() {} +func (*StatefulSet) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{22} +} +func (m *StatefulSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSet.Merge(m, src) +} +func (m *StatefulSet) XXX_Size() int { + return m.Size() +} +func (m *StatefulSet) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSet.DiscardUnknown(m) +} -func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } -func (*StatefulSetList) ProtoMessage() {} -func (*StatefulSetList) Descriptor() ([]byte, []int) { 
return fileDescriptorGenerated, []int{24} } +var xxx_messageInfo_StatefulSet proto.InternalMessageInfo -func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } -func (*StatefulSetSpec) ProtoMessage() {} -func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{23} +} +func (m *StatefulSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetCondition.Merge(m, src) +} +func (m *StatefulSetCondition) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetCondition.DiscardUnknown(m) +} -func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } -func (*StatefulSetStatus) ProtoMessage() {} -func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo + +func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } +func (*StatefulSetList) ProtoMessage() {} +func (*StatefulSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{24} +} +func (m *StatefulSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetList.Merge(m, src) +} +func (m *StatefulSetList) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetList) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo + +func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } +func (*StatefulSetSpec) ProtoMessage() {} +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{25} +} +func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetSpec.Merge(m, src) +} +func (m *StatefulSetSpec) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo + +func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } +func (*StatefulSetStatus) ProtoMessage() {} +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{26} +} +func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = 
b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetStatus.Merge(m, src) +} +func (m *StatefulSetStatus) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{27} + return fileDescriptor_e1014cab6f31e43b, []int{27} +} +func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetUpdateStrategy.Merge(m, src) +} +func (m *StatefulSetUpdateStrategy) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetUpdateStrategy.DiscardUnknown(m) } +var xxx_messageInfo_StatefulSetUpdateStrategy proto.InternalMessageInfo + func init() { proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1.ControllerRevision") proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1.ControllerRevisionList") @@ -228,10 +862,146 @@ func init() { proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1.StatefulSetStatus") proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1.StatefulSetUpdateStrategy") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto", fileDescriptor_e1014cab6f31e43b) +} + +var fileDescriptor_e1014cab6f31e43b = []byte{ + // 2031 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x24, 0x47, + 0x1d, 0x75, 0xcf, 0x87, 0x3d, 0x2e, 0xaf, 0xed, 0xdd, 0xb2, 0xb1, 0x27, 0xbb, 0x64, 0x66, 0x19, + 0x60, 0xe3, 0x64, 0xb3, 0x3d, 0xec, 0x66, 0x13, 0xa1, 0x2c, 0x02, 0x79, 0xc6, 0x21, 0x84, 0x78, + 0x6c, 0x53, 0x5e, 0xef, 0x61, 0x09, 0x12, 0xe5, 0xe9, 0xda, 0x71, 0xc7, 0xfd, 0xa5, 0xee, 0xea, + 0x61, 0x47, 0x5c, 0x10, 0x12, 0x9c, 0x38, 0xf0, 0x9f, 0x20, 0x84, 0xe0, 0x86, 0x22, 0xc4, 0x65, + 0x2f, 0x48, 0x11, 0x17, 0x72, 0xb2, 0xd8, 0xc9, 0x09, 0xa1, 0x1c, 0xb9, 0xe4, 0x02, 0xaa, 0xea, + 0xea, 0xef, 0x6a, 0xcf, 0xd8, 0x9b, 0x38, 0x24, 0xca, 0xcd, 0x53, 0xf5, 0x7e, 0xaf, 0x7f, 0x55, + 0xf5, 0xab, 0x7a, 0xaf, 0xab, 0x0d, 0xee, 0x1d, 0x7f, 0xdb, 0x53, 0x75, 0xbb, 0x7d, 0xec, 0x1f, + 0x12, 0xd7, 0x22, 0x94, 0x78, 0xed, 0x21, 0xb1, 0x34, 0xdb, 0x6d, 0x8b, 0x0e, 0xec, 0xe8, 0x6d, + 0xec, 0x38, 0x5e, 0x7b, 0x78, 0xbb, 0x3d, 0x20, 0x16, 0x71, 0x31, 0x25, 0x9a, 0xea, 0xb8, 0x36, + 0xb5, 0x21, 0x0c, 0x30, 0x2a, 0x76, 0x74, 0x95, 0x61, 0xd4, 0xe1, 0xed, 0xab, 0xb7, 0x06, 0x3a, + 0x3d, 0xf2, 0x0f, 0xd5, 0xbe, 0x6d, 0xb6, 0x07, 0xf6, 0xc0, 0x6e, 0x73, 0xe8, 0xa1, 0xff, 0x88, + 0xff, 0xe2, 0x3f, 0xf8, 0x5f, 0x01, 0xc5, 0xd5, 0x56, 0xe2, 0x31, 0x7d, 0xdb, 0x25, 0x92, 0xc7, + 0x5c, 0xbd, 0x1b, 0x63, 0x4c, 0xdc, 0x3f, 0xd2, 0x2d, 0xe2, 0x8e, 0xda, 0xce, 0xf1, 0x80, 0x35, + 0x78, 0x6d, 0x93, 0x50, 0x2c, 0x8b, 0x6a, 
0x17, 0x45, 0xb9, 0xbe, 0x45, 0x75, 0x93, 0xe4, 0x02, + 0x5e, 0x9b, 0x14, 0xe0, 0xf5, 0x8f, 0x88, 0x89, 0x73, 0x71, 0xaf, 0x14, 0xc5, 0xf9, 0x54, 0x37, + 0xda, 0xba, 0x45, 0x3d, 0xea, 0x66, 0x83, 0x5a, 0xff, 0x51, 0x00, 0xec, 0xda, 0x16, 0x75, 0x6d, + 0xc3, 0x20, 0x2e, 0x22, 0x43, 0xdd, 0xd3, 0x6d, 0x0b, 0xfe, 0x14, 0xd4, 0xd8, 0x78, 0x34, 0x4c, + 0x71, 0x5d, 0xb9, 0xae, 0x6c, 0x2c, 0xdc, 0xf9, 0x96, 0x1a, 0x4f, 0x72, 0x44, 0xaf, 0x3a, 0xc7, + 0x03, 0xd6, 0xe0, 0xa9, 0x0c, 0xad, 0x0e, 0x6f, 0xab, 0xbb, 0x87, 0xef, 0x92, 0x3e, 0xed, 0x11, + 0x8a, 0x3b, 0xf0, 0xc9, 0x49, 0x73, 0x66, 0x7c, 0xd2, 0x04, 0x71, 0x1b, 0x8a, 0x58, 0xe1, 0x2e, + 0xa8, 0x70, 0xf6, 0x12, 0x67, 0xbf, 0x55, 0xc8, 0x2e, 0x06, 0xad, 0x22, 0xfc, 0xb3, 0x37, 0x1e, + 0x53, 0x62, 0xb1, 0xf4, 0x3a, 0x97, 0x04, 0x75, 0x65, 0x0b, 0x53, 0x8c, 0x38, 0x11, 0x7c, 0x19, + 0xd4, 0x5c, 0x91, 0x7e, 0xbd, 0x7c, 0x5d, 0xd9, 0x28, 0x77, 0x2e, 0x0b, 0x54, 0x2d, 0x1c, 0x16, + 0x8a, 0x10, 0xad, 0xbf, 0x2a, 0x60, 0x2d, 0x3f, 0xee, 0x6d, 0xdd, 0xa3, 0xf0, 0x9d, 0xdc, 0xd8, + 0xd5, 0xe9, 0xc6, 0xce, 0xa2, 0xf9, 0xc8, 0xa3, 0x07, 0x87, 0x2d, 0x89, 0x71, 0xbf, 0x0d, 0xaa, + 0x3a, 0x25, 0xa6, 0x57, 0x2f, 0x5d, 0x2f, 0x6f, 0x2c, 0xdc, 0xb9, 0xa1, 0xe6, 0x6b, 0x57, 0xcd, + 0x27, 0xd6, 0x59, 0x14, 0x94, 0xd5, 0xb7, 0x58, 0x30, 0x0a, 0x38, 0x5a, 0xff, 0x55, 0xc0, 0xfc, + 0x16, 0x26, 0xa6, 0x6d, 0xed, 0x13, 0x7a, 0x01, 0x8b, 0xd6, 0x05, 0x15, 0xcf, 0x21, 0x7d, 0xb1, + 0x68, 0x5f, 0x93, 0xe5, 0x1e, 0xa5, 0xb3, 0xef, 0x90, 0x7e, 0xbc, 0x50, 0xec, 0x17, 0xe2, 0xc1, + 0xf0, 0x6d, 0x30, 0xeb, 0x51, 0x4c, 0x7d, 0x8f, 0x2f, 0xd3, 0xc2, 0x9d, 0xaf, 0x9f, 0x4e, 0xc3, + 0xa1, 0x9d, 0x25, 0x41, 0x34, 0x1b, 0xfc, 0x46, 0x82, 0xa2, 0xf5, 0xaf, 0x12, 0x80, 0x11, 0xb6, + 0x6b, 0x5b, 0x9a, 0x4e, 0x59, 0xfd, 0xbe, 0x0e, 0x2a, 0x74, 0xe4, 0x10, 0x3e, 0x0d, 0xf3, 0x9d, + 0x1b, 0x61, 0x16, 0xf7, 0x47, 0x0e, 0xf9, 0xf8, 0xa4, 0xb9, 0x96, 0x8f, 0x60, 0x3d, 0x88, 0xc7, + 0xc0, 0xed, 0x28, 0xbf, 0x12, 0x8f, 0xbe, 0x9b, 0x7e, 0xf4, 0xc7, 0x27, 0x4d, 0xc9, 0x61, 0xa1, + 0x46, 0x4c, 0xe9, 0x04, 0xe1, 0x10, 0x40, 0x03, 0x7b, 0xf4, 0xbe, 0x8b, 0x2d, 0x2f, 0x78, 0x92, + 0x6e, 0x12, 0x31, 0xf2, 0x97, 0xa6, 0x5b, 0x1e, 0x16, 0xd1, 0xb9, 0x2a, 0xb2, 0x80, 0xdb, 0x39, + 0x36, 0x24, 0x79, 0x02, 0xbc, 0x01, 0x66, 0x5d, 0x82, 0x3d, 0xdb, 0xaa, 0x57, 0xf8, 0x28, 0xa2, + 0x09, 0x44, 0xbc, 0x15, 0x89, 0x5e, 0xf8, 0x22, 0x98, 0x33, 0x89, 0xe7, 0xe1, 0x01, 0xa9, 0x57, + 0x39, 0x70, 0x59, 0x00, 0xe7, 0x7a, 0x41, 0x33, 0x0a, 0xfb, 0x5b, 0xbf, 0x57, 0xc0, 0x62, 0x34, + 0x73, 0x17, 0xb0, 0x55, 0x3a, 0xe9, 0xad, 0xf2, 0xfc, 0xa9, 0x75, 0x52, 0xb0, 0x43, 0xde, 0x2b, + 0x27, 0x72, 0x66, 0x45, 0x08, 0x7f, 0x02, 0x6a, 0x1e, 0x31, 0x48, 0x9f, 0xda, 0xae, 0xc8, 0xf9, + 0x95, 0x29, 0x73, 0xc6, 0x87, 0xc4, 0xd8, 0x17, 0xa1, 0x9d, 0x4b, 0x2c, 0xe9, 0xf0, 0x17, 0x8a, + 0x28, 0xe1, 0x8f, 0x40, 0x8d, 0x12, 0xd3, 0x31, 0x30, 0x25, 0x62, 0x9b, 0xa4, 0xea, 0x9b, 0x95, + 0x0b, 0x23, 0xdb, 0xb3, 0xb5, 0xfb, 0x02, 0xc6, 0x37, 0x4a, 0x34, 0x0f, 0x61, 0x2b, 0x8a, 0x68, + 0xe0, 0x31, 0x58, 0xf2, 0x1d, 0x8d, 0x21, 0x29, 0x3b, 0xba, 0x07, 0x23, 0x51, 0x3e, 0x37, 0x4f, + 0x9d, 0x90, 0x83, 0x54, 0x48, 0x67, 0x4d, 0x3c, 0x60, 0x29, 0xdd, 0x8e, 0x32, 0xd4, 0x70, 0x13, + 0x2c, 0x9b, 0xba, 0x85, 0x08, 0xd6, 0x46, 0xfb, 0xa4, 0x6f, 0x5b, 0x9a, 0xc7, 0x0b, 0xa8, 0xda, + 0x59, 0x17, 0x04, 0xcb, 0xbd, 0x74, 0x37, 0xca, 0xe2, 0xe1, 0x36, 0x58, 0x0d, 0xcf, 0xd9, 0x1f, + 0xe8, 0x1e, 0xb5, 0xdd, 0xd1, 0xb6, 0x6e, 0xea, 0xb4, 0x3e, 0xcb, 0x79, 0xea, 0xe3, 0x93, 0xe6, + 0x2a, 0x92, 0xf4, 0x23, 0x69, 0x54, 0xeb, 0x37, 0xb3, 0x60, 0x39, 
0x73, 0x1a, 0xc0, 0x07, 0x60, + 0xad, 0xef, 0xbb, 0x2e, 0xb1, 0xe8, 0x8e, 0x6f, 0x1e, 0x12, 0x77, 0xbf, 0x7f, 0x44, 0x34, 0xdf, + 0x20, 0x1a, 0x5f, 0xd1, 0x6a, 0xa7, 0x21, 0x72, 0x5d, 0xeb, 0x4a, 0x51, 0xa8, 0x20, 0x1a, 0xfe, + 0x10, 0x40, 0x8b, 0x37, 0xf5, 0x74, 0xcf, 0x8b, 0x38, 0x4b, 0x9c, 0x33, 0xda, 0x80, 0x3b, 0x39, + 0x04, 0x92, 0x44, 0xb1, 0x1c, 0x35, 0xe2, 0xe9, 0x2e, 0xd1, 0xb2, 0x39, 0x96, 0xd3, 0x39, 0x6e, + 0x49, 0x51, 0xa8, 0x20, 0x1a, 0xbe, 0x0a, 0x16, 0x82, 0xa7, 0xf1, 0x39, 0x17, 0x8b, 0xb3, 0x22, + 0xc8, 0x16, 0x76, 0xe2, 0x2e, 0x94, 0xc4, 0xb1, 0xa1, 0xd9, 0x87, 0x1e, 0x71, 0x87, 0x44, 0x7b, + 0x33, 0xf0, 0x00, 0x4c, 0x28, 0xab, 0x5c, 0x28, 0xa3, 0xa1, 0xed, 0xe6, 0x10, 0x48, 0x12, 0xc5, + 0x86, 0x16, 0x54, 0x4d, 0x6e, 0x68, 0xb3, 0xe9, 0xa1, 0x1d, 0x48, 0x51, 0xa8, 0x20, 0x9a, 0xd5, + 0x5e, 0x90, 0xf2, 0xe6, 0x10, 0xeb, 0x06, 0x3e, 0x34, 0x48, 0x7d, 0x2e, 0x5d, 0x7b, 0x3b, 0xe9, + 0x6e, 0x94, 0xc5, 0xc3, 0x37, 0xc1, 0x95, 0xa0, 0xe9, 0xc0, 0xc2, 0x11, 0x49, 0x8d, 0x93, 0x3c, + 0x27, 0x48, 0xae, 0xec, 0x64, 0x01, 0x28, 0x1f, 0x03, 0x5f, 0x07, 0x4b, 0x7d, 0xdb, 0x30, 0x78, + 0x3d, 0x76, 0x6d, 0xdf, 0xa2, 0xf5, 0x79, 0xce, 0x02, 0xd9, 0x1e, 0xea, 0xa6, 0x7a, 0x50, 0x06, + 0x09, 0x1f, 0x02, 0xd0, 0x0f, 0xe5, 0xc0, 0xab, 0x83, 0x62, 0xa1, 0xcf, 0xeb, 0x50, 0x2c, 0xc0, + 0x51, 0x93, 0x87, 0x12, 0x6c, 0xad, 0xf7, 0x14, 0xb0, 0x5e, 0xb0, 0xc7, 0xe1, 0xf7, 0x52, 0xaa, + 0x77, 0x33, 0xa3, 0x7a, 0xd7, 0x0a, 0xc2, 0x12, 0xd2, 0xd7, 0x07, 0x8b, 0xcc, 0x77, 0xe8, 0xd6, + 0x20, 0x80, 0x88, 0x13, 0xec, 0x25, 0x59, 0xee, 0x28, 0x09, 0x8c, 0x8f, 0xe1, 0x2b, 0xe3, 0x93, + 0xe6, 0x62, 0xaa, 0x0f, 0xa5, 0x39, 0x5b, 0xbf, 0x2c, 0x01, 0xb0, 0x45, 0x1c, 0xc3, 0x1e, 0x99, + 0xc4, 0xba, 0x08, 0xd7, 0xb2, 0x95, 0x72, 0x2d, 0x2d, 0xe9, 0x42, 0x44, 0xf9, 0x14, 0xda, 0x96, + 0xed, 0x8c, 0x6d, 0xf9, 0xc6, 0x04, 0x9e, 0xd3, 0x7d, 0xcb, 0x3f, 0xca, 0x60, 0x25, 0x06, 0xc7, + 0xc6, 0xe5, 0x5e, 0x6a, 0x09, 0x5f, 0xc8, 0x2c, 0xe1, 0xba, 0x24, 0xe4, 0x53, 0x73, 0x2e, 0xef, + 0x82, 0x25, 0xe6, 0x2b, 0x82, 0x55, 0xe3, 0xae, 0x65, 0xf6, 0xcc, 0xae, 0x25, 0x52, 0x9d, 0xed, + 0x14, 0x13, 0xca, 0x30, 0x17, 0xb8, 0xa4, 0xb9, 0xcf, 0xa3, 0x4b, 0xfa, 0x83, 0x02, 0x96, 0xe2, + 0x65, 0xba, 0x00, 0x9b, 0xd4, 0x4d, 0xdb, 0xa4, 0xc6, 0xe9, 0x75, 0x59, 0xe0, 0x93, 0xfe, 0x5e, + 0x49, 0x66, 0xcd, 0x8d, 0xd2, 0x06, 0x7b, 0xa1, 0x72, 0x0c, 0xbd, 0x8f, 0x3d, 0x21, 0xab, 0x97, + 0x82, 0x97, 0xa9, 0xa0, 0x0d, 0x45, 0xbd, 0x29, 0x4b, 0x55, 0xfa, 0x74, 0x2d, 0x55, 0xf9, 0x93, + 0xb1, 0x54, 0xf7, 0x41, 0xcd, 0x0b, 0xcd, 0x54, 0x85, 0x53, 0xde, 0x98, 0xb4, 0x9d, 0x85, 0x8f, + 0x8a, 0x58, 0x23, 0x07, 0x15, 0x31, 0xc9, 0xbc, 0x53, 0xf5, 0xb3, 0xf4, 0x4e, 0xac, 0xbc, 0x1d, + 0xec, 0x7b, 0x44, 0xe3, 0x5b, 0xa9, 0x16, 0x97, 0xf7, 0x1e, 0x6f, 0x45, 0xa2, 0x17, 0x1e, 0x80, + 0x75, 0xc7, 0xb5, 0x07, 0x2e, 0xf1, 0xbc, 0x2d, 0x82, 0x35, 0x43, 0xb7, 0x48, 0x38, 0x80, 0x40, + 0xf5, 0xae, 0x8d, 0x4f, 0x9a, 0xeb, 0x7b, 0x72, 0x08, 0x2a, 0x8a, 0x6d, 0xfd, 0xb9, 0x02, 0x2e, + 0x67, 0x4f, 0xc4, 0x02, 0x23, 0xa2, 0x9c, 0xcb, 0x88, 0xbc, 0x9c, 0x28, 0xd1, 0xc0, 0xa5, 0x25, + 0xde, 0xf9, 0x73, 0x65, 0xba, 0x09, 0x96, 0x85, 0xf1, 0x08, 0x3b, 0x85, 0x15, 0x8b, 0x96, 0xe7, + 0x20, 0xdd, 0x8d, 0xb2, 0x78, 0x78, 0x0f, 0x2c, 0xba, 0xdc, 0x5b, 0x85, 0x04, 0x81, 0x3f, 0xf9, + 0x8a, 0x20, 0x58, 0x44, 0xc9, 0x4e, 0x94, 0xc6, 0x32, 0x6f, 0x12, 0x5b, 0x8e, 0x90, 0xa0, 0x92, + 0xf6, 0x26, 0x9b, 0x59, 0x00, 0xca, 0xc7, 0xc0, 0x1e, 0x58, 0xf1, 0xad, 0x3c, 0x55, 0x50, 0x6b, + 0xd7, 0x04, 0xd5, 0xca, 0x41, 0x1e, 0x82, 0x64, 0x71, 0xf0, 0xc7, 0x29, 0xbb, 0x32, 0xcb, 
0x4f, + 0x91, 0x17, 0x4e, 0xdf, 0x0e, 0x53, 0xfb, 0x15, 0x89, 0x8f, 0xaa, 0x4d, 0xeb, 0xa3, 0x5a, 0x7f, + 0x52, 0x00, 0xcc, 0x6f, 0xc1, 0x89, 0x2f, 0xf7, 0xb9, 0x88, 0x84, 0x44, 0x6a, 0x72, 0x87, 0x73, + 0x73, 0xb2, 0xc3, 0x89, 0x4f, 0xd0, 0xe9, 0x2c, 0x8e, 0x98, 0xde, 0x8b, 0xb9, 0x98, 0x99, 0xc2, + 0xe2, 0xc4, 0xf9, 0x3c, 0x9b, 0xc5, 0x49, 0xf0, 0x9c, 0x6e, 0x71, 0xfe, 0x5d, 0x02, 0x2b, 0x31, + 0x78, 0x6a, 0x8b, 0x23, 0x09, 0xf9, 0xf2, 0x72, 0x66, 0x3a, 0xdb, 0x11, 0x4f, 0xdd, 0xff, 0x89, + 0xed, 0x88, 0x13, 0x2a, 0xb0, 0x1d, 0xbf, 0x2b, 0x25, 0xb3, 0x3e, 0xa3, 0xed, 0xf8, 0x04, 0xae, + 0x2a, 0x3e, 0x77, 0xce, 0xa5, 0xf5, 0x97, 0x32, 0xb8, 0x9c, 0xdd, 0x82, 0x29, 0x1d, 0x54, 0x26, + 0xea, 0xe0, 0x1e, 0x58, 0x7d, 0xe4, 0x1b, 0xc6, 0x88, 0x8f, 0x21, 0x21, 0x86, 0x81, 0x82, 0x7e, + 0x55, 0x44, 0xae, 0x7e, 0x5f, 0x82, 0x41, 0xd2, 0xc8, 0xbc, 0x2c, 0x56, 0x9e, 0x55, 0x16, 0xab, + 0xe7, 0x90, 0x45, 0xb9, 0xb3, 0x28, 0x9f, 0xcb, 0x59, 0x4c, 0xad, 0x89, 0x92, 0xe3, 0x6a, 0xe2, + 0x3b, 0xfc, 0xaf, 0x15, 0xb0, 0x26, 0x7f, 0x7d, 0x86, 0x06, 0x58, 0x32, 0xf1, 0xe3, 0xe4, 0xe5, + 0xc5, 0x24, 0xc1, 0xf0, 0xa9, 0x6e, 0xa8, 0xc1, 0xd7, 0x1d, 0xf5, 0x2d, 0x8b, 0xee, 0xba, 0xfb, + 0xd4, 0xd5, 0xad, 0x41, 0x20, 0xb0, 0xbd, 0x14, 0x17, 0xca, 0x70, 0xb7, 0x3e, 0x54, 0xc0, 0x7a, + 0x81, 0xca, 0x5d, 0x6c, 0x26, 0xf0, 0x21, 0xa8, 0x99, 0xf8, 0xf1, 0xbe, 0xef, 0x0e, 0x42, 0x49, + 0x3e, 0xfb, 0x73, 0xf8, 0x2e, 0xec, 0x09, 0x16, 0x14, 0xf1, 0xb5, 0x76, 0xc1, 0xf5, 0xd4, 0x20, + 0xd9, 0xa6, 0x21, 0x8f, 0x7c, 0x83, 0xef, 0x1f, 0xe1, 0x29, 0x6e, 0x82, 0x79, 0x07, 0xbb, 0x54, + 0x8f, 0xcc, 0x68, 0xb5, 0xb3, 0x38, 0x3e, 0x69, 0xce, 0xef, 0x85, 0x8d, 0x28, 0xee, 0x6f, 0xfd, + 0xaa, 0x04, 0x16, 0x12, 0x24, 0x17, 0xa0, 0xef, 0x6f, 0xa4, 0xf4, 0x5d, 0xfa, 0xc5, 0x24, 0x39, + 0xaa, 0x22, 0x81, 0xef, 0x65, 0x04, 0xfe, 0x9b, 0x93, 0x88, 0x4e, 0x57, 0xf8, 0x8f, 0x4a, 0x60, + 0x35, 0x81, 0x8e, 0x25, 0xfe, 0x3b, 0x29, 0x89, 0xdf, 0xc8, 0x48, 0x7c, 0x5d, 0x16, 0xf3, 0xa5, + 0xc6, 0x4f, 0xd6, 0xf8, 0x3f, 0x2a, 0x60, 0x39, 0x31, 0x77, 0x17, 0x20, 0xf2, 0x5b, 0x69, 0x91, + 0x6f, 0x4e, 0xa8, 0x97, 0x02, 0x95, 0x7f, 0x52, 0x4d, 0xe5, 0xfd, 0x85, 0xbf, 0x5d, 0xf8, 0x39, + 0x58, 0x1d, 0xda, 0x86, 0x6f, 0x92, 0xae, 0x81, 0x75, 0x33, 0x04, 0x30, 0x55, 0x64, 0x93, 0xf8, + 0xa2, 0x94, 0x9e, 0xb8, 0x9e, 0xee, 0x51, 0x62, 0xd1, 0x07, 0x71, 0x64, 0xac, 0xc5, 0x0f, 0x24, + 0x74, 0x48, 0xfa, 0x10, 0xf8, 0x2a, 0x58, 0x60, 0x6a, 0xa6, 0xf7, 0xc9, 0x0e, 0x36, 0xc3, 0x9a, + 0x8a, 0xbe, 0x0f, 0xec, 0xc7, 0x5d, 0x28, 0x89, 0x83, 0x47, 0x60, 0xc5, 0xb1, 0xb5, 0x1e, 0xb6, + 0xf0, 0x80, 0xb0, 0xf3, 0x7f, 0xcf, 0x36, 0xf4, 0xfe, 0x88, 0xdf, 0x3b, 0xcc, 0x77, 0x5e, 0x0b, + 0xdf, 0x29, 0xf7, 0xf2, 0x10, 0xe6, 0xd9, 0x25, 0xcd, 0x7c, 0x3f, 0xcb, 0x28, 0xa1, 0x99, 0xfb, + 0x9c, 0x35, 0x97, 0xfb, 0x1f, 0x00, 0x59, 0x71, 0x9d, 0xf3, 0x83, 0x56, 0xd1, 0x8d, 0x4a, 0xed, + 0x5c, 0x5f, 0xa3, 0x3e, 0xaa, 0x80, 0x2b, 0xb9, 0x03, 0xf2, 0x33, 0xbc, 0xd3, 0xc8, 0x39, 0xaf, + 0xf2, 0x19, 0x9c, 0xd7, 0x26, 0x58, 0x16, 0x1f, 0xc2, 0x32, 0xc6, 0x2d, 0x32, 0xd0, 0xdd, 0x74, + 0x37, 0xca, 0xe2, 0x65, 0x77, 0x2a, 0xd5, 0x33, 0xde, 0xa9, 0x24, 0xb3, 0x10, 0xff, 0xbf, 0x11, + 0x54, 0x5d, 0x3e, 0x0b, 0xf1, 0x6f, 0x1c, 0x59, 0x3c, 0xfc, 0x6e, 0x58, 0x52, 0x11, 0xc3, 0x1c, + 0x67, 0xc8, 0xd4, 0x48, 0x44, 0x90, 0x41, 0x3f, 0xd3, 0xc7, 0x9e, 0x77, 0x24, 0x1f, 0x7b, 0x36, + 0x26, 0x94, 0xf2, 0xf4, 0x56, 0xf1, 0x6f, 0x0a, 0x78, 0xae, 0x70, 0x0f, 0xc0, 0xcd, 0x94, 0xce, + 0xde, 0xca, 0xe8, 0xec, 0xf3, 0x85, 0x81, 0x09, 0xb1, 0x35, 0xe5, 0x17, 0x22, 0x77, 0x27, 0x5e, + 0x88, 0x48, 0x5c, 
0xd4, 0xe4, 0x9b, 0x91, 0xce, 0xc6, 0x93, 0xa7, 0x8d, 0x99, 0xf7, 0x9f, 0x36, + 0x66, 0x3e, 0x78, 0xda, 0x98, 0xf9, 0xc5, 0xb8, 0xa1, 0x3c, 0x19, 0x37, 0x94, 0xf7, 0xc7, 0x0d, + 0xe5, 0x83, 0x71, 0x43, 0xf9, 0xe7, 0xb8, 0xa1, 0xfc, 0xf6, 0xc3, 0xc6, 0xcc, 0xc3, 0xd2, 0xf0, + 0xf6, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x59, 0xb3, 0x11, 0xc0, 0x12, 0x26, 0x00, 0x00, +} + func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -239,36 +1009,45 @@ func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + i-- + dAtA[i] = 0x18 + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Data.Size())) - n2, err := m.Data.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -276,37 +1055,46 @@ func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevisionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -314,41 +1102,52 
@@ func (m *DaemonSet) Marshal() (dAtA []byte, err error) { } func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n6, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -356,41 +1155,52 @@ func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n7, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { 
return nil, err } @@ -398,37 +1208,46 @@ func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n8, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -436,51 +1255,62 @@ func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Selector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n9, err := m.Selector.MarshalTo(dAtA[i:]) + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n9 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n10, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n11, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x12 + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -488,58 +1318,65 @@ func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x52 } } - return i, nil + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) + i-- + dAtA[i] = 0x40 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) + i-- + dAtA[i] = 0x38 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -547,31 +1384,39 @@ func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n12, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, 
err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n12 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Deployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -579,41 +1424,52 @@ func (m *Deployment) Marshal() (dAtA []byte, err error) { } func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n13, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n14, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n14 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n15, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n15 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -621,49 +1477,62 @@ func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { } func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n16, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n16 + i-- dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n17, err := 
m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n17 - return i, nil + i-- + dAtA[i] = 0x32 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -671,37 +1540,46 @@ func (m *DeploymentList) Marshal() (dAtA []byte, err error) { } func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n18, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -709,69 +1587,80 @@ func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { } func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.ProgressDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + i-- + dAtA[i] = 0x48 } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n19, err := m.Selector.MarshalTo(dAtA[i:]) + i-- + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x28 
+ { + size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n19 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n20, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n20 + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n21, err := m.Strategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n21 - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x38 - i++ - if m.Paused { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i++ - if m.ProgressDeadlineSeconds != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -779,52 +1668,59 @@ func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x40 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x38 if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - if m.CollisionCount != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + i-- + dAtA[i] = 0x28 + i = 
encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -832,31 +1728,39 @@ func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n22, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n22 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -864,41 +1768,52 @@ func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n23, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n24, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n24 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n25, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n25 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -906,41 +1821,52 @@ func (m 
*ReplicaSetCondition) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n26, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -948,37 +1874,46 @@ func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n27, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n27 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -986,43 +1921,52 @@ func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n28, err := m.Selector.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n28 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n29, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i += n29 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - return i, nil + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1030,44 +1974,51 @@ func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + _ = l if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1075,27 +2026,34 @@ func (m 
*RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n30, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n30 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1103,37 +2061,46 @@ func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n31, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n31 - } if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n32, err := m.MaxSurge.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n32 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1141,22 +2108,27 @@ func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateStatefulSetStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.Partition != nil { - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *StatefulSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1164,41 +2136,52 @@ func (m *StatefulSet) Marshal() (dAtA []byte, err error) { } func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSet) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n33, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n33 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n34, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n34 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n35, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n35 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1206,41 +2189,52 @@ func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n36, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n36 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1248,37 +2242,46 @@ func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + 
return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n37, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n37 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1286,73 +2289,88 @@ func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x40 } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n38, err := m.Selector.MarshalTo(dAtA[i:]) + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n38 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n39, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n39 + i-- + dAtA[i] = 0x3a + i -= len(m.PodManagementPolicy) + copy(dAtA[i:], m.PodManagementPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) + i-- + dAtA[i] = 0x32 + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0x2a if len(m.VolumeClaimTemplates) > 0 { - for _, msg := range m.VolumeClaimTemplates { + for iNdEx := len(m.VolumeClaimTemplates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeClaimTemplates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + } + } + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + 
dAtA[i] = 0x12 } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i += copy(dAtA[i:], m.ServiceName) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) - i += copy(dAtA[i:], m.PodManagementPolicy) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n40, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n40 - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1360,57 +2378,66 @@ func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) - i += copy(dAtA[i:], m.CurrentRevision) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) - i += copy(dAtA[i:], m.UpdateRevision) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x52 } } - return i, nil + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 + } + i -= len(m.UpdateRevision) + copy(dAtA[i:], m.UpdateRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) + i-- + dAtA[i] = 0x3a + i -= len(m.CurrentRevision) + copy(dAtA[i:], m.CurrentRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m 
*StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1418,37 +2445,50 @@ func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n41, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n41 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *ControllerRevision) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1460,6 +2500,9 @@ func (m *ControllerRevision) Size() (n int) { } func (m *ControllerRevisionList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1474,6 +2517,9 @@ func (m *ControllerRevisionList) Size() (n int) { } func (m *DaemonSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1486,6 +2532,9 @@ func (m *DaemonSet) Size() (n int) { } func (m *DaemonSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1502,6 +2551,9 @@ func (m *DaemonSetCondition) Size() (n int) { } func (m *DaemonSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1516,6 +2568,9 @@ func (m *DaemonSetList) Size() (n int) { } func (m *DaemonSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Selector != nil { @@ -1534,6 +2589,9 @@ func (m *DaemonSetSpec) Size() (n int) { } func (m *DaemonSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) @@ -1557,6 +2615,9 @@ func (m *DaemonSetStatus) Size() (n int) { } func (m *DaemonSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1569,6 +2630,9 @@ func (m *DaemonSetUpdateStrategy) Size() (n int) { } func (m *Deployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1581,6 +2645,9 @@ func (m *Deployment) Size() (n int) { } func (m *DeploymentCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1599,6 +2666,9 @@ func (m *DeploymentCondition) Size() (n int) { } func (m *DeploymentList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1613,6 +2683,9 @@ func (m *DeploymentList) Size() (n int) { } func (m 
*DeploymentSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -1638,6 +2711,9 @@ func (m *DeploymentSpec) Size() (n int) { } func (m *DeploymentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -1659,6 +2735,9 @@ func (m *DeploymentStatus) Size() (n int) { } func (m *DeploymentStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1671,6 +2750,9 @@ func (m *DeploymentStrategy) Size() (n int) { } func (m *ReplicaSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1683,6 +2765,9 @@ func (m *ReplicaSet) Size() (n int) { } func (m *ReplicaSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1699,6 +2784,9 @@ func (m *ReplicaSetCondition) Size() (n int) { } func (m *ReplicaSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1713,6 +2801,9 @@ func (m *ReplicaSetList) Size() (n int) { } func (m *ReplicaSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -1729,6 +2820,9 @@ func (m *ReplicaSetSpec) Size() (n int) { } func (m *ReplicaSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1746,6 +2840,9 @@ func (m *ReplicaSetStatus) Size() (n int) { } func (m *RollingUpdateDaemonSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MaxUnavailable != nil { @@ -1756,6 +2853,9 @@ func (m *RollingUpdateDaemonSet) Size() (n int) { } func (m *RollingUpdateDeployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MaxUnavailable != nil { @@ -1770,6 +2870,9 @@ func (m *RollingUpdateDeployment) Size() (n int) { } func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Partition != nil { @@ -1779,6 +2882,9 @@ func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { } func (m *StatefulSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1791,6 +2897,9 @@ func (m *StatefulSet) Size() (n int) { } func (m *StatefulSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1807,6 +2916,9 @@ func (m *StatefulSetCondition) Size() (n int) { } func (m *StatefulSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1821,6 +2933,9 @@ func (m *StatefulSetList) Size() (n int) { } func (m *StatefulSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -1851,6 +2966,9 @@ func (m *StatefulSetSpec) Size() (n int) { } func (m *StatefulSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -1875,6 +2993,9 @@ func (m *StatefulSetStatus) Size() (n int) { } func (m *StatefulSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1887,14 +3008,7 @@ func (m *StatefulSetUpdateStrategy) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1904,8 +3018,8 @@ func (this *ControllerRevision) String() string { return "nil" } s := 
strings.Join([]string{`&ControllerRevision{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + strings.Replace(strings.Replace(this.Data.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, `}`, }, "") @@ -1915,9 +3029,14 @@ func (this *ControllerRevisionList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ControllerRevision{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ControllerRevisionList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1927,7 +3046,7 @@ func (this *DaemonSet) String() string { return "nil" } s := strings.Join([]string{`&DaemonSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1941,7 +3060,7 @@ func (this *DaemonSetCondition) String() string { s := strings.Join([]string{`&DaemonSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -1952,9 +3071,14 @@ func (this *DaemonSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]DaemonSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&DaemonSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), 
"DaemonSet", "DaemonSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1964,8 +3088,8 @@ func (this *DaemonSetSpec) String() string { return "nil" } s := strings.Join([]string{`&DaemonSetSpec{`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, @@ -1977,6 +3101,11 @@ func (this *DaemonSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]DaemonSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DaemonSetStatus{`, `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, @@ -1987,7 +3116,7 @@ func (this *DaemonSetStatus) String() string { `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -1998,7 +3127,7 @@ func (this *DaemonSetUpdateStrategy) String() string { } s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, `}`, }, "") return s @@ -2008,7 +3137,7 @@ func (this *Deployment) String() string { return "nil" } s := strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2024,8 +3153,8 @@ func (this *DeploymentCondition) String() string 
{ `Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2034,9 +3163,14 @@ func (this *DeploymentList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Deployment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2047,8 +3181,8 @@ func (this *DeploymentSpec) String() string { } s := strings.Join([]string{`&DeploymentSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, @@ -2062,13 +3196,18 @@ func (this *DeploymentStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DeploymentStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), 
"DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, `}`, @@ -2081,7 +3220,7 @@ func (this *DeploymentStrategy) String() string { } s := strings.Join([]string{`&DeploymentStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, `}`, }, "") return s @@ -2091,7 +3230,7 @@ func (this *ReplicaSet) String() string { return "nil" } s := strings.Join([]string{`&ReplicaSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2105,7 +3244,7 @@ func (this *ReplicaSetCondition) String() string { s := strings.Join([]string{`&ReplicaSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -2116,9 +3255,14 @@ func (this *ReplicaSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ReplicaSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ReplicaSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2129,8 +3273,8 @@ func (this *ReplicaSetSpec) String() string { } s := strings.Join([]string{`&ReplicaSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `}`, }, "") @@ -2140,13 +3284,18 @@ func (this *ReplicaSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]ReplicaSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&ReplicaSetStatus{`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -2156,7 +3305,7 @@ func (this *RollingUpdateDaemonSet) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDaemonSet{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -2166,8 +3315,8 @@ func (this *RollingUpdateDeployment) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -2187,7 +3336,7 @@ func (this *StatefulSet) String() string { return "nil" } s := strings.Join([]string{`&StatefulSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2201,7 +3350,7 @@ func (this *StatefulSetCondition) String() string { s := strings.Join([]string{`&StatefulSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", 
"v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -2212,9 +3361,14 @@ func (this *StatefulSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]StatefulSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&StatefulSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2223,11 +3377,16 @@ func (this *StatefulSetSpec) String() string { if this == nil { return "nil" } + repeatedStringForVolumeClaimTemplates := "[]PersistentVolumeClaim{" + for _, f := range this.VolumeClaimTemplates { + repeatedStringForVolumeClaimTemplates += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumeClaimTemplates += "}" s := strings.Join([]string{`&StatefulSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_api_core_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `VolumeClaimTemplates:` + repeatedStringForVolumeClaimTemplates + `,`, `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, @@ -2240,6 +3399,11 @@ func (this *StatefulSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]StatefulSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&StatefulSetStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, @@ -2249,7 +3413,7 @@ func (this *StatefulSetStatus) String() string { `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", 
"StatefulSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -2260,7 +3424,7 @@ func (this *StatefulSetUpdateStrategy) String() string { } s := strings.Join([]string{`&StatefulSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, `}`, }, "") return s @@ -2288,7 +3452,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2316,7 +3480,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2325,6 +3489,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2346,7 +3513,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2355,6 +3522,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2376,7 +3546,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2390,6 +3560,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2417,7 +3590,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2445,7 +3618,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2454,6 +3627,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2475,7 +3651,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2484,6 +3660,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2501,6 +3680,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if 
(iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2528,7 +3710,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2556,7 +3738,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2565,6 +3747,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2586,7 +3771,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2595,6 +3780,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2616,7 +3804,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2625,6 +3813,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2641,6 +3832,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2668,7 +3862,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2696,7 +3890,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2706,6 +3900,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2725,7 +3922,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2735,6 +3932,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2754,7 +3954,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2763,6 +3963,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2784,7 +3987,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + 
stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2794,6 +3997,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2813,7 +4019,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2823,6 +4029,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2837,6 +4046,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2864,7 +4076,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2892,7 +4104,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2901,6 +4113,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2922,7 +4137,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2931,6 +4146,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2948,6 +4166,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2975,7 +4196,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3003,7 +4224,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3012,11 +4233,14 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3036,7 +4260,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3045,6 +4269,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3066,7 +4293,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3075,6 +4302,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3096,7 +4326,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3115,7 +4345,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3130,6 +4360,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3157,7 +4390,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3185,7 +4418,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift + m.CurrentNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3204,7 +4437,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberMisscheduled |= (int32(b) & 0x7F) << shift + m.NumberMisscheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3223,7 +4456,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift + m.DesiredNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3242,7 +4475,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberReady |= (int32(b) & 0x7F) << shift + m.NumberReady |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3261,7 +4494,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -3280,7 +4513,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedNumberScheduled |= (int32(b) & 0x7F) << shift + m.UpdatedNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3299,7 +4532,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberAvailable |= (int32(b) & 0x7F) << shift + m.NumberAvailable |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3318,7 +4551,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberUnavailable |= (int32(b) & 0x7F) << shift + m.NumberUnavailable |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3337,7 +4570,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3357,7 +4590,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3366,6 +4599,9 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3383,6 +4619,9 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3410,7 +4649,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3438,7 +4677,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3448,6 +4687,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3467,7 +4709,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3476,6 +4718,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3495,6 +4740,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3522,7 +4770,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3550,7 +4798,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3559,6 +4807,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3580,7 +4831,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3589,6 +4840,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3610,7 +4864,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3619,6 +4873,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3635,6 
+4892,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3662,7 +4922,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3690,7 +4950,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3700,6 +4960,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3719,7 +4982,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3729,6 +4992,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3748,7 +5014,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3758,6 +5024,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3777,7 +5046,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3787,6 +5056,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3806,7 +5078,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3815,6 +5087,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3836,7 +5111,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3845,6 +5120,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3861,6 +5139,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3888,7 +5169,7 @@ func (m *DeploymentList) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3916,7 +5197,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3925,6 +5206,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3946,7 +5230,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3955,6 +5239,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3972,6 +5259,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3999,7 +5289,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4027,7 +5317,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4047,7 +5337,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4056,11 +5346,14 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4080,7 +5373,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4089,6 +5382,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4110,7 +5406,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4119,6 +5415,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4140,7 +5439,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4159,7 +5458,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4179,7 +5478,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4199,7 +5498,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4214,6 +5513,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4241,7 +5543,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4269,7 +5571,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -4288,7 +5590,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4307,7 +5609,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4326,7 +5628,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4345,7 +5647,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + m.UnavailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4364,7 +5666,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4373,6 +5675,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4395,7 +5700,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4414,7 +5719,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4429,6 +5734,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4456,7 +5764,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4484,7 +5792,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) 
<< shift if b < 0x80 { break } @@ -4494,6 +5802,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4513,7 +5824,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4522,6 +5833,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4541,6 +5855,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4568,7 +5885,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4596,7 +5913,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4605,6 +5922,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4626,7 +5946,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4635,6 +5955,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4656,7 +5979,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4665,6 +5988,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4681,6 +6007,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4708,7 +6037,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4736,7 +6065,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4746,6 +6075,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4765,7 +6097,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen 
|= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4775,6 +6107,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4794,7 +6129,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4803,6 +6138,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4824,7 +6162,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4834,6 +6172,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4853,7 +6194,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4863,6 +6204,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4877,6 +6221,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4904,7 +6251,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4932,7 +6279,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4941,6 +6288,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4962,7 +6312,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4971,6 +6321,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4988,6 +6341,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5015,7 +6371,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } 
@@ -5043,7 +6399,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5063,7 +6419,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5072,11 +6428,14 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5096,7 +6455,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5105,6 +6464,9 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5126,7 +6488,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5140,6 +6502,9 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5167,7 +6532,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5195,7 +6560,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5214,7 +6579,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift + m.FullyLabeledReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5233,7 +6598,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5252,7 +6617,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5271,7 +6636,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5290,7 +6655,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5299,6 +6664,9 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ 
-5316,6 +6684,9 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5343,7 +6714,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5371,7 +6742,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5380,11 +6751,14 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5399,6 +6773,9 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5426,7 +6803,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5454,7 +6831,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5463,11 +6840,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5487,7 +6867,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5496,11 +6876,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxSurge == nil { - m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxSurge = &intstr.IntOrString{} } if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5515,6 +6898,9 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5542,7 +6928,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5570,7 +6956,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) 
error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5585,6 +6971,9 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5612,7 +7001,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5640,7 +7029,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5649,6 +7038,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5670,7 +7062,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5679,6 +7071,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5700,7 +7095,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5709,6 +7104,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5725,6 +7123,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5752,7 +7153,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5780,7 +7181,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5790,6 +7191,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5809,7 +7213,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5819,6 +7223,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5838,7 +7245,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5847,6 +7254,9 @@ func (m 
*StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5868,7 +7278,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5878,6 +7288,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5897,7 +7310,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5907,6 +7320,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5921,6 +7337,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5948,7 +7367,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5976,7 +7395,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5985,6 +7404,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6006,7 +7428,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6015,6 +7437,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6032,6 +7457,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6059,7 +7487,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6087,7 +7515,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6107,7 +7535,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6116,11 +7544,14 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -6140,7 +7571,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6149,6 +7580,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6170,7 +7604,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6179,10 +7613,13 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_api_core_v1.PersistentVolumeClaim{}) + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, v11.PersistentVolumeClaim{}) if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -6201,7 +7638,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6211,6 +7648,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6230,7 +7670,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6240,6 +7680,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6259,7 +7702,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6268,6 +7711,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6289,7 +7735,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6304,6 +7750,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6331,7 +7780,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6359,7 +7808,7 @@ func (m 
*StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6378,7 +7827,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6397,7 +7846,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6416,7 +7865,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift + m.CurrentReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6435,7 +7884,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6454,7 +7903,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6464,6 +7913,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6483,7 +7935,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6493,6 +7945,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6512,7 +7967,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6532,7 +7987,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6541,6 +7996,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6558,6 +8016,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6585,7 +8046,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6613,7 +8074,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6623,6 +8084,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6642,7 +8106,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6651,6 +8115,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6670,6 +8137,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6736,10 +8206,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -6768,6 +8241,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -6786,139 +8262,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 2037 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x24, 0x47, - 0x1d, 0x75, 0xcf, 0x87, 0x3d, 0x2e, 0xaf, 0xed, 0xdd, 0xb2, 0xb1, 0x27, 0xbb, 0x64, 0x66, 0x19, - 0x60, 0xe3, 0x64, 0xb3, 0x3d, 0xec, 0x66, 0x13, 0xa1, 0x2c, 0x02, 0x79, 0xc6, 0x21, 0x84, 0x78, - 0x6c, 0x53, 0x5e, 0xef, 0x61, 0x09, 0x12, 0xe5, 0xe9, 0xda, 0x71, 0xc7, 0xfd, 0xa5, 0xee, 0xea, - 0x61, 0x47, 0x5c, 0x10, 0x12, 0x9c, 0x38, 0xf0, 0x9f, 0x20, 0x84, 0xe0, 0x86, 0x22, 0xc4, 0x65, - 0x2f, 0x48, 0x11, 0x17, 0x72, 0xb2, 0xd8, 0xc9, 0x09, 0xa1, 0x1c, 0xb9, 0xe4, 0x02, 0xaa, 0xea, - 0xea, 0xef, 0x6a, 0xcf, 0xd8, 0x9b, 0x75, 0x50, 0x94, 0x9b, 0xa7, 0xea, 0xfd, 0x5e, 0xff, 0xaa, - 0xea, 0x57, 0xf5, 0x5e, 0x57, 0x1b, 0xdc, 0x3b, 0xfe, 0xb6, 0xa7, 0xea, 0x76, 0xfb, 0xd8, 0x3f, - 0x24, 0xae, 0x45, 0x28, 0xf1, 0xda, 0x43, 0x62, 0x69, 0xb6, 0xdb, 0x16, 0x1d, 0xd8, 0xd1, 0xdb, - 0xd8, 0x71, 0xbc, 0xf6, 0xf0, 0x76, 0x7b, 0x40, 0x2c, 0xe2, 0x62, 0x4a, 0x34, 0xd5, 0x71, 0x6d, - 0x6a, 0x43, 0x18, 0x60, 0x54, 0xec, 0xe8, 0x2a, 0xc3, 0xa8, 0xc3, 0xdb, 0x57, 0x6f, 0x0d, 0x74, - 0x7a, 0xe4, 0x1f, 0xaa, 0x7d, 0xdb, 0x6c, 0x0f, 0xec, 0x81, 0xdd, 0xe6, 0xd0, 0x43, 0xff, 0x11, - 0xff, 0xc5, 0x7f, 0xf0, 0xbf, 0x02, 0x8a, 0xab, 0xad, 0xc4, 0x63, 0xfa, 0xb6, 0x4b, 0x24, 0x8f, - 0xb9, 0x7a, 0x37, 0xc6, 0x98, 0xb8, 0x7f, 0xa4, 0x5b, 0xc4, 0x1d, 0xb5, 0x9d, 0xe3, 0x01, 0x6b, - 0xf0, 0xda, 0x26, 0xa1, 0x58, 0x16, 0xd5, 0x2e, 0x8a, 0x72, 0x7d, 0x8b, 0xea, 0x26, 0xc9, 0x05, - 0xbc, 0x31, 0x29, 0xc0, 0xeb, 0x1f, 0x11, 0x13, 0xe7, 0xe2, 0x5e, 0x2b, 0x8a, 0xf3, 0xa9, 0x6e, - 0xb4, 0x75, 0x8b, 0x7a, 0xd4, 0xcd, 0x06, 0xb5, 0xfe, 0xa3, 0x00, 0xd8, 0xb5, 0x2d, 0xea, 0xda, - 0x86, 0x41, 0x5c, 0x44, 0x86, 0xba, 0xa7, 0xdb, 0x16, 0xfc, 0x29, 0xa8, 0xb1, 0xf1, 0x68, 0x98, - 0xe2, 0xba, 0x72, 0x5d, 0xd9, 0x58, 0xb8, 0xf3, 0x2d, 0x35, 0x9e, 0xe4, 0x88, 0x5e, 0x75, 0x8e, - 0x07, 
-	// (remaining gzipped FileDescriptorProto bytes elided)
-}
diff --git
a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto index fea81922f..6c5527974 100644 --- a/vendor/k8s.io/api/apps/v1/generated.proto +++ b/vendor/k8s.io/api/apps/v1/generated.proto @@ -41,7 +41,7 @@ option go_package = "v1"; // depend on its stability. It is primarily for internal use by controllers. message ControllerRevision { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -54,7 +54,7 @@ message ControllerRevision { // ControllerRevisionList is a resource containing a list of ControllerRevision objects. message ControllerRevisionList { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -65,12 +65,12 @@ message ControllerRevisionList { // DaemonSet represents the configuration of a daemon set. message DaemonSet { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional DaemonSetSpec spec = 2; @@ -78,7 +78,7 @@ message DaemonSet { // out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional DaemonSetStatus status = 3; } @@ -107,7 +107,7 @@ message DaemonSetCondition { // DaemonSetList is a collection of daemon sets. message DaemonSetList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -366,12 +366,12 @@ message DeploymentStrategy { message ReplicaSet { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the ReplicaSet. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ReplicaSetSpec spec = 2; @@ -379,7 +379,7 @@ message ReplicaSet { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ReplicaSetStatus status = 3; } @@ -408,7 +408,7 @@ message ReplicaSetCondition { // ReplicaSetList is a collection of ReplicaSets. message ReplicaSetList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go index 2fe857458..e003a0c4f 100644 --- a/vendor/k8s.io/api/apps/v1/types.go +++ b/vendor/k8s.io/api/apps/v1/types.go @@ -95,13 +95,13 @@ const ( // ordering constraints. When a scale operation is performed with this // strategy, new Pods will be created from the specification version indicated // by the StatefulSet's updateRevision. - RollingUpdateStatefulSetStrategyType = "RollingUpdate" + RollingUpdateStatefulSetStrategyType StatefulSetUpdateStrategyType = "RollingUpdate" // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version // tracking and ordered rolling restarts are disabled. Pods are recreated // from the StatefulSetSpec when they are manually deleted. When a scale // operation is performed with this strategy,specification version indicated // by the StatefulSet's currentRevision. - OnDeleteStatefulSetStrategyType = "OnDelete" + OnDeleteStatefulSetStrategyType StatefulSetUpdateStrategyType = "OnDelete" ) // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. @@ -618,12 +618,12 @@ type DaemonSetCondition struct { type DaemonSet struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -631,7 +631,7 @@ type DaemonSet struct { // out of date by some window of time. // Populated by the system. // Read-only. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -649,7 +649,7 @@ const ( type DaemonSetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -668,12 +668,12 @@ type ReplicaSet struct { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -681,7 +681,7 @@ type ReplicaSet struct { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -692,7 +692,7 @@ type ReplicaSet struct { type ReplicaSetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -800,7 +800,7 @@ type ReplicaSetCondition struct { type ControllerRevision struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -817,7 +817,7 @@ type ControllerRevision struct { type ControllerRevisionList struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go index 7e992c584..3f0299d03 100644 --- a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ "": "ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "data": "Data is the serialized representation of the state.", "revision": "Revision indicates the revision of the state represented by Data.", } @@ -40,7 +40,7 @@ func (ControllerRevision) SwaggerDoc() map[string]string { var map_ControllerRevisionList = map[string]string{ "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of ControllerRevisions", } @@ -50,9 +50,9 @@ func (ControllerRevisionList) SwaggerDoc() map[string]string { var map_DaemonSet = map[string]string{ "": "DaemonSet represents the configuration of a daemon set.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (DaemonSet) SwaggerDoc() map[string]string { @@ -74,7 +74,7 @@ func (DaemonSetCondition) SwaggerDoc() map[string]string { var map_DaemonSetList = map[string]string{ "": "DaemonSetList is a collection of daemon sets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "A list of daemon sets.", } @@ -202,9 +202,9 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string { var map_ReplicaSet = map[string]string{ "": "ReplicaSet ensures that a specified number of pod replicas are running at any given time.", - "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (ReplicaSet) SwaggerDoc() map[string]string { @@ -226,7 +226,7 @@ func (ReplicaSetCondition) SwaggerDoc() map[string]string { var map_ReplicaSetList = map[string]string{ "": "ReplicaSetList is a collection of ReplicaSets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of ReplicaSets. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", } diff --git a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go index 885203fca..7b7ff385c 100644 --- a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go @@ -58,7 +58,7 @@ func (in *ControllerRevision) DeepCopyObject() runtime.Object { func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ControllerRevision, len(*in)) @@ -136,7 +136,7 @@ func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]DaemonSet, len(*in)) @@ -292,7 +292,7 @@ func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Deployment, len(*in)) @@ -457,7 +457,7 @@ func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ReplicaSet, len(*in)) @@ -653,7 +653,7 @@ func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]StatefulSet, len(*in)) diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go index 935304755..921e055cf 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go @@ -17,52 +17,26 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto - - It has these top-level messages: - ControllerRevision - ControllerRevisionList - Deployment - DeploymentCondition - DeploymentList - DeploymentRollback - DeploymentSpec - DeploymentStatus - DeploymentStrategy - RollbackConfig - RollingUpdateDeployment - RollingUpdateStatefulSetStrategy - Scale - ScaleSpec - ScaleStatus - StatefulSet - StatefulSetCondition - StatefulSetList - StatefulSetSpec - StatefulSetStatus - StatefulSetUpdateStrategy -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import strings "strings" -import reflect "reflect" - -import io "io" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -75,95 +49,593 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } -func (*ControllerRevision) ProtoMessage() {} -func (*ControllerRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } +func (*ControllerRevision) ProtoMessage() {} +func (*ControllerRevision) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{0} +} +func (m *ControllerRevision) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevision) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevision.Merge(m, src) +} +func (m *ControllerRevision) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevision) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRevision.DiscardUnknown(m) +} -func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } -func (*ControllerRevisionList) ProtoMessage() {} -func (*ControllerRevisionList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo -func (m *Deployment) Reset() { *m = Deployment{} } -func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } +func (*ControllerRevisionList) ProtoMessage() {} +func (*ControllerRevisionList) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{1} +} +func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevisionList) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevisionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevisionList.Merge(m, src) +} +func (m *ControllerRevisionList) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevisionList) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRevisionList.DiscardUnknown(m) +} -func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } -func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo -func (m *DeploymentList) Reset() { *m = DeploymentList{} } -func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{2} +} +func (m *Deployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Deployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_Deployment.Merge(m, src) +} +func (m *Deployment) XXX_Size() int { + return m.Size() +} +func (m *Deployment) XXX_DiscardUnknown() { + xxx_messageInfo_Deployment.DiscardUnknown(m) +} -func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } -func (*DeploymentRollback) ProtoMessage() {} -func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_Deployment proto.InternalMessageInfo -func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } -func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{3} +} +func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentCondition.Merge(m, src) +} +func (m *DeploymentCondition) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) +} -func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } -func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo -func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } -func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *DeploymentList) Reset() { *m = DeploymentList{} } 
+func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{4} +} +func (m *DeploymentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentList.Merge(m, src) +} +func (m *DeploymentList) XXX_Size() int { + return m.Size() +} +func (m *DeploymentList) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentList.DiscardUnknown(m) +} -func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } -func (*RollbackConfig) ProtoMessage() {} -func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_DeploymentList proto.InternalMessageInfo + +func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } +func (*DeploymentRollback) ProtoMessage() {} +func (*DeploymentRollback) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{5} +} +func (m *DeploymentRollback) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentRollback) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentRollback) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentRollback.Merge(m, src) +} +func (m *DeploymentRollback) XXX_Size() int { + return m.Size() +} +func (m *DeploymentRollback) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentRollback.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentRollback proto.InternalMessageInfo + +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{6} +} +func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentSpec.Merge(m, src) +} +func (m *DeploymentSpec) XXX_Size() int { + return m.Size() +} +func (m *DeploymentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo + +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{7} +} +func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStatus.Merge(m, src) +} +func (m *DeploymentStatus) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentStatus 
proto.InternalMessageInfo + +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{8} +} +func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStrategy.Merge(m, src) +} +func (m *DeploymentStrategy) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo + +func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } +func (*RollbackConfig) ProtoMessage() {} +func (*RollbackConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{9} +} +func (m *RollbackConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollbackConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollbackConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollbackConfig.Merge(m, src) +} +func (m *RollbackConfig) XXX_Size() int { + return m.Size() +} +func (m *RollbackConfig) XXX_DiscardUnknown() { + xxx_messageInfo_RollbackConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_RollbackConfig proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{10} + return fileDescriptor_2a07313e8f66e805, []int{10} +} +func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDeployment.Merge(m, src) +} +func (m *RollingUpdateDeployment) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m) } +var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo + func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{11} + return fileDescriptor_2a07313e8f66e805, []int{11} +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.Merge(m, src) +} +func (m 
*RollingUpdateStatefulSetStrategy) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateStatefulSetStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo + +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{12} +} +func (m *Scale) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scale) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scale.Merge(m, src) +} +func (m *Scale) XXX_Size() int { + return m.Size() +} +func (m *Scale) XXX_DiscardUnknown() { + xxx_messageInfo_Scale.DiscardUnknown(m) +} + +var xxx_messageInfo_Scale proto.InternalMessageInfo + +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{13} +} +func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleSpec.Merge(m, src) +} +func (m *ScaleSpec) XXX_Size() int { + return m.Size() +} +func (m *ScaleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo + +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{14} +} +func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleStatus.Merge(m, src) +} +func (m *ScaleStatus) XXX_Size() int { + return m.Size() +} +func (m *ScaleStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo + +func (m *StatefulSet) Reset() { *m = StatefulSet{} } +func (*StatefulSet) ProtoMessage() {} +func (*StatefulSet) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{15} } +func (m *StatefulSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSet.Merge(m, src) +} +func (m *StatefulSet) XXX_Size() int { + return m.Size() +} +func (m *StatefulSet) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSet.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSet proto.InternalMessageInfo -func (m *Scale) Reset() { *m = Scale{} } -func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{12} } +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{16} +} +func (m *StatefulSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetCondition.Merge(m, src) +} +func (m *StatefulSetCondition) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetCondition.DiscardUnknown(m) +} -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo -func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } -func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } +func (*StatefulSetList) ProtoMessage() {} +func (*StatefulSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{17} +} +func (m *StatefulSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetList.Merge(m, src) +} +func (m *StatefulSetList) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetList) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetList.DiscardUnknown(m) +} -func (m *StatefulSet) Reset() { *m = StatefulSet{} } -func (*StatefulSet) ProtoMessage() {} -func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo -func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } -func (*StatefulSetCondition) ProtoMessage() {} -func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } +func (*StatefulSetSpec) ProtoMessage() {} +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{18} +} +func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetSpec.Merge(m, src) +} +func (m *StatefulSetSpec) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetSpec.DiscardUnknown(m) +} -func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } -func (*StatefulSetList) ProtoMessage() {} -func (*StatefulSetList) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{17} } +var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo -func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } -func (*StatefulSetSpec) ProtoMessage() {} -func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } +func (*StatefulSetStatus) ProtoMessage() {} +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{19} +} +func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetStatus.Merge(m, src) +} +func (m *StatefulSetStatus) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetStatus.DiscardUnknown(m) +} -func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } -func (*StatefulSetStatus) ProtoMessage() {} -func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{20} + return fileDescriptor_2a07313e8f66e805, []int{20} +} +func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetUpdateStrategy.Merge(m, src) +} +func (m *StatefulSetUpdateStrategy) XXX_Size() int { + return m.Size() } +func (m *StatefulSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetUpdateStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetUpdateStrategy proto.InternalMessageInfo func init() { proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1beta1.ControllerRevision") @@ -172,6 +644,7 @@ func init() { proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.apps.v1beta1.DeploymentCondition") proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.apps.v1beta1.DeploymentList") proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.api.apps.v1beta1.DeploymentRollback") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.apps.v1beta1.DeploymentRollback.UpdatedAnnotationsEntry") proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.apps.v1beta1.DeploymentSpec") proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.apps.v1beta1.DeploymentStatus") proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.apps.v1beta1.DeploymentStrategy") @@ -181,6 +654,7 @@ func init() { proto.RegisterType((*Scale)(nil), "k8s.io.api.apps.v1beta1.Scale") proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.apps.v1beta1.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.apps.v1beta1.ScaleStatus") + proto.RegisterMapType((map[string]string)(nil), 
"k8s.io.api.apps.v1beta1.ScaleStatus.SelectorEntry") proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta1.StatefulSet") proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta1.StatefulSetCondition") proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta1.StatefulSetList") @@ -188,10 +662,135 @@ func init() { proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta1.StatefulSetStatus") proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta1.StatefulSetUpdateStrategy") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto", fileDescriptor_2a07313e8f66e805) +} + +var fileDescriptor_2a07313e8f66e805 = []byte{ + // 1855 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcd, 0x6f, 0x24, 0x47, + 0x15, 0x77, 0x8f, 0x67, 0xec, 0xf1, 0x73, 0x3c, 0xde, 0x2d, 0x9b, 0xf5, 0xc4, 0x81, 0xb1, 0x35, + 0x44, 0x89, 0xf3, 0xe1, 0x9e, 0xac, 0x13, 0xa2, 0x64, 0x17, 0x45, 0x78, 0xbc, 0x4b, 0xb2, 0x91, + 0x8d, 0x9d, 0xb2, 0x1d, 0x44, 0x00, 0x29, 0x35, 0x3d, 0xb5, 0xb3, 0x1d, 0xf7, 0x97, 0xba, 0xab, + 0x87, 0x1d, 0x71, 0xe1, 0x0f, 0x40, 0x0a, 0x67, 0xfe, 0x0a, 0x8e, 0x08, 0x6e, 0x9c, 0xf6, 0x82, + 0x14, 0x71, 0x21, 0x27, 0x8b, 0x9d, 0x5c, 0x81, 0x1b, 0x97, 0x95, 0x90, 0x50, 0x55, 0x57, 0x7f, + 0x77, 0xdb, 0x6d, 0xa4, 0xb5, 0x04, 0xb7, 0xe9, 0x7a, 0xef, 0xfd, 0x5e, 0x7d, 0xbc, 0xaf, 0xdf, + 0xc0, 0x0f, 0xce, 0xde, 0xf3, 0x54, 0xdd, 0xee, 0x9d, 0xf9, 0x03, 0xea, 0x5a, 0x94, 0x51, 0xaf, + 0x37, 0xa6, 0xd6, 0xd0, 0x76, 0x7b, 0x52, 0x40, 0x1c, 0xbd, 0x47, 0x1c, 0xc7, 0xeb, 0x8d, 0x6f, + 0x0f, 0x28, 0x23, 0xb7, 0x7b, 0x23, 0x6a, 0x51, 0x97, 0x30, 0x3a, 0x54, 0x1d, 0xd7, 0x66, 0x36, + 0x5a, 0x0b, 0x14, 0x55, 0xe2, 0xe8, 0x2a, 0x57, 0x54, 0xa5, 0xe2, 0xfa, 0xf6, 0x48, 0x67, 0x8f, + 0xfc, 0x81, 0xaa, 0xd9, 0x66, 0x6f, 0x64, 0x8f, 0xec, 0x9e, 0xd0, 0x1f, 0xf8, 0x0f, 0xc5, 0x97, + 0xf8, 0x10, 0xbf, 0x02, 0x9c, 0xf5, 0x6e, 0xc2, 0xa1, 0x66, 0xbb, 0xb4, 0x37, 0xce, 0xf9, 0x5a, + 0x7f, 0x27, 0xd6, 0x31, 0x89, 0xf6, 0x48, 0xb7, 0xa8, 0x3b, 0xe9, 0x39, 0x67, 0x23, 0xbe, 0xe0, + 0xf5, 0x4c, 0xca, 0x48, 0x91, 0x55, 0xaf, 0xcc, 0xca, 0xf5, 0x2d, 0xa6, 0x9b, 0x34, 0x67, 0xf0, + 0xee, 0x65, 0x06, 0x9e, 0xf6, 0x88, 0x9a, 0x24, 0x67, 0xf7, 0x76, 0x99, 0x9d, 0xcf, 0x74, 0xa3, + 0xa7, 0x5b, 0xcc, 0x63, 0x6e, 0xd6, 0xa8, 0xfb, 0x2f, 0x05, 0xd0, 0x9e, 0x6d, 0x31, 0xd7, 0x36, + 0x0c, 0xea, 0x62, 0x3a, 0xd6, 0x3d, 0xdd, 0xb6, 0xd0, 0xe7, 0xd0, 0xe4, 0xe7, 0x19, 0x12, 0x46, + 0xda, 0xca, 0xa6, 0xb2, 0xb5, 0xb8, 0xf3, 0x96, 0x1a, 0xdf, 0x74, 0x04, 0xaf, 0x3a, 0x67, 0x23, + 0xbe, 0xe0, 0xa9, 0x5c, 0x5b, 0x1d, 0xdf, 0x56, 0x0f, 0x07, 0x5f, 0x50, 0x8d, 0x1d, 0x50, 0x46, + 0xfa, 0xe8, 0xc9, 0xf9, 0xc6, 0xcc, 0xf4, 0x7c, 0x03, 0xe2, 0x35, 0x1c, 0xa1, 0xa2, 0x43, 0xa8, + 0x0b, 0xf4, 0x9a, 0x40, 0xdf, 0x2e, 0x45, 0x97, 0x87, 0x56, 0x31, 0xf9, 0xc5, 0xfd, 0xc7, 0x8c, + 0x5a, 0x7c, 0x7b, 0xfd, 0x17, 0x24, 0x74, 0xfd, 0x1e, 0x61, 0x04, 0x0b, 0x20, 0xf4, 0x26, 0x34, + 0x5d, 0xb9, 0xfd, 0xf6, 0xec, 0xa6, 0xb2, 0x35, 0xdb, 0xbf, 0x21, 0xb5, 0x9a, 0xe1, 0xb1, 0x70, + 0xa4, 0xd1, 0x7d, 0xa2, 0xc0, 0xad, 0xfc, 0xb9, 0xf7, 0x75, 0x8f, 0xa1, 0x9f, 0xe5, 0xce, 0xae, + 0x56, 0x3b, 0x3b, 0xb7, 0x16, 0x27, 0x8f, 0x1c, 0x87, 0x2b, 0x89, 0x73, 0x1f, 0x41, 0x43, 0x67, + 0xd4, 0xf4, 0xda, 0xb5, 0xcd, 0xd9, 0xad, 0xc5, 0x9d, 0x37, 0xd4, 0x92, 0x00, 0x56, 0xf3, 0xbb, + 0xeb, 0x2f, 0x49, 0xdc, 0xc6, 0x03, 0x8e, 0x80, 0x03, 0xa0, 0xee, 0xaf, 0x6b, 0x00, 0xf7, 0xa8, + 0x63, 0xd8, 
0x13, 0x93, 0x5a, 0xec, 0x1a, 0x9e, 0xee, 0x01, 0xd4, 0x3d, 0x87, 0x6a, 0xf2, 0xe9, + 0x5e, 0x2d, 0x3d, 0x41, 0xbc, 0xa9, 0x63, 0x87, 0x6a, 0xf1, 0xa3, 0xf1, 0x2f, 0x2c, 0x20, 0xd0, + 0x27, 0x30, 0xe7, 0x31, 0xc2, 0x7c, 0x4f, 0x3c, 0xd9, 0xe2, 0xce, 0x6b, 0x55, 0xc0, 0x84, 0x41, + 0xbf, 0x25, 0xe1, 0xe6, 0x82, 0x6f, 0x2c, 0x81, 0xba, 0x7f, 0x9d, 0x85, 0x95, 0x58, 0x79, 0xcf, + 0xb6, 0x86, 0x3a, 0xe3, 0x21, 0x7d, 0x17, 0xea, 0x6c, 0xe2, 0x50, 0x71, 0x27, 0x0b, 0xfd, 0x57, + 0xc3, 0xcd, 0x9c, 0x4c, 0x1c, 0xfa, 0xec, 0x7c, 0x63, 0xad, 0xc0, 0x84, 0x8b, 0xb0, 0x30, 0x42, + 0xfb, 0xd1, 0x3e, 0x6b, 0xc2, 0xfc, 0x9d, 0xb4, 0xf3, 0x67, 0xe7, 0x1b, 0x05, 0x05, 0x44, 0x8d, + 0x90, 0xd2, 0x5b, 0x44, 0x5f, 0x40, 0xcb, 0x20, 0x1e, 0x3b, 0x75, 0x86, 0x84, 0xd1, 0x13, 0xdd, + 0xa4, 0xed, 0x39, 0x71, 0xfa, 0xd7, 0xab, 0x3d, 0x14, 0xb7, 0xe8, 0xdf, 0x92, 0x3b, 0x68, 0xed, + 0xa7, 0x90, 0x70, 0x06, 0x19, 0x8d, 0x01, 0xf1, 0x95, 0x13, 0x97, 0x58, 0x5e, 0x70, 0x2a, 0xee, + 0x6f, 0xfe, 0xca, 0xfe, 0xd6, 0xa5, 0x3f, 0xb4, 0x9f, 0x43, 0xc3, 0x05, 0x1e, 0xd0, 0x2b, 0x30, + 0xe7, 0x52, 0xe2, 0xd9, 0x56, 0xbb, 0x2e, 0x6e, 0x2c, 0x7a, 0x2e, 0x2c, 0x56, 0xb1, 0x94, 0xa2, + 0xd7, 0x60, 0xde, 0xa4, 0x9e, 0x47, 0x46, 0xb4, 0xdd, 0x10, 0x8a, 0xcb, 0x52, 0x71, 0xfe, 0x20, + 0x58, 0xc6, 0xa1, 0xbc, 0xfb, 0x7b, 0x05, 0x5a, 0xf1, 0x33, 0x5d, 0x43, 0xae, 0x7e, 0x94, 0xce, + 0xd5, 0xef, 0x56, 0x08, 0xce, 0x92, 0x1c, 0xfd, 0x7b, 0x0d, 0x50, 0xac, 0x84, 0x6d, 0xc3, 0x18, + 0x10, 0xed, 0x0c, 0x6d, 0x42, 0xdd, 0x22, 0x66, 0x18, 0x93, 0x51, 0x82, 0xfc, 0x88, 0x98, 0x14, + 0x0b, 0x09, 0xfa, 0x52, 0x01, 0xe4, 0x8b, 0xd7, 0x1c, 0xee, 0x5a, 0x96, 0xcd, 0x08, 0xbf, 0xe0, + 0x70, 0x43, 0x7b, 0x15, 0x36, 0x14, 0xfa, 0x52, 0x4f, 0x73, 0x28, 0xf7, 0x2d, 0xe6, 0x4e, 0xe2, + 0x87, 0xcd, 0x2b, 0xe0, 0x02, 0xd7, 0xe8, 0xa7, 0x00, 0xae, 0xc4, 0x3c, 0xb1, 0x65, 0xda, 0x96, + 0xd7, 0x80, 0xd0, 0xfd, 0x9e, 0x6d, 0x3d, 0xd4, 0x47, 0x71, 0x61, 0xc1, 0x11, 0x04, 0x4e, 0xc0, + 0xad, 0xdf, 0x87, 0xb5, 0x92, 0x7d, 0xa2, 0x1b, 0x30, 0x7b, 0x46, 0x27, 0xc1, 0x55, 0x61, 0xfe, + 0x13, 0xad, 0x42, 0x63, 0x4c, 0x0c, 0x9f, 0x06, 0x39, 0x89, 0x83, 0x8f, 0x3b, 0xb5, 0xf7, 0x94, + 0xee, 0xef, 0x1a, 0xc9, 0x48, 0xe1, 0xf5, 0x06, 0x6d, 0xf1, 0xf6, 0xe0, 0x18, 0xba, 0x46, 0x3c, + 0x81, 0xd1, 0xe8, 0xbf, 0x10, 0xb4, 0x86, 0x60, 0x0d, 0x47, 0x52, 0xf4, 0x73, 0x68, 0x7a, 0xd4, + 0xa0, 0x1a, 0xb3, 0x5d, 0x59, 0xe2, 0xde, 0xae, 0x18, 0x53, 0x64, 0x40, 0x8d, 0x63, 0x69, 0x1a, + 0xc0, 0x87, 0x5f, 0x38, 0x82, 0x44, 0x9f, 0x40, 0x93, 0x51, 0xd3, 0x31, 0x08, 0xa3, 0xf2, 0xf6, + 0x52, 0x71, 0xc5, 0x6b, 0x07, 0x07, 0x3b, 0xb2, 0x87, 0x27, 0x52, 0x4d, 0x54, 0xcf, 0x28, 0x4e, + 0xc3, 0x55, 0x1c, 0xc1, 0xa0, 0x9f, 0x40, 0xd3, 0x63, 0xbc, 0xab, 0x8f, 0x26, 0x22, 0xdb, 0x2e, + 0x6a, 0x2b, 0xc9, 0x3a, 0x1a, 0x98, 0xc4, 0xd0, 0xe1, 0x0a, 0x8e, 0xe0, 0xd0, 0x2e, 0x2c, 0x9b, + 0xba, 0x85, 0x29, 0x19, 0x4e, 0x8e, 0xa9, 0x66, 0x5b, 0x43, 0x4f, 0xa4, 0x69, 0xa3, 0xbf, 0x26, + 0x8d, 0x96, 0x0f, 0xd2, 0x62, 0x9c, 0xd5, 0x47, 0xfb, 0xb0, 0x1a, 0xb6, 0xdd, 0x8f, 0x74, 0x8f, + 0xd9, 0xee, 0x64, 0x5f, 0x37, 0x75, 0x26, 0x6a, 0x5e, 0xa3, 0xdf, 0x9e, 0x9e, 0x6f, 0xac, 0xe2, + 0x02, 0x39, 0x2e, 0xb4, 0xe2, 0x75, 0xc5, 0x21, 0xbe, 0x47, 0x87, 0xa2, 0x86, 0x35, 0xe3, 0xba, + 0x72, 0x24, 0x56, 0xb1, 0x94, 0xa2, 0x1f, 0xa7, 0xc2, 0xb4, 0x79, 0xb5, 0x30, 0x6d, 0x95, 0x87, + 0x28, 0x3a, 0x85, 0x35, 0xc7, 0xb5, 0x47, 0x2e, 0xf5, 0xbc, 0x7b, 0x94, 0x0c, 0x0d, 0xdd, 0xa2, + 0xe1, 0xcd, 0x2c, 0x88, 0x13, 0xbd, 0x34, 0x3d, 0xdf, 0x58, 0x3b, 0x2a, 0x56, 0xc1, 0x65, 0xb6, + 0xdd, 0x3f, 0xd5, 0xe1, 0x46, 0xb6, 
0xc7, 0xa1, 0x8f, 0x01, 0xd9, 0x03, 0x8f, 0xba, 0x63, 0x3a, + 0xfc, 0x30, 0x18, 0xdc, 0xf8, 0x74, 0xa3, 0x88, 0xe9, 0x26, 0xca, 0xdb, 0xc3, 0x9c, 0x06, 0x2e, + 0xb0, 0x0a, 0xe6, 0x23, 0x99, 0x00, 0x35, 0xb1, 0xd1, 0xc4, 0x7c, 0x94, 0x4b, 0x82, 0x5d, 0x58, + 0x96, 0xb9, 0x1f, 0x0a, 0x45, 0xb0, 0x26, 0xde, 0xfd, 0x34, 0x2d, 0xc6, 0x59, 0x7d, 0x74, 0x17, + 0x96, 0x5c, 0x1e, 0x07, 0x11, 0xc0, 0xbc, 0x00, 0xf8, 0x96, 0x04, 0x58, 0xc2, 0x49, 0x21, 0x4e, + 0xeb, 0xa2, 0x0f, 0xe1, 0x26, 0x19, 0x13, 0xdd, 0x20, 0x03, 0x83, 0x46, 0x00, 0x75, 0x01, 0xf0, + 0xa2, 0x04, 0xb8, 0xb9, 0x9b, 0x55, 0xc0, 0x79, 0x1b, 0x74, 0x00, 0x2b, 0xbe, 0x95, 0x87, 0x0a, + 0x82, 0xf8, 0x25, 0x09, 0xb5, 0x72, 0x9a, 0x57, 0xc1, 0x45, 0x76, 0xe8, 0x73, 0x00, 0x2d, 0xec, + 0xea, 0x5e, 0x7b, 0x4e, 0x94, 0xe1, 0x37, 0x2b, 0x24, 0x5b, 0x34, 0x0a, 0xc4, 0x25, 0x30, 0x5a, + 0xf2, 0x70, 0x02, 0x13, 0xdd, 0x81, 0x96, 0x66, 0x1b, 0x86, 0x88, 0xfc, 0x3d, 0xdb, 0xb7, 0x98, + 0x08, 0xde, 0x46, 0x1f, 0xf1, 0x66, 0xbf, 0x97, 0x92, 0xe0, 0x8c, 0x66, 0xf7, 0x8f, 0x4a, 0xb2, + 0xcd, 0x84, 0xe9, 0x8c, 0xee, 0xa4, 0x46, 0x9f, 0x57, 0x32, 0xa3, 0xcf, 0xad, 0xbc, 0x45, 0x62, + 0xf2, 0xd1, 0x61, 0x89, 0x07, 0xbf, 0x6e, 0x8d, 0x82, 0x07, 0x97, 0x25, 0xf1, 0xad, 0x0b, 0x53, + 0x29, 0xd2, 0x4e, 0x34, 0xc6, 0x9b, 0xe2, 0xcd, 0x93, 0x42, 0x9c, 0x46, 0xee, 0x7e, 0x00, 0xad, + 0x74, 0x1e, 0xa6, 0x66, 0x7a, 0xe5, 0xd2, 0x99, 0xfe, 0x1b, 0x05, 0xd6, 0x4a, 0xbc, 0x23, 0x03, + 0x5a, 0x26, 0x79, 0x9c, 0x78, 0xe6, 0x4b, 0x67, 0x63, 0xce, 0x9a, 0xd4, 0x80, 0x35, 0xa9, 0x0f, + 0x2c, 0x76, 0xe8, 0x1e, 0x33, 0x57, 0xb7, 0x46, 0xc1, 0x3b, 0x1c, 0xa4, 0xb0, 0x70, 0x06, 0x1b, + 0x7d, 0x06, 0x4d, 0x93, 0x3c, 0x3e, 0xf6, 0xdd, 0x51, 0xd1, 0x7d, 0x55, 0xf3, 0x23, 0xfa, 0xc7, + 0x81, 0x44, 0xc1, 0x11, 0x5e, 0xf7, 0x10, 0x36, 0x53, 0x87, 0xe4, 0xa5, 0x82, 0x3e, 0xf4, 0x8d, + 0x63, 0x1a, 0x3f, 0xf8, 0x1b, 0xb0, 0xe0, 0x10, 0x97, 0xe9, 0x51, 0xb9, 0x68, 0xf4, 0x97, 0xa6, + 0xe7, 0x1b, 0x0b, 0x47, 0xe1, 0x22, 0x8e, 0xe5, 0xdd, 0x7f, 0x2b, 0xd0, 0x38, 0xd6, 0x88, 0x41, + 0xaf, 0x81, 0x3a, 0xdc, 0x4b, 0x51, 0x87, 0x6e, 0x69, 0x10, 0x89, 0xfd, 0x94, 0xb2, 0x86, 0xfd, + 0x0c, 0x6b, 0x78, 0xf9, 0x12, 0x9c, 0x8b, 0x09, 0xc3, 0xfb, 0xb0, 0x10, 0xb9, 0x4b, 0x55, 0x49, + 0xe5, 0xb2, 0x2a, 0xd9, 0xfd, 0x6d, 0x0d, 0x16, 0x13, 0x2e, 0xae, 0x66, 0xcd, 0xaf, 0x3b, 0x31, + 0x68, 0xf0, 0x4a, 0xb2, 0x53, 0xe5, 0x20, 0x6a, 0x38, 0x54, 0x04, 0xf3, 0x5b, 0xdc, 0xbd, 0xf3, + 0xb3, 0xc6, 0x07, 0xd0, 0x62, 0xc4, 0x1d, 0x51, 0x16, 0xca, 0xc4, 0x85, 0x2d, 0xc4, 0xe4, 0xe1, + 0x24, 0x25, 0xc5, 0x19, 0xed, 0xf5, 0xbb, 0xb0, 0x94, 0x72, 0x76, 0xa5, 0x21, 0xec, 0x4b, 0x7e, + 0x39, 0x71, 0x70, 0x5e, 0x43, 0x74, 0x7d, 0x9c, 0x8a, 0xae, 0xad, 0xf2, 0xcb, 0x4c, 0xa4, 0x4c, + 0x59, 0x8c, 0xe1, 0x4c, 0x8c, 0xbd, 0x5e, 0x09, 0xed, 0xe2, 0x48, 0xfb, 0x47, 0x0d, 0x56, 0x13, + 0xda, 0x31, 0x37, 0xfd, 0x7e, 0xaa, 0x40, 0x6f, 0x65, 0x0a, 0x74, 0xbb, 0xc8, 0xe6, 0xb9, 0x91, + 0xd3, 0x62, 0xc2, 0x38, 0xfb, 0xbf, 0x48, 0x18, 0xff, 0xa0, 0xc0, 0x72, 0xe2, 0xee, 0xae, 0x81, + 0x31, 0x3e, 0x48, 0x33, 0xc6, 0x97, 0xab, 0x04, 0x4d, 0x09, 0x65, 0xfc, 0x73, 0x23, 0xb5, 0xf9, + 0xff, 0x7b, 0x12, 0xf3, 0x4b, 0x58, 0x1d, 0xdb, 0x86, 0x6f, 0xd2, 0x3d, 0x83, 0xe8, 0x66, 0xa8, + 0xc0, 0x87, 0xbe, 0xd9, 0xec, 0x1f, 0x43, 0x11, 0x3c, 0x75, 0x3d, 0xdd, 0x63, 0xd4, 0x62, 0x9f, + 0xc6, 0x96, 0xfd, 0x6f, 0x4b, 0x27, 0xab, 0x9f, 0x16, 0xc0, 0xe1, 0x42, 0x27, 0xe8, 0x7b, 0xb0, + 0xc8, 0x07, 0x66, 0x5d, 0xa3, 0x9c, 0x7b, 0xcb, 0xc0, 0x5a, 0x91, 0x40, 0x8b, 0xc7, 0xb1, 0x08, + 0x27, 0xf5, 0xd0, 0x23, 0x58, 0x71, 0xec, 0xe1, 0x01, 0xb1, 
0xc8, 0x88, 0xf2, 0x31, 0xe3, 0xc8, + 0x36, 0x74, 0x6d, 0x22, 0x98, 0xcd, 0x42, 0xff, 0xdd, 0x70, 0xb8, 0x3c, 0xca, 0xab, 0x3c, 0xe3, + 0x14, 0x21, 0xbf, 0x2c, 0x92, 0xba, 0x08, 0x12, 0xb9, 0xd0, 0xf2, 0x65, 0xbb, 0x97, 0x44, 0x2f, + 0xf8, 0x0b, 0x67, 0xa7, 0x4a, 0x84, 0x9d, 0xa6, 0x2c, 0xe3, 0xea, 0x9f, 0x5e, 0xc7, 0x19, 0x0f, + 0xa5, 0xc4, 0xad, 0xf9, 0xdf, 0x10, 0xb7, 0xee, 0x3f, 0xeb, 0x70, 0x33, 0x57, 0x2a, 0xd1, 0x0f, + 0x2f, 0x60, 0x38, 0xb7, 0x9e, 0x1b, 0xbb, 0xc9, 0x51, 0x93, 0xd9, 0x2b, 0x50, 0x93, 0x5d, 0x58, + 0xd6, 0x7c, 0xd7, 0xa5, 0x16, 0xcb, 0x10, 0x93, 0x88, 0x1a, 0xed, 0xa5, 0xc5, 0x38, 0xab, 0x5f, + 0xc4, 0xae, 0x1a, 0x57, 0x64, 0x57, 0xc9, 0x5d, 0xc8, 0x09, 0x39, 0x08, 0xbb, 0xfc, 0x2e, 0xe4, + 0xa0, 0x9c, 0xd5, 0xe7, 0xd3, 0x41, 0x80, 0x1a, 0x21, 0xcc, 0xa7, 0xa7, 0x83, 0xd3, 0x94, 0x14, + 0x67, 0xb4, 0x0b, 0x98, 0xca, 0x42, 0x55, 0xa6, 0x82, 0x48, 0x8a, 0x47, 0x81, 0xc8, 0xf1, 0xed, + 0x2a, 0xb1, 0x5c, 0x99, 0x48, 0x75, 0xff, 0xa2, 0xc0, 0x8b, 0xa5, 0x49, 0x80, 0x76, 0x53, 0x2d, + 0x77, 0x3b, 0xd3, 0x72, 0xbf, 0x53, 0x6a, 0x98, 0xe8, 0xbb, 0x6e, 0x31, 0x35, 0x7a, 0xbf, 0x1a, + 0x35, 0x2a, 0x98, 0xdb, 0x2f, 0xe7, 0x48, 0xfd, 0xed, 0x27, 0x4f, 0x3b, 0x33, 0x5f, 0x3d, 0xed, + 0xcc, 0x7c, 0xfd, 0xb4, 0x33, 0xf3, 0xab, 0x69, 0x47, 0x79, 0x32, 0xed, 0x28, 0x5f, 0x4d, 0x3b, + 0xca, 0xd7, 0xd3, 0x8e, 0xf2, 0xb7, 0x69, 0x47, 0xf9, 0xcd, 0x37, 0x9d, 0x99, 0xcf, 0xe6, 0xa5, + 0xc7, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x99, 0x8d, 0x1e, 0xaf, 0x61, 0x1b, 0x00, 0x00, +} + func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -199,36 +798,45 @@ func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + i-- + dAtA[i] = 0x18 + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Data.Size())) - n2, err := m.Data.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -236,37 +844,46 @@ func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevisionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Deployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -274,41 +891,52 @@ func (m *Deployment) Marshal() (dAtA []byte, err error) { } func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n6, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -316,49 +944,62 @@ func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { } func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n7, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n8, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 - return i, nil + i-- + dAtA[i] = 0x32 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -366,37 +1007,46 @@ func (m *DeploymentList) Marshal() (dAtA []byte, err error) { } func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentRollback) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -404,51 +1054,61 @@ func (m *DeploymentRollback) Marshal() (dAtA []byte, err error) { } func (m *DeploymentRollback) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentRollback) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + { + size, err := 
m.RollbackTo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a if len(m.UpdatedAnnotations) > 0 { keysForUpdatedAnnotations := make([]string, 0, len(m.UpdatedAnnotations)) for k := range m.UpdatedAnnotations { keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) - for _, k := range keysForUpdatedAnnotations { + for iNdEx := len(keysForUpdatedAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.UpdatedAnnotations[string(keysForUpdatedAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.UpdatedAnnotations[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForUpdatedAnnotations[iNdEx]) + copy(dAtA[i:], keysForUpdatedAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUpdatedAnnotations[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n10, err := m.RollbackTo.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -456,79 +1116,92 @@ func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { } func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.ProgressDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + i-- + dAtA[i] = 0x48 } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n11, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.RollbackTo != nil { + { + size, err := m.RollbackTo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n12, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n13, err := m.Strategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x42 } - dAtA[i] = 0x38 - i++ + i-- if m.Paused { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if m.RollbackTo != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n14, err := m.RollbackTo.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x38 + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x28 + { + size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n14 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.ProgressDeadlineSeconds != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + i-- + dAtA[i] = 0x22 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - return i, nil + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -536,52 +1209,59 @@ func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x40 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x38 if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - if m.CollisionCount != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + 
dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -589,31 +1269,39 @@ func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n15, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n15 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -621,20 +1309,25 @@ func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { } func (m *RollbackConfig) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollbackConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -642,37 +1335,46 @@ func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n16, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n16 - } if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n17, err := m.MaxSurge.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.MaxUnavailable != nil { + { + size, err := 
m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n17 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -680,22 +1382,27 @@ func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateStatefulSetStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.Partition != nil { - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *Scale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -703,41 +1410,52 @@ func (m *Scale) Marshal() (dAtA []byte, err error) { } func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n18, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n19, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n19 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n20, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n20 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -745,20 +1463,25 @@ func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { } func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != 
nil { return nil, err } @@ -766,46 +1489,54 @@ func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { } func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i -= len(m.TargetSelector) + copy(dAtA[i:], m.TargetSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) + i-- + dAtA[i] = 0x1a if len(m.Selector) > 0 { keysForSelector := make([]string, 0, len(m.Selector)) for k := range m.Selector { keysForSelector = append(keysForSelector, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for _, k := range keysForSelector { + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.Selector[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) - i += copy(dAtA[i:], m.TargetSelector) - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *StatefulSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -813,41 +1544,52 @@ func (m *StatefulSet) Marshal() (dAtA []byte, err error) { } func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n21, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n21 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n22, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n22 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n23, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n23 - return i, 
nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -855,41 +1597,52 @@ func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n24, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n24 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -897,37 +1650,46 @@ func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n25, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n25 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - 
n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -935,73 +1697,88 @@ func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x40 } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n26, err := m.Selector.MarshalTo(dAtA[i:]) + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n26 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n27, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n27 + i-- + dAtA[i] = 0x3a + i -= len(m.PodManagementPolicy) + copy(dAtA[i:], m.PodManagementPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) + i-- + dAtA[i] = 0x32 + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0x2a if len(m.VolumeClaimTemplates) > 0 { - for _, msg := range m.VolumeClaimTemplates { + for iNdEx := len(m.VolumeClaimTemplates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeClaimTemplates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + } + } + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i += copy(dAtA[i:], m.ServiceName) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) - i += copy(dAtA[i:], m.PodManagementPolicy) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n28, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n28 - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1009,59 +1786,68 @@ func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + 
size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ObservedGeneration != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) - } - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) - i += copy(dAtA[i:], m.CurrentRevision) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) - i += copy(dAtA[i:], m.UpdateRevision) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x52 } } - return i, nil + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 + } + i -= len(m.UpdateRevision) + copy(dAtA[i:], m.UpdateRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) + i-- + dAtA[i] = 0x3a + i -= len(m.CurrentRevision) + copy(dAtA[i:], m.CurrentRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + if m.ObservedGeneration != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1069,37 +1855,50 @@ func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n29, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) } - i += n29 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *ControllerRevision) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1111,6 +1910,9 @@ func (m *ControllerRevision) Size() (n int) { } func (m *ControllerRevisionList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1125,6 +1927,9 @@ func (m *ControllerRevisionList) Size() (n int) { } func (m *Deployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1137,6 +1942,9 @@ func (m *Deployment) Size() (n int) { } func (m *DeploymentCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1155,6 +1963,9 @@ func (m *DeploymentCondition) Size() (n int) { } func (m *DeploymentList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1169,6 +1980,9 @@ func (m *DeploymentList) Size() (n int) { } func (m *DeploymentRollback) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1187,6 +2001,9 @@ func (m *DeploymentRollback) Size() (n int) { } func (m *DeploymentSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -1216,6 +2033,9 @@ func (m *DeploymentSpec) Size() (n int) { } func (m *DeploymentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -1237,6 +2057,9 @@ func (m *DeploymentStatus) Size() (n int) { } func (m *DeploymentStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1249,6 +2072,9 @@ func (m *DeploymentStrategy) Size() (n int) { } func (m *RollbackConfig) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Revision)) @@ -1256,6 +2082,9 @@ func (m *RollbackConfig) Size() (n int) { } func (m *RollingUpdateDeployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MaxUnavailable != nil { @@ -1270,6 +2099,9 @@ func (m *RollingUpdateDeployment) Size() (n int) { } func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Partition != nil { @@ -1279,6 +2111,9 @@ func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { } func (m *Scale) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1291,6 +2126,9 @@ func (m *Scale) Size() (n int) { } func (m *ScaleSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1298,6 +2136,9 @@ func (m *ScaleSpec) Size() (n int) { } func (m *ScaleStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1315,6 +2156,9 @@ func (m *ScaleStatus) Size() (n int) { } func (m *StatefulSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1327,6 +2171,9 @@ func (m *StatefulSet) Size() (n int) { } func (m *StatefulSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = 
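Editor's note on the hunk above: the regenerated marshalers fill the output buffer from the end. MarshalToSizedBuffer emits the highest-numbered field first and walks backwards, and encodeVarintGenerated now subtracts the varint's encoded width from the offset before writing, returning the new start of the buffer. A minimal, self-contained sketch of that reverse-fill pattern (standalone names chosen for illustration, not code from this diff):

    package main

    import (
        "fmt"
        "math/bits"
    )

    // sov mirrors the regenerated sovGenerated: number of bytes a uvarint occupies.
    func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

    // encodeVarint mirrors the regenerated encodeVarintGenerated: it writes v
    // immediately before offset and returns the new (smaller) offset.
    func encodeVarint(dAtA []byte, offset int, v uint64) int {
        offset -= sov(v)
        base := offset
        for v >= 1<<7 {
            dAtA[offset] = uint8(v&0x7f | 0x80)
            v >>= 7
            offset++
        }
        dAtA[offset] = uint8(v)
        return base
    }

    func main() {
        buf := make([]byte, 16)
        i := len(buf)
        // Value first, then the key byte, exactly as the generated code does:
        i = encodeVarint(buf, i, 300) // varint value 300 -> ac 02
        i--
        buf[i] = 0x08                // key for field 1, wire type varint
        fmt.Printf("% x\n", buf[i:]) // prints: 08 ac 02
    }

Writing backwards lets each length-delimited field be prefixed with its exact size without a second sizing pass, which is why the surrounding hunks replace the old MarshalTo forward writers.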
len(m.Type) @@ -1343,6 +2190,9 @@ func (m *StatefulSetCondition) Size() (n int) { } func (m *StatefulSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1357,6 +2207,9 @@ func (m *StatefulSetList) Size() (n int) { } func (m *StatefulSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -1387,6 +2240,9 @@ func (m *StatefulSetSpec) Size() (n int) { } func (m *StatefulSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ObservedGeneration != nil { @@ -1413,6 +2269,9 @@ func (m *StatefulSetStatus) Size() (n int) { } func (m *StatefulSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1425,14 +2284,7 @@ func (m *StatefulSetUpdateStrategy) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1442,8 +2294,8 @@ func (this *ControllerRevision) String() string { return "nil" } s := strings.Join([]string{`&ControllerRevision{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + strings.Replace(strings.Replace(this.Data.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, `}`, }, "") @@ -1453,9 +2305,14 @@ func (this *ControllerRevisionList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ControllerRevision{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ControllerRevisionList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1465,7 +2322,7 @@ func (this *Deployment) String() string { return "nil" } s := strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1481,8 +2338,8 @@ func (this *DeploymentCondition) String() string { 
`Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1491,9 +2348,14 @@ func (this *DeploymentList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Deployment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1526,13 +2388,13 @@ func (this *DeploymentSpec) String() string { } s := strings.Join([]string{`&DeploymentSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, - `RollbackTo:` + strings.Replace(fmt.Sprintf("%v", this.RollbackTo), "RollbackConfig", "RollbackConfig", 1) + `,`, + `RollbackTo:` + strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1) + `,`, `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, `}`, }, "") @@ -1542,13 +2404,18 @@ func (this *DeploymentStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DeploymentStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, 
`Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, `}`, @@ -1561,7 +2428,7 @@ func (this *DeploymentStrategy) String() string { } s := strings.Join([]string{`&DeploymentStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, `}`, }, "") return s @@ -1581,8 +2448,8 @@ func (this *RollingUpdateDeployment) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -1602,7 +2469,7 @@ func (this *Scale) String() string { return "nil" } s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1646,7 +2513,7 @@ func (this *StatefulSet) String() string { return "nil" } s := strings.Join([]string{`&StatefulSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1660,7 +2527,7 @@ func (this *StatefulSetCondition) String() string { s := strings.Join([]string{`&StatefulSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -1671,9 +2538,14 @@ func (this *StatefulSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]StatefulSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&StatefulSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1682,11 +2554,16 @@ func (this *StatefulSetSpec) String() string { if this == nil { return "nil" } + repeatedStringForVolumeClaimTemplates := "[]PersistentVolumeClaim{" + for _, f := range this.VolumeClaimTemplates { + repeatedStringForVolumeClaimTemplates += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumeClaimTemplates += "}" s := strings.Join([]string{`&StatefulSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_api_core_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `VolumeClaimTemplates:` + repeatedStringForVolumeClaimTemplates + `,`, `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, @@ -1699,6 +2576,11 @@ func (this *StatefulSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]StatefulSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&StatefulSetStatus{`, `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, @@ -1708,7 +2590,7 @@ func (this *StatefulSetStatus) String() string { `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -1719,7 +2601,7 @@ func (this *StatefulSetUpdateStrategy) String() string { } s := strings.Join([]string{`&StatefulSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, `}`, }, "") return s @@ -1747,7 +2629,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1775,7 +2657,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1784,6 +2666,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1805,7 +2690,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1814,6 +2699,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1835,7 +2723,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -1849,6 +2737,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1876,7 +2767,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1904,7 +2795,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1913,6 +2804,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1934,7 +2828,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1943,6 +2837,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1960,6 +2857,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { if skippy < 0 { return 
ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1987,7 +2887,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2015,7 +2915,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2024,6 +2924,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2045,7 +2948,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2054,6 +2957,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2075,7 +2981,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2084,6 +2990,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2100,6 +3009,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2127,7 +3039,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2155,7 +3067,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2165,6 +3077,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2184,7 +3099,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2194,6 +3109,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2213,7 +3131,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2223,6 +3141,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2242,7 +3163,7 @@ 
func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2252,6 +3173,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2271,7 +3195,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2280,6 +3204,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2301,7 +3228,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2310,6 +3237,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2326,6 +3256,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2353,7 +3286,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2381,7 +3314,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2390,6 +3323,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2411,7 +3347,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2420,6 +3356,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2437,6 +3376,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2464,7 +3406,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2492,7 +3434,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2502,6 +3444,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2521,7 +3466,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2530,6 +3475,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2550,7 +3498,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2567,7 +3515,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2577,6 +3525,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -2593,7 +3544,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2603,6 +3554,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -2639,7 +3593,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2648,6 +3602,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2664,6 +3621,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2691,7 +3651,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2719,7 +3679,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2739,7 +3699,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2748,11 +3708,14 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { 
return err @@ -2772,7 +3735,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2781,6 +3744,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2802,7 +3768,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2811,6 +3777,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2832,7 +3801,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2851,7 +3820,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2871,7 +3840,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2891,7 +3860,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2900,6 +3869,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2924,7 +3896,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2939,6 +3911,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2966,7 +3941,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2994,7 +3969,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -3013,7 +3988,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3032,7 +4007,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3051,7 +4026,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3070,7 +4045,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + m.UnavailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3089,7 +4064,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3098,6 +4073,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3120,7 +4098,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3139,7 +4117,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3154,6 +4132,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3181,7 +4162,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3209,7 +4190,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3219,6 +4200,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3238,7 +4222,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3247,6 +4231,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3266,6 +4253,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3293,7 +4283,7 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3321,7 +4311,7 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -3335,6 +4325,9 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3362,7 +4355,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3390,7 
+4383,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3399,11 +4392,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3423,7 +4419,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3432,11 +4428,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxSurge == nil { - m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxSurge = &intstr.IntOrString{} } if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3451,6 +4450,9 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3478,7 +4480,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3506,7 +4508,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3521,6 +4523,9 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3548,7 +4553,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3576,7 +4581,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3585,6 +4590,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3606,7 +4614,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3615,6 +4623,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3636,7 +4647,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ 
-3645,6 +4656,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3661,6 +4675,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3688,7 +4705,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3716,7 +4733,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3730,6 +4747,9 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3757,7 +4777,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3785,7 +4805,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3804,7 +4824,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3813,6 +4833,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3833,7 +4856,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3850,7 +4873,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3860,6 +4883,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -3876,7 +4902,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3886,6 +4912,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -3922,7 +4951,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3932,6 +4961,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3946,6 +4978,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3973,7 +5008,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4001,7 +5036,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4010,6 +5045,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4031,7 +5069,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4040,6 +5078,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4061,7 +5102,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4070,6 +5111,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4086,6 +5130,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4113,7 +5160,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4141,7 +5188,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4151,6 +5198,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4170,7 +5220,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4180,6 +5230,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4199,7 +5252,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4208,6 +5261,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) 
error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4229,7 +5285,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4239,6 +5295,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4258,7 +5317,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4268,6 +5327,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4282,6 +5344,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4309,7 +5374,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4337,7 +5402,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4346,6 +5411,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4367,7 +5435,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4376,6 +5444,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4393,6 +5464,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4420,7 +5494,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4448,7 +5522,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4468,7 +5542,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4477,11 +5551,14 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { 
return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4501,7 +5578,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4510,6 +5587,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4531,7 +5611,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4540,10 +5620,13 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_api_core_v1.PersistentVolumeClaim{}) + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, v11.PersistentVolumeClaim{}) if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -4562,7 +5645,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4572,6 +5655,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4591,7 +5677,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4601,6 +5687,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4620,7 +5709,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4629,6 +5718,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4650,7 +5742,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4665,6 +5757,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4692,7 +5787,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4720,7 +5815,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { 
} b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -4740,7 +5835,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4759,7 +5854,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4778,7 +5873,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift + m.CurrentReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4797,7 +5892,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4816,7 +5911,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4826,6 +5921,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4845,7 +5943,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4855,6 +5953,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4874,7 +5975,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4894,7 +5995,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4903,6 +6004,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4920,6 +6024,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4947,7 +6054,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4975,7 +6082,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4985,6 +6092,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5004,7 +6114,7 
@@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5013,6 +6123,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5032,6 +6145,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5098,10 +6214,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -5130,6 +6249,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -5148,128 +6270,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1859 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcd, 0x6f, 0x24, 0x47, - 0x15, 0x77, 0x8f, 0x67, 0xec, 0xf1, 0x73, 0x3c, 0xde, 0x2d, 0x9b, 0xf5, 0xc4, 0x81, 0xb1, 0xd5, - 0x44, 0x89, 0xf3, 0xe1, 0x9e, 0xac, 0x13, 0xa2, 0x64, 0x17, 0x45, 0x78, 0xbc, 0x4b, 0xb2, 0x91, - 0x8d, 0x9d, 0xb2, 0x1d, 0x44, 0x00, 0x29, 0x35, 0x3d, 0xb5, 0xb3, 0x1d, 0xf7, 0x97, 0xba, 0x6b, - 0x86, 0x1d, 0x71, 0xe1, 0x0f, 0x40, 0x0a, 0x67, 0xfe, 0x0a, 0x8e, 0x08, 0x6e, 0x9c, 0xf6, 0x82, - 0x14, 0x71, 0x21, 0x27, 0x8b, 0x9d, 0x5c, 0x81, 0x1b, 0x97, 0x95, 0x90, 0x50, 0x55, 0x57, 0x7f, - 0x77, 0xdb, 0x6d, 0xa4, 0xf5, 0x21, 0xb7, 0xe9, 0x7a, 0xef, 0xfd, 0x5e, 0x7d, 0xbc, 0xaf, 0xdf, - 0xc0, 0x8f, 0xce, 0xde, 0xf3, 0x35, 0xc3, 0xe9, 0x9e, 0x8d, 0xfa, 0xd4, 0xb3, 0x29, 0xa3, 0x7e, - 0x77, 0x4c, 0xed, 0x81, 0xe3, 0x75, 0xa5, 0x80, 0xb8, 0x46, 0x97, 0xb8, 0xae, 0xdf, 0x1d, 0xdf, - 0xee, 0x53, 0x46, 0x6e, 0x77, 0x87, 0xd4, 0xa6, 0x1e, 0x61, 0x74, 0xa0, 0xb9, 0x9e, 0xc3, 0x1c, - 0xb4, 0x16, 0x28, 0x6a, 0xc4, 0x35, 0x34, 0xae, 0xa8, 0x49, 0xc5, 0xf5, 0xed, 0xa1, 0xc1, 0x1e, - 0x8d, 0xfa, 0x9a, 0xee, 0x58, 0xdd, 0xa1, 0x33, 0x74, 0xba, 0x42, 0xbf, 0x3f, 0x7a, 0x28, 0xbe, - 0xc4, 0x87, 0xf8, 0x15, 0xe0, 0xac, 0xab, 0x09, 0x87, 0xba, 0xe3, 0xd1, 0xee, 0x38, 0xe7, 0x6b, - 0xfd, 0x9d, 0x58, 0xc7, 0x22, 0xfa, 0x23, 0xc3, 0xa6, 0xde, 0xa4, 0xeb, 0x9e, 0x0d, 0xf9, 0x82, - 0xdf, 0xb5, 0x28, 0x23, 0x45, 0x56, 0xdd, 0x32, 0x2b, 0x6f, 0x64, 0x33, 0xc3, 0xa2, 0x39, 0x83, - 0x77, 0x2f, 0x33, 0xf0, 0xf5, 0x47, 0xd4, 0x22, 0x39, 0xbb, 0xb7, 0xcb, 0xec, 0x46, 0xcc, 0x30, - 0xbb, 0x86, 0xcd, 0x7c, 0xe6, 0x65, 0x8d, 0xd4, 0xff, 0x28, 0x80, 0xf6, 0x1c, 0x9b, 0x79, 0x8e, - 0x69, 0x52, 0x0f, 0xd3, 0xb1, 0xe1, 0x1b, 0x8e, 0x8d, 0x3e, 0x87, 0x26, 0x3f, 0xcf, 0x80, 0x30, - 0xd2, 0x56, 0x36, 0x95, 0xad, 0xc5, 0x9d, 0xb7, 0xb4, 0xf8, 0xa6, 0x23, 0x78, 0xcd, 0x3d, 0x1b, - 0xf2, 0x05, 0x5f, 0xe3, 0xda, 0xda, 0xf8, 0xb6, 0x76, 0xd8, 0xff, 0x82, 0xea, 0xec, 0x80, 0x32, - 
0xd2, 0x43, 0x4f, 0xce, 0x37, 0x66, 0xa6, 0xe7, 0x1b, 0x10, 0xaf, 0xe1, 0x08, 0x15, 0x1d, 0x42, - 0x5d, 0xa0, 0xd7, 0x04, 0xfa, 0x76, 0x29, 0xba, 0x3c, 0xb4, 0x86, 0xc9, 0xaf, 0xee, 0x3f, 0x66, - 0xd4, 0xe6, 0xdb, 0xeb, 0xbd, 0x20, 0xa1, 0xeb, 0xf7, 0x08, 0x23, 0x58, 0x00, 0xa1, 0x37, 0xa1, - 0xe9, 0xc9, 0xed, 0xb7, 0x67, 0x37, 0x95, 0xad, 0xd9, 0xde, 0x0d, 0xa9, 0xd5, 0x0c, 0x8f, 0x85, - 0x23, 0x0d, 0xf5, 0x89, 0x02, 0xb7, 0xf2, 0xe7, 0xde, 0x37, 0x7c, 0x86, 0x7e, 0x91, 0x3b, 0xbb, - 0x56, 0xed, 0xec, 0xdc, 0x5a, 0x9c, 0x3c, 0x72, 0x1c, 0xae, 0x24, 0xce, 0x7d, 0x04, 0x0d, 0x83, - 0x51, 0xcb, 0x6f, 0xd7, 0x36, 0x67, 0xb7, 0x16, 0x77, 0xde, 0xd0, 0x4a, 0x02, 0x58, 0xcb, 0xef, - 0xae, 0xb7, 0x24, 0x71, 0x1b, 0x0f, 0x38, 0x02, 0x0e, 0x80, 0xd4, 0xdf, 0xd6, 0x00, 0xee, 0x51, - 0xd7, 0x74, 0x26, 0x16, 0xb5, 0xd9, 0x35, 0x3c, 0xdd, 0x03, 0xa8, 0xfb, 0x2e, 0xd5, 0xe5, 0xd3, - 0xbd, 0x5a, 0x7a, 0x82, 0x78, 0x53, 0xc7, 0x2e, 0xd5, 0xe3, 0x47, 0xe3, 0x5f, 0x58, 0x40, 0xa0, - 0x4f, 0x60, 0xce, 0x67, 0x84, 0x8d, 0x7c, 0xf1, 0x64, 0x8b, 0x3b, 0xaf, 0x55, 0x01, 0x13, 0x06, - 0xbd, 0x96, 0x84, 0x9b, 0x0b, 0xbe, 0xb1, 0x04, 0x52, 0xff, 0x3e, 0x0b, 0x2b, 0xb1, 0xf2, 0x9e, - 0x63, 0x0f, 0x0c, 0xc6, 0x43, 0xfa, 0x2e, 0xd4, 0xd9, 0xc4, 0xa5, 0xe2, 0x4e, 0x16, 0x7a, 0xaf, - 0x86, 0x9b, 0x39, 0x99, 0xb8, 0xf4, 0xd9, 0xf9, 0xc6, 0x5a, 0x81, 0x09, 0x17, 0x61, 0x61, 0x84, - 0xf6, 0xa3, 0x7d, 0xd6, 0x84, 0xf9, 0x3b, 0x69, 0xe7, 0xcf, 0xce, 0x37, 0x0a, 0x0a, 0x88, 0x16, - 0x21, 0xa5, 0xb7, 0x88, 0x5e, 0x81, 0x39, 0x8f, 0x12, 0xdf, 0xb1, 0xdb, 0x75, 0x81, 0x16, 0x1d, - 0x05, 0x8b, 0x55, 0x2c, 0xa5, 0xe8, 0x35, 0x98, 0xb7, 0xa8, 0xef, 0x93, 0x21, 0x6d, 0x37, 0x84, - 0xe2, 0xb2, 0x54, 0x9c, 0x3f, 0x08, 0x96, 0x71, 0x28, 0x47, 0x5f, 0x40, 0xcb, 0x24, 0x3e, 0x3b, - 0x75, 0x07, 0x84, 0xd1, 0x13, 0xc3, 0xa2, 0xed, 0x39, 0x71, 0xa1, 0xaf, 0x57, 0x7b, 0x7b, 0x6e, - 0xd1, 0xbb, 0x25, 0xd1, 0x5b, 0xfb, 0x29, 0x24, 0x9c, 0x41, 0x46, 0x63, 0x40, 0x7c, 0xe5, 0xc4, - 0x23, 0xb6, 0x1f, 0x5c, 0x14, 0xf7, 0x37, 0x7f, 0x65, 0x7f, 0xeb, 0xd2, 0x1f, 0xda, 0xcf, 0xa1, - 0xe1, 0x02, 0x0f, 0xea, 0x1f, 0x15, 0x68, 0xc5, 0xcf, 0x74, 0x0d, 0xb9, 0xfa, 0x51, 0x3a, 0x57, - 0xbf, 0x5f, 0x21, 0x38, 0x4b, 0x72, 0xf4, 0x9f, 0x35, 0x40, 0xb1, 0x12, 0x76, 0x4c, 0xb3, 0x4f, - 0xf4, 0x33, 0xb4, 0x09, 0x75, 0x9b, 0x58, 0x61, 0x4c, 0x46, 0x09, 0xf2, 0x13, 0x62, 0x51, 0x2c, - 0x24, 0xe8, 0x4b, 0x05, 0xd0, 0x48, 0x5c, 0xfd, 0x60, 0xd7, 0xb6, 0x1d, 0x46, 0xf8, 0x6d, 0x84, - 0x1b, 0xda, 0xab, 0xb0, 0xa1, 0xd0, 0x97, 0x76, 0x9a, 0x43, 0xb9, 0x6f, 0x33, 0x6f, 0x12, 0xbf, - 0x42, 0x5e, 0x01, 0x17, 0xb8, 0x46, 0x3f, 0x07, 0xf0, 0x24, 0xe6, 0x89, 0x23, 0xd3, 0xb6, 0xbc, - 0x06, 0x84, 0xee, 0xf7, 0x1c, 0xfb, 0xa1, 0x31, 0x8c, 0x0b, 0x0b, 0x8e, 0x20, 0x70, 0x02, 0x6e, - 0xfd, 0x3e, 0xac, 0x95, 0xec, 0x13, 0xdd, 0x80, 0xd9, 0x33, 0x3a, 0x09, 0xae, 0x0a, 0xf3, 0x9f, - 0x68, 0x15, 0x1a, 0x63, 0x62, 0x8e, 0x68, 0x90, 0x93, 0x38, 0xf8, 0xb8, 0x53, 0x7b, 0x4f, 0x51, - 0xff, 0xd0, 0x48, 0x46, 0x0a, 0xaf, 0x37, 0x68, 0x8b, 0xb7, 0x07, 0xd7, 0x34, 0x74, 0xe2, 0x0b, - 0x8c, 0x46, 0xef, 0x85, 0xa0, 0x35, 0x04, 0x6b, 0x38, 0x92, 0xa2, 0x5f, 0x42, 0xd3, 0xa7, 0x26, - 0xd5, 0x99, 0xe3, 0xc9, 0x12, 0xf7, 0x76, 0xc5, 0x98, 0x22, 0x7d, 0x6a, 0x1e, 0x4b, 0xd3, 0x00, - 0x3e, 0xfc, 0xc2, 0x11, 0x24, 0xfa, 0x04, 0x9a, 0x8c, 0x5a, 0xae, 0x49, 0x18, 0x95, 0xb7, 0x97, - 0x8a, 0x2b, 0x5e, 0x3b, 0x38, 0xd8, 0x91, 0x33, 0x38, 0x91, 0x6a, 0xa2, 0x7a, 0x46, 0x71, 0x1a, - 0xae, 0xe2, 0x08, 0x06, 0xfd, 0x0c, 0x9a, 0x3e, 0xe3, 0x5d, 0x7d, 0x38, 0x11, 0x15, 0xe5, 0xa2, - 0xb6, 0x92, 0xac, 0xa3, 
0x81, 0x49, 0x0c, 0x1d, 0xae, 0xe0, 0x08, 0x0e, 0xed, 0xc2, 0xb2, 0x65, - 0xd8, 0x98, 0x92, 0xc1, 0xe4, 0x98, 0xea, 0x8e, 0x3d, 0xf0, 0x45, 0x29, 0x6a, 0xf4, 0xd6, 0xa4, - 0xd1, 0xf2, 0x41, 0x5a, 0x8c, 0xb3, 0xfa, 0x68, 0x1f, 0x56, 0xc3, 0xb6, 0xfb, 0x91, 0xe1, 0x33, - 0xc7, 0x9b, 0xec, 0x1b, 0x96, 0xc1, 0x44, 0x81, 0x6a, 0xf4, 0xda, 0xd3, 0xf3, 0x8d, 0x55, 0x5c, - 0x20, 0xc7, 0x85, 0x56, 0xbc, 0x76, 0xba, 0x64, 0xe4, 0xd3, 0x81, 0x28, 0x38, 0xcd, 0xb8, 0x76, - 0x1e, 0x89, 0x55, 0x2c, 0xa5, 0xe8, 0xa7, 0xa9, 0x30, 0x6d, 0x5e, 0x2d, 0x4c, 0x5b, 0xe5, 0x21, - 0x8a, 0x4e, 0x61, 0xcd, 0xf5, 0x9c, 0xa1, 0x47, 0x7d, 0xff, 0x1e, 0x25, 0x03, 0xd3, 0xb0, 0x69, - 0x78, 0x33, 0x0b, 0xe2, 0x44, 0x2f, 0x4d, 0xcf, 0x37, 0xd6, 0x8e, 0x8a, 0x55, 0x70, 0x99, 0xad, - 0xfa, 0x97, 0x3a, 0xdc, 0xc8, 0xf6, 0x38, 0xf4, 0x31, 0x20, 0xa7, 0xef, 0x53, 0x6f, 0x4c, 0x07, - 0x1f, 0x06, 0x83, 0x1b, 0x9f, 0x6e, 0x14, 0x31, 0xdd, 0x44, 0x79, 0x7b, 0x98, 0xd3, 0xc0, 0x05, - 0x56, 0xc1, 0x7c, 0x24, 0x13, 0xa0, 0x26, 0x36, 0x9a, 0x98, 0x8f, 0x72, 0x49, 0xb0, 0x0b, 0xcb, - 0x32, 0xf7, 0x43, 0xa1, 0x08, 0xd6, 0xc4, 0xbb, 0x9f, 0xa6, 0xc5, 0x38, 0xab, 0x8f, 0x3e, 0x84, - 0x9b, 0x64, 0x4c, 0x0c, 0x93, 0xf4, 0x4d, 0x1a, 0x81, 0xd4, 0x05, 0xc8, 0x8b, 0x12, 0xe4, 0xe6, - 0x6e, 0x56, 0x01, 0xe7, 0x6d, 0xd0, 0x01, 0xac, 0x8c, 0xec, 0x3c, 0x54, 0x10, 0x87, 0x2f, 0x49, - 0xa8, 0x95, 0xd3, 0xbc, 0x0a, 0x2e, 0xb2, 0x43, 0x9f, 0x03, 0xe8, 0x61, 0x63, 0xf6, 0xdb, 0x73, - 0xa2, 0x92, 0xbe, 0x59, 0x21, 0x5f, 0xa2, 0x6e, 0x1e, 0x57, 0xb1, 0x68, 0xc9, 0xc7, 0x09, 0x4c, - 0x74, 0x17, 0x96, 0x3c, 0x9e, 0x01, 0xd1, 0x56, 0xe7, 0xc5, 0x56, 0xbf, 0x23, 0xcd, 0x96, 0x70, - 0x52, 0x88, 0xd3, 0xba, 0xe8, 0x0e, 0xb4, 0x74, 0xc7, 0x34, 0x45, 0xe4, 0xef, 0x39, 0x23, 0x9b, - 0x89, 0xe0, 0x6d, 0xf4, 0x10, 0xef, 0xcc, 0x7b, 0x29, 0x09, 0xce, 0x68, 0xaa, 0x7f, 0x56, 0x92, - 0x6d, 0x26, 0x4c, 0x67, 0x74, 0x27, 0x35, 0xfa, 0xbc, 0x92, 0x19, 0x7d, 0x6e, 0xe5, 0x2d, 0x12, - 0x93, 0x8f, 0x01, 0x4b, 0x3c, 0xf8, 0x0d, 0x7b, 0x18, 0x3c, 0xb8, 0x2c, 0x89, 0x6f, 0x5d, 0x98, - 0x4a, 0x91, 0x76, 0xa2, 0x31, 0xde, 0x14, 0x27, 0x4f, 0x0a, 0x71, 0x1a, 0x59, 0xfd, 0x00, 0x5a, - 0xe9, 0x3c, 0x4c, 0xcd, 0xf4, 0xca, 0xa5, 0x33, 0xfd, 0x37, 0x0a, 0xac, 0x95, 0x78, 0x47, 0x26, - 0xb4, 0x2c, 0xf2, 0x38, 0x11, 0x23, 0x97, 0xce, 0xc6, 0x9c, 0x35, 0x69, 0x01, 0x6b, 0xd2, 0x1e, - 0xd8, 0xec, 0xd0, 0x3b, 0x66, 0x9e, 0x61, 0x0f, 0x83, 0x77, 0x38, 0x48, 0x61, 0xe1, 0x0c, 0x36, - 0xfa, 0x0c, 0x9a, 0x16, 0x79, 0x7c, 0x3c, 0xf2, 0x86, 0x45, 0xf7, 0x55, 0xcd, 0x8f, 0xe8, 0x1f, - 0x07, 0x12, 0x05, 0x47, 0x78, 0xea, 0x21, 0x6c, 0xa6, 0x0e, 0xc9, 0x4b, 0x05, 0x7d, 0x38, 0x32, - 0x8f, 0x69, 0xfc, 0xe0, 0x6f, 0xc0, 0x82, 0x4b, 0x3c, 0x66, 0x44, 0xe5, 0xa2, 0xd1, 0x5b, 0x9a, - 0x9e, 0x6f, 0x2c, 0x1c, 0x85, 0x8b, 0x38, 0x96, 0xab, 0xff, 0x55, 0xa0, 0x71, 0xac, 0x13, 0x93, - 0x5e, 0x03, 0x75, 0xb8, 0x97, 0xa2, 0x0e, 0x6a, 0x69, 0x10, 0x89, 0xfd, 0x94, 0xb2, 0x86, 0xfd, - 0x0c, 0x6b, 0x78, 0xf9, 0x12, 0x9c, 0x8b, 0x09, 0xc3, 0xfb, 0xb0, 0x10, 0xb9, 0x4b, 0x55, 0x49, - 0xe5, 0xb2, 0x2a, 0xa9, 0xfe, 0xbe, 0x06, 0x8b, 0x09, 0x17, 0x57, 0xb3, 0xe6, 0xd7, 0x9d, 0x18, - 0x34, 0x78, 0x19, 0xda, 0xa9, 0x72, 0x10, 0x2d, 0x1c, 0x2a, 0x82, 0xf9, 0x2d, 0xee, 0xde, 0xf9, - 0x59, 0xe3, 0x03, 0x68, 0x31, 0xe2, 0x0d, 0x29, 0x0b, 0x65, 0xe2, 0xc2, 0x16, 0xe2, 0x49, 0xff, - 0x24, 0x25, 0xc5, 0x19, 0xed, 0xf5, 0xbb, 0xb0, 0x94, 0x72, 0x76, 0xa5, 0x21, 0xec, 0x4b, 0x7e, - 0x39, 0x71, 0x70, 0x5e, 0x43, 0x74, 0x7d, 0x9c, 0x8a, 0xae, 0xad, 0xf2, 0xcb, 0x4c, 0xa4, 0x4c, - 0x59, 0x8c, 0xe1, 0x4c, 0x8c, 0xbd, 0x5e, 0x09, 
0xed, 0xe2, 0x48, 0xfb, 0x57, 0x0d, 0x56, 0x13, - 0xda, 0x31, 0x37, 0xfd, 0x61, 0xaa, 0x40, 0x6f, 0x65, 0x0a, 0x74, 0xbb, 0xc8, 0xe6, 0xb9, 0x91, - 0xd3, 0x62, 0x76, 0x37, 0xfb, 0xbc, 0xd9, 0xdd, 0x73, 0x20, 0xc5, 0xea, 0x9f, 0x14, 0x58, 0x4e, - 0xdc, 0xdd, 0x35, 0x30, 0xc6, 0x07, 0x69, 0xc6, 0xf8, 0x72, 0x95, 0xa0, 0x29, 0xa1, 0x8c, 0x7f, - 0x6d, 0xa4, 0x36, 0xff, 0xad, 0x27, 0x31, 0xbf, 0x86, 0xd5, 0xb1, 0x63, 0x8e, 0x2c, 0xba, 0x67, - 0x12, 0xc3, 0x0a, 0x15, 0xf8, 0xc4, 0x38, 0x9b, 0xfd, 0x63, 0x28, 0x82, 0xa7, 0x9e, 0x6f, 0xf8, - 0x8c, 0xda, 0xec, 0xd3, 0xd8, 0xb2, 0xf7, 0x5d, 0xe9, 0x64, 0xf5, 0xd3, 0x02, 0x38, 0x5c, 0xe8, - 0x04, 0xfd, 0x00, 0x16, 0xf9, 0xc0, 0x6c, 0xe8, 0x94, 0x73, 0x6f, 0x19, 0x58, 0x2b, 0x12, 0x68, - 0xf1, 0x38, 0x16, 0xe1, 0xa4, 0x1e, 0x7a, 0x04, 0x2b, 0xae, 0x33, 0x38, 0x20, 0x36, 0x19, 0x52, - 0x3e, 0x66, 0x1c, 0x39, 0xa6, 0xa1, 0x4f, 0x04, 0xb3, 0x59, 0xe8, 0xbd, 0x1b, 0x4e, 0xa6, 0x47, - 0x79, 0x95, 0x67, 0x9c, 0x22, 0xe4, 0x97, 0x45, 0x52, 0x17, 0x41, 0x22, 0x0f, 0x5a, 0x23, 0xd9, - 0xee, 0x25, 0xd1, 0x0b, 0xfe, 0x6f, 0xd9, 0xa9, 0x12, 0x61, 0xa7, 0x29, 0xcb, 0xb8, 0xfa, 0xa7, - 0xd7, 0x71, 0xc6, 0x43, 0x29, 0x71, 0x6b, 0xfe, 0x3f, 0xc4, 0x4d, 0xfd, 0x77, 0x1d, 0x6e, 0xe6, - 0x4a, 0x25, 0xfa, 0xf1, 0x05, 0x0c, 0xe7, 0xd6, 0x73, 0x63, 0x37, 0xb9, 0x01, 0x7d, 0xf6, 0x0a, - 0x03, 0xfa, 0x2e, 0x2c, 0xeb, 0x23, 0xcf, 0xa3, 0x36, 0xcb, 0xb0, 0x9a, 0x88, 0x1a, 0xed, 0xa5, - 0xc5, 0x38, 0xab, 0x5f, 0xc4, 0xae, 0x1a, 0x57, 0x64, 0x57, 0xc9, 0x5d, 0xc8, 0x09, 0x39, 0x08, - 0xbb, 0xfc, 0x2e, 0xe4, 0xa0, 0x9c, 0xd5, 0xe7, 0xd3, 0x41, 0x80, 0x1a, 0x21, 0xcc, 0xa7, 0xa7, - 0x83, 0xd3, 0x94, 0x14, 0x67, 0xb4, 0x0b, 0x98, 0xca, 0x42, 0x55, 0xa6, 0x82, 0x48, 0x8a, 0x84, - 0x81, 0xc8, 0xf1, 0xed, 0x2a, 0xb1, 0x5c, 0x99, 0x85, 0xa9, 0x7f, 0x53, 0xe0, 0xc5, 0xd2, 0x24, - 0x40, 0xbb, 0xa9, 0x96, 0xbb, 0x9d, 0x69, 0xb9, 0xdf, 0x2b, 0x35, 0x4c, 0xf4, 0x5d, 0xaf, 0x98, - 0x1a, 0xbd, 0x5f, 0x8d, 0x1a, 0x15, 0xcc, 0xed, 0x97, 0x73, 0xa4, 0xde, 0xf6, 0x93, 0xa7, 0x9d, - 0x99, 0xaf, 0x9e, 0x76, 0x66, 0xbe, 0x7e, 0xda, 0x99, 0xf9, 0xcd, 0xb4, 0xa3, 0x3c, 0x99, 0x76, - 0x94, 0xaf, 0xa6, 0x1d, 0xe5, 0xeb, 0x69, 0x47, 0xf9, 0xc7, 0xb4, 0xa3, 0xfc, 0xee, 0x9b, 0xce, - 0xcc, 0x67, 0xf3, 0xd2, 0xe3, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xe9, 0x89, 0x29, 0x5c, 0x61, - 0x1b, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto index 7942b997b..694f6570d 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -43,7 +43,7 @@ option go_package = "v1beta1"; // depend on its stability. It is primarily for internal use by controllers. message ControllerRevision { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -56,7 +56,7 @@ message ControllerRevision { // ControllerRevisionList is a resource containing a list of ControllerRevision objects. 
message ControllerRevisionList { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -277,15 +277,15 @@ message RollingUpdateStatefulSetStrategy { // Scale represents a scaling request for a resource. message Scale { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional optional ScaleStatus status = 3; } diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go index cf6039df6..b77fcf7af 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types.go +++ b/vendor/k8s.io/api/apps/v1beta1/types.go @@ -60,15 +60,15 @@ type ScaleStatus struct { // Scale represents a scaling request for a resource. type Scale struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -134,13 +134,13 @@ const ( // ordering constraints. When a scale operation is performed with this // strategy, new Pods will be created from the specification version indicated // by the StatefulSet's updateRevision. - RollingUpdateStatefulSetStrategyType = "RollingUpdate" + RollingUpdateStatefulSetStrategyType StatefulSetUpdateStrategyType = "RollingUpdate" // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version // tracking and ordered rolling restarts are disabled. Pods are recreated // from the StatefulSetSpec when they are manually deleted. 
When a scale // operation is performed with this strategy,specification version indicated // by the StatefulSet's currentRevision. - OnDeleteStatefulSetStrategyType = "OnDelete" + OnDeleteStatefulSetStrategyType StatefulSetUpdateStrategyType = "OnDelete" ) // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. @@ -541,7 +541,7 @@ type DeploymentList struct { type ControllerRevision struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -558,7 +558,7 @@ type ControllerRevision struct { type ControllerRevisionList struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go index da1eb5996..504b85863 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ "": "DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the release notes for more information. ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "data": "Data is the serialized representation of the state.", "revision": "Revision indicates the revision of the state represented by Data.", } @@ -40,7 +40,7 @@ func (ControllerRevision) SwaggerDoc() map[string]string { var map_ControllerRevisionList = map[string]string{ "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of ControllerRevisions", } @@ -167,9 +167,9 @@ func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", } func (Scale) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go index 93892bfd0..fb2761241 100644 --- a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go @@ -58,7 +58,7 @@ func (in *ControllerRevision) DeepCopyObject() runtime.Object { func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ControllerRevision, len(*in)) @@ -137,7 +137,7 @@ func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Deployment, len(*in)) @@ -470,7 +470,7 @@ func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]StatefulSet, len(*in)) diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go index fc1efbc90..624bb9425 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go @@ -17,62 +17,26 @@ limitations under the License. 
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto -/* - Package v1beta2 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto - - It has these top-level messages: - ControllerRevision - ControllerRevisionList - DaemonSet - DaemonSetCondition - DaemonSetList - DaemonSetSpec - DaemonSetStatus - DaemonSetUpdateStrategy - Deployment - DeploymentCondition - DeploymentList - DeploymentSpec - DeploymentStatus - DeploymentStrategy - ReplicaSet - ReplicaSetCondition - ReplicaSetList - ReplicaSetSpec - ReplicaSetStatus - RollingUpdateDaemonSet - RollingUpdateDeployment - RollingUpdateStatefulSetStrategy - Scale - ScaleSpec - ScaleStatus - StatefulSet - StatefulSetCondition - StatefulSetList - StatefulSetSpec - StatefulSetStatus - StatefulSetUpdateStrategy -*/ package v1beta2 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + io "io" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import strings "strings" -import reflect "reflect" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import io "io" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -85,135 +49,873 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } -func (*ControllerRevision) ProtoMessage() {} -func (*ControllerRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } +func (*ControllerRevision) ProtoMessage() {} +func (*ControllerRevision) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{0} +} +func (m *ControllerRevision) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevision) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevision.Merge(m, src) +} +func (m *ControllerRevision) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevision) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRevision.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo + +func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } +func (*ControllerRevisionList) ProtoMessage() {} +func (*ControllerRevisionList) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{1} +} +func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevisionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevisionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevisionList.Merge(m, src) +} +func (m *ControllerRevisionList) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevisionList) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRevisionList.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo -func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } -func (*ControllerRevisionList) ProtoMessage() {} -func (*ControllerRevisionList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *DaemonSet) Reset() { *m = DaemonSet{} } +func (*DaemonSet) ProtoMessage() {} +func (*DaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{2} +} +func (m *DaemonSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSet.Merge(m, src) +} +func (m *DaemonSet) XXX_Size() int { + return m.Size() +} +func (m *DaemonSet) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSet.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSet proto.InternalMessageInfo -func (m *DaemonSet) Reset() { *m = DaemonSet{} } -func (*DaemonSet) ProtoMessage() {} -func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{3} +} +func (m 
*DaemonSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetCondition.Merge(m, src) +} +func (m *DaemonSetCondition) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo + +func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } +func (*DaemonSetList) ProtoMessage() {} +func (*DaemonSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{4} +} +func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetList.Merge(m, src) +} +func (m *DaemonSetList) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetList) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo + +func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } +func (*DaemonSetSpec) ProtoMessage() {} +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{5} +} +func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetSpec.Merge(m, src) +} +func (m *DaemonSetSpec) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo + +func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } +func (*DaemonSetStatus) ProtoMessage() {} +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{6} +} +func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetStatus.Merge(m, src) +} +func (m *DaemonSetStatus) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo + +func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } +func (*DaemonSetUpdateStrategy) ProtoMessage() {} +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{7} +} +func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetUpdateStrategy) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetUpdateStrategy.Merge(m, src) +} +func (m *DaemonSetUpdateStrategy) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetUpdateStrategy.DiscardUnknown(m) +} -func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } -func (*DaemonSetCondition) ProtoMessage() {} -func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo -func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } -func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{8} +} +func (m *Deployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Deployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_Deployment.Merge(m, src) +} +func (m *Deployment) XXX_Size() int { + return m.Size() +} +func (m *Deployment) XXX_DiscardUnknown() { + xxx_messageInfo_Deployment.DiscardUnknown(m) +} -func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } -func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_Deployment proto.InternalMessageInfo -func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } -func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{9} +} +func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentCondition.Merge(m, src) +} +func (m *DeploymentCondition) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) +} -func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } -func (*DaemonSetUpdateStrategy) ProtoMessage() {} -func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo -func (m *Deployment) Reset() { *m = Deployment{} } -func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) 
ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{10} +} +func (m *DeploymentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentList.Merge(m, src) +} +func (m *DeploymentList) XXX_Size() int { + return m.Size() +} +func (m *DeploymentList) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentList.DiscardUnknown(m) +} -func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } -func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_DeploymentList proto.InternalMessageInfo -func (m *DeploymentList) Reset() { *m = DeploymentList{} } -func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{11} +} +func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentSpec.Merge(m, src) +} +func (m *DeploymentSpec) XXX_Size() int { + return m.Size() +} +func (m *DeploymentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentSpec.DiscardUnknown(m) +} -func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } -func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo -func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } -func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{12} +} +func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStatus.Merge(m, src) +} +func (m *DeploymentStatus) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStatus.DiscardUnknown(m) +} -func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } -func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo -func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } 
-func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{13} +} +func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStrategy.Merge(m, src) +} +func (m *DeploymentStrategy) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m) +} -func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } -func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo -func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } -func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } +func (*ReplicaSet) ProtoMessage() {} +func (*ReplicaSet) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{14} +} +func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSet.Merge(m, src) +} +func (m *ReplicaSet) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSet) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSet.DiscardUnknown(m) +} -func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } -func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo -func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } -func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } +func (*ReplicaSetCondition) ProtoMessage() {} +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{15} +} +func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetCondition.Merge(m, src) +} +func (m *ReplicaSetCondition) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetCondition.DiscardUnknown(m) +} -func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } -func 
(*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo + +func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } +func (*ReplicaSetList) ProtoMessage() {} +func (*ReplicaSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{16} +} +func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetList.Merge(m, src) +} +func (m *ReplicaSetList) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetList) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo + +func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } +func (*ReplicaSetSpec) ProtoMessage() {} +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{17} +} +func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetSpec.Merge(m, src) +} +func (m *ReplicaSetSpec) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo + +func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } +func (*ReplicaSetStatus) ProtoMessage() {} +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{18} +} +func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetStatus.Merge(m, src) +} +func (m *ReplicaSetStatus) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo + +func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } +func (*RollingUpdateDaemonSet) ProtoMessage() {} +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{19} +} +func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDaemonSet.Merge(m, src) +} +func (m *RollingUpdateDaemonSet) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDaemonSet) 
XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDaemonSet.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{20} + return fileDescriptor_42fe616264472f7e, []int{20} +} +func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } +func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDeployment.Merge(m, src) +} +func (m *RollingUpdateDeployment) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{21} + return fileDescriptor_42fe616264472f7e, []int{21} +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.Merge(m, src) +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateStatefulSetStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.DiscardUnknown(m) } -func (m *Scale) Reset() { *m = Scale{} } -func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{22} +} +func (m *Scale) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scale) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scale.Merge(m, src) +} +func (m *Scale) XXX_Size() int { + return m.Size() +} +func (m *Scale) XXX_DiscardUnknown() { + xxx_messageInfo_Scale.DiscardUnknown(m) +} -func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } -func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +var xxx_messageInfo_Scale proto.InternalMessageInfo -func (m *StatefulSet) Reset() { *m = StatefulSet{} } 
-func (*StatefulSet) ProtoMessage() {} -func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{23} +} +func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleSpec.Merge(m, src) +} +func (m *ScaleSpec) XXX_Size() int { + return m.Size() +} +func (m *ScaleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleSpec.DiscardUnknown(m) +} -func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } -func (*StatefulSetCondition) ProtoMessage() {} -func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo -func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } -func (*StatefulSetList) ProtoMessage() {} -func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{24} +} +func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleStatus.Merge(m, src) +} +func (m *ScaleStatus) XXX_Size() int { + return m.Size() +} +func (m *ScaleStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleStatus.DiscardUnknown(m) +} -func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } -func (*StatefulSetSpec) ProtoMessage() {} -func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo -func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } -func (*StatefulSetStatus) ProtoMessage() {} -func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (m *StatefulSet) Reset() { *m = StatefulSet{} } +func (*StatefulSet) ProtoMessage() {} +func (*StatefulSet) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{25} +} +func (m *StatefulSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSet.Merge(m, src) +} +func (m *StatefulSet) XXX_Size() int { + return m.Size() +} +func (m *StatefulSet) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSet.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSet proto.InternalMessageInfo + +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { + 
return fileDescriptor_42fe616264472f7e, []int{26} +} +func (m *StatefulSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetCondition.Merge(m, src) +} +func (m *StatefulSetCondition) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo + +func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } +func (*StatefulSetList) ProtoMessage() {} +func (*StatefulSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{27} +} +func (m *StatefulSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetList.Merge(m, src) +} +func (m *StatefulSetList) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetList) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo + +func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } +func (*StatefulSetSpec) ProtoMessage() {} +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{28} +} +func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetSpec.Merge(m, src) +} +func (m *StatefulSetSpec) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo + +func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } +func (*StatefulSetStatus) ProtoMessage() {} +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{29} +} +func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetStatus.Merge(m, src) +} +func (m *StatefulSetStatus) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{30} + 
return fileDescriptor_42fe616264472f7e, []int{30} +} +func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetUpdateStrategy.Merge(m, src) +} +func (m *StatefulSetUpdateStrategy) XXX_Size() int { + return m.Size() } +func (m *StatefulSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetUpdateStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetUpdateStrategy proto.InternalMessageInfo func init() { proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1beta2.ControllerRevision") @@ -241,6 +943,7 @@ func init() { proto.RegisterType((*Scale)(nil), "k8s.io.api.apps.v1beta2.Scale") proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.apps.v1beta2.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.apps.v1beta2.ScaleStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.apps.v1beta2.ScaleStatus.SelectorEntry") proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta2.StatefulSet") proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta2.StatefulSetCondition") proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta2.StatefulSetList") @@ -248,10 +951,155 @@ func init() { proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta2.StatefulSetStatus") proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta2.StatefulSetUpdateStrategy") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto", fileDescriptor_42fe616264472f7e) +} + +var fileDescriptor_42fe616264472f7e = []byte{ + // 2171 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1c, 0xb7, + 0xf9, 0xd6, 0xec, 0x87, 0xb4, 0xa2, 0x2c, 0xc9, 0xa6, 0xf4, 0x93, 0x36, 0xf2, 0xaf, 0x2b, 0x63, + 0x13, 0x38, 0x4a, 0x6c, 0xcd, 0xda, 0xca, 0x07, 0x12, 0xbb, 0x68, 0xab, 0x95, 0x52, 0xdb, 0x81, + 0xbe, 0x42, 0x59, 0x06, 0x1a, 0xb4, 0xa8, 0xa9, 0x5d, 0x7a, 0x35, 0xd1, 0x7c, 0x61, 0x86, 0xb3, + 0xf5, 0xa2, 0x97, 0x9e, 0x0a, 0x14, 0x28, 0xd0, 0xf6, 0xda, 0x7f, 0xa2, 0xb7, 0xa2, 0x68, 0x6f, + 0x45, 0x50, 0xf8, 0x52, 0x20, 0xe8, 0x25, 0x39, 0x09, 0xf5, 0xe6, 0x54, 0x14, 0xbd, 0x14, 0xe8, + 0x25, 0x40, 0x81, 0x82, 0x1c, 0xce, 0x07, 0xe7, 0xc3, 0x3b, 0x52, 0x1c, 0xa5, 0x29, 0x72, 0xd3, + 0x92, 0xcf, 0xfb, 0xf0, 0x7d, 0xc9, 0x97, 0x7c, 0x1f, 0x72, 0x04, 0xbe, 0x73, 0xfc, 0x96, 0xab, + 0x6a, 0x56, 0xeb, 0xd8, 0x3b, 0x24, 0x8e, 0x49, 0x28, 0x71, 0x5b, 0x7d, 0x62, 0x76, 0x2d, 0xa7, + 0x25, 0x3a, 0xb0, 0xad, 0xb5, 0xb0, 0x6d, 0xbb, 0xad, 0xfe, 0xcd, 0x43, 0x42, 0xf1, 0x5a, 0xab, + 0x47, 0x4c, 0xe2, 0x60, 0x4a, 0xba, 0xaa, 0xed, 0x58, 0xd4, 0x82, 0x8b, 0x3e, 0x50, 0xc5, 0xb6, + 0xa6, 0x32, 0xa0, 0x2a, 0x80, 0x4b, 0xab, 0x3d, 0x8d, 0x1e, 0x79, 0x87, 0x6a, 0xc7, 0x32, 0x5a, + 0x3d, 0xab, 0x67, 0xb5, 0x38, 0xfe, 0xd0, 0x7b, 0xc4, 0x7f, 0xf1, 0x1f, 0xfc, 0x2f, 0x9f, 0x67, + 0xa9, 0x19, 0x1b, 0xb0, 0x63, 0x39, 0xa4, 0xd5, 0xbf, 0x99, 0x1c, 0x6b, 0xe9, 0xf5, 0x08, 0x63, + 0xe0, 0xce, 0x91, 0x66, 0x12, 0x67, 0xd0, 0xb2, 0x8f, 0x7b, 0xac, 0xc1, 0x6d, 0x19, 0x84, 0xe2, + 0x2c, 0xab, 0x56, 0x9e, 0x95, 0xe3, 0x99, 0x54, 0x33, 0x48, 0xca, 0xe0, 0xcd, 0x51, 0x06, 0x6e, + 0xe7, 0x88, 
0x18, 0x38, 0x65, 0xf7, 0x5a, 0x9e, 0x9d, 0x47, 0x35, 0xbd, 0xa5, 0x99, 0xd4, 0xa5, + 0x4e, 0xd2, 0xa8, 0xf9, 0x2f, 0x05, 0xc0, 0x0d, 0xcb, 0xa4, 0x8e, 0xa5, 0xeb, 0xc4, 0x41, 0xa4, + 0xaf, 0xb9, 0x9a, 0x65, 0xc2, 0x87, 0xa0, 0xc6, 0xe2, 0xe9, 0x62, 0x8a, 0xeb, 0xca, 0x15, 0x65, + 0x65, 0x6a, 0xed, 0x86, 0x1a, 0xcd, 0x74, 0x48, 0xaf, 0xda, 0xc7, 0x3d, 0xd6, 0xe0, 0xaa, 0x0c, + 0xad, 0xf6, 0x6f, 0xaa, 0xbb, 0x87, 0x1f, 0x90, 0x0e, 0xdd, 0x26, 0x14, 0xb7, 0xe1, 0x93, 0x93, + 0xe5, 0xb1, 0xe1, 0xc9, 0x32, 0x88, 0xda, 0x50, 0xc8, 0x0a, 0x77, 0x41, 0x85, 0xb3, 0x97, 0x38, + 0xfb, 0x6a, 0x2e, 0xbb, 0x08, 0x5a, 0x45, 0xf8, 0x47, 0xef, 0x3c, 0xa6, 0xc4, 0x64, 0xee, 0xb5, + 0x2f, 0x08, 0xea, 0xca, 0x26, 0xa6, 0x18, 0x71, 0x22, 0x78, 0x1d, 0xd4, 0x1c, 0xe1, 0x7e, 0xbd, + 0x7c, 0x45, 0x59, 0x29, 0xb7, 0x2f, 0x0a, 0x54, 0x2d, 0x08, 0x0b, 0x85, 0x88, 0xe6, 0x13, 0x05, + 0x2c, 0xa4, 0xe3, 0xde, 0xd2, 0x5c, 0x0a, 0xbf, 0x9f, 0x8a, 0x5d, 0x2d, 0x16, 0x3b, 0xb3, 0xe6, + 0x91, 0x87, 0x03, 0x07, 0x2d, 0xb1, 0xb8, 0xf7, 0x40, 0x55, 0xa3, 0xc4, 0x70, 0xeb, 0xa5, 0x2b, + 0xe5, 0x95, 0xa9, 0xb5, 0x6b, 0x6a, 0x4e, 0x02, 0xab, 0x69, 0xef, 0xda, 0xd3, 0x82, 0xb7, 0x7a, + 0x8f, 0x31, 0x20, 0x9f, 0xa8, 0xf9, 0xb3, 0x12, 0x98, 0xdc, 0xc4, 0xc4, 0xb0, 0xcc, 0x7d, 0x42, + 0xcf, 0x61, 0xe5, 0xee, 0x82, 0x8a, 0x6b, 0x93, 0x8e, 0x58, 0xb9, 0xab, 0xb9, 0x01, 0x84, 0x3e, + 0xed, 0xdb, 0xa4, 0x13, 0x2d, 0x19, 0xfb, 0x85, 0x38, 0x03, 0xdc, 0x03, 0xe3, 0x2e, 0xc5, 0xd4, + 0x73, 0xf9, 0x82, 0x4d, 0xad, 0xad, 0x14, 0xe0, 0xe2, 0xf8, 0xf6, 0x8c, 0x60, 0x1b, 0xf7, 0x7f, + 0x23, 0xc1, 0xd3, 0xfc, 0x5b, 0x09, 0xc0, 0x10, 0xbb, 0x61, 0x99, 0x5d, 0x8d, 0xb2, 0x74, 0xbe, + 0x05, 0x2a, 0x74, 0x60, 0x13, 0x3e, 0x21, 0x93, 0xed, 0xab, 0x81, 0x2b, 0xf7, 0x07, 0x36, 0xf9, + 0xec, 0x64, 0x79, 0x21, 0x6d, 0xc1, 0x7a, 0x10, 0xb7, 0x81, 0x5b, 0xa1, 0x93, 0x25, 0x6e, 0xfd, + 0xba, 0x3c, 0xf4, 0x67, 0x27, 0xcb, 0x19, 0x67, 0x87, 0x1a, 0x32, 0xc9, 0x0e, 0xc2, 0x3e, 0x80, + 0x3a, 0x76, 0xe9, 0x7d, 0x07, 0x9b, 0xae, 0x3f, 0x92, 0x66, 0x10, 0x11, 0xfe, 0xab, 0xc5, 0x16, + 0x8a, 0x59, 0xb4, 0x97, 0x84, 0x17, 0x70, 0x2b, 0xc5, 0x86, 0x32, 0x46, 0x80, 0x57, 0xc1, 0xb8, + 0x43, 0xb0, 0x6b, 0x99, 0xf5, 0x0a, 0x8f, 0x22, 0x9c, 0x40, 0xc4, 0x5b, 0x91, 0xe8, 0x85, 0xaf, + 0x80, 0x09, 0x83, 0xb8, 0x2e, 0xee, 0x91, 0x7a, 0x95, 0x03, 0x67, 0x05, 0x70, 0x62, 0xdb, 0x6f, + 0x46, 0x41, 0x7f, 0xf3, 0xb7, 0x0a, 0x98, 0x0e, 0x67, 0xee, 0x1c, 0x76, 0xce, 0x1d, 0x79, 0xe7, + 0x34, 0x47, 0x27, 0x4b, 0xce, 0x86, 0xf9, 0xb0, 0x1c, 0x73, 0x9c, 0xa5, 0x23, 0xfc, 0x01, 0xa8, + 0xb9, 0x44, 0x27, 0x1d, 0x6a, 0x39, 0xc2, 0xf1, 0xd7, 0x0a, 0x3a, 0x8e, 0x0f, 0x89, 0xbe, 0x2f, + 0x4c, 0xdb, 0x17, 0x98, 0xe7, 0xc1, 0x2f, 0x14, 0x52, 0xc2, 0xf7, 0x40, 0x8d, 0x12, 0xc3, 0xd6, + 0x31, 0x25, 0x62, 0xd7, 0xbc, 0x18, 0x77, 0x9e, 0xe5, 0x0c, 0x23, 0xdb, 0xb3, 0xba, 0xf7, 0x05, + 0x8c, 0x6f, 0x99, 0x70, 0x32, 0x82, 0x56, 0x14, 0xd2, 0x40, 0x1b, 0xcc, 0x78, 0x76, 0x97, 0x21, + 0x29, 0x3b, 0xce, 0x7b, 0x03, 0x91, 0x43, 0x37, 0x46, 0xcf, 0xca, 0x81, 0x64, 0xd7, 0x5e, 0x10, + 0xa3, 0xcc, 0xc8, 0xed, 0x28, 0xc1, 0x0f, 0xd7, 0xc1, 0xac, 0xa1, 0x99, 0x88, 0xe0, 0xee, 0x60, + 0x9f, 0x74, 0x2c, 0xb3, 0xeb, 0xf2, 0x54, 0xaa, 0xb6, 0x17, 0x05, 0xc1, 0xec, 0xb6, 0xdc, 0x8d, + 0x92, 0x78, 0xb8, 0x05, 0xe6, 0x83, 0x03, 0xf8, 0xae, 0xe6, 0x52, 0xcb, 0x19, 0x6c, 0x69, 0x86, + 0x46, 0xeb, 0xe3, 0x9c, 0xa7, 0x3e, 0x3c, 0x59, 0x9e, 0x47, 0x19, 0xfd, 0x28, 0xd3, 0xaa, 0xf9, + 0xab, 0x71, 0x30, 0x9b, 0x38, 0x17, 0xe0, 0x03, 0xb0, 0xd0, 0xf1, 0x1c, 0x87, 0x98, 0x74, 0xc7, + 0x33, 0x0e, 0x89, 0xb3, 0xdf, 0x39, 
0x22, 0x5d, 0x4f, 0x27, 0x5d, 0xbe, 0xac, 0xd5, 0x76, 0x43, + 0xf8, 0xba, 0xb0, 0x91, 0x89, 0x42, 0x39, 0xd6, 0xf0, 0x5d, 0x00, 0x4d, 0xde, 0xb4, 0xad, 0xb9, + 0x6e, 0xc8, 0x59, 0xe2, 0x9c, 0xe1, 0x56, 0xdc, 0x49, 0x21, 0x50, 0x86, 0x15, 0xf3, 0xb1, 0x4b, + 0x5c, 0xcd, 0x21, 0xdd, 0xa4, 0x8f, 0x65, 0xd9, 0xc7, 0xcd, 0x4c, 0x14, 0xca, 0xb1, 0x86, 0x6f, + 0x80, 0x29, 0x7f, 0x34, 0x3e, 0xe7, 0x62, 0x71, 0xe6, 0x04, 0xd9, 0xd4, 0x4e, 0xd4, 0x85, 0xe2, + 0x38, 0x16, 0x9a, 0x75, 0xe8, 0x12, 0xa7, 0x4f, 0xba, 0x77, 0x7c, 0x71, 0xc0, 0x2a, 0x68, 0x95, + 0x57, 0xd0, 0x30, 0xb4, 0xdd, 0x14, 0x02, 0x65, 0x58, 0xb1, 0xd0, 0xfc, 0xac, 0x49, 0x85, 0x36, + 0x2e, 0x87, 0x76, 0x90, 0x89, 0x42, 0x39, 0xd6, 0x2c, 0xf7, 0x7c, 0x97, 0xd7, 0xfb, 0x58, 0xd3, + 0xf1, 0xa1, 0x4e, 0xea, 0x13, 0x72, 0xee, 0xed, 0xc8, 0xdd, 0x28, 0x89, 0x87, 0x77, 0xc0, 0x25, + 0xbf, 0xe9, 0xc0, 0xc4, 0x21, 0x49, 0x8d, 0x93, 0xbc, 0x20, 0x48, 0x2e, 0xed, 0x24, 0x01, 0x28, + 0x6d, 0x03, 0x6f, 0x81, 0x99, 0x8e, 0xa5, 0xeb, 0x3c, 0x1f, 0x37, 0x2c, 0xcf, 0xa4, 0xf5, 0x49, + 0xce, 0x02, 0xd9, 0x1e, 0xda, 0x90, 0x7a, 0x50, 0x02, 0x09, 0x7f, 0x08, 0x40, 0x27, 0x28, 0x0c, + 0x6e, 0x1d, 0x8c, 0x50, 0x00, 0xe9, 0xb2, 0x14, 0x55, 0xe6, 0xb0, 0xc9, 0x45, 0x31, 0xca, 0xe6, + 0x87, 0x0a, 0x58, 0xcc, 0xd9, 0xe8, 0xf0, 0xdb, 0x52, 0x11, 0xbc, 0x96, 0x28, 0x82, 0x97, 0x73, + 0xcc, 0x62, 0x95, 0xf0, 0x08, 0x4c, 0x33, 0x41, 0xa2, 0x99, 0x3d, 0x1f, 0x22, 0xce, 0xb2, 0x56, + 0x6e, 0x00, 0x28, 0x8e, 0x8e, 0x4e, 0xe5, 0x4b, 0xc3, 0x93, 0xe5, 0x69, 0xa9, 0x0f, 0xc9, 0xc4, + 0xcd, 0x9f, 0x97, 0x00, 0xd8, 0x24, 0xb6, 0x6e, 0x0d, 0x0c, 0x62, 0x9e, 0x87, 0xa6, 0xb9, 0x27, + 0x69, 0x9a, 0x97, 0xf3, 0x97, 0x24, 0x74, 0x2a, 0x57, 0xd4, 0xbc, 0x97, 0x10, 0x35, 0xaf, 0x14, + 0x21, 0x7b, 0xb6, 0xaa, 0xf9, 0xb8, 0x0c, 0xe6, 0x22, 0x70, 0x24, 0x6b, 0x6e, 0x4b, 0x2b, 0xfa, + 0x72, 0x62, 0x45, 0x17, 0x33, 0x4c, 0xbe, 0x30, 0x5d, 0xf3, 0x01, 0x98, 0x61, 0xaa, 0xc3, 0x5f, + 0x3f, 0xae, 0x69, 0xc6, 0x4f, 0xad, 0x69, 0xc2, 0x4a, 0xb4, 0x25, 0x31, 0xa1, 0x04, 0x73, 0x8e, + 0x86, 0x9a, 0xf8, 0x2a, 0x6a, 0xa8, 0xdf, 0x29, 0x60, 0x26, 0x5a, 0xa6, 0x73, 0x10, 0x51, 0x77, + 0x65, 0x11, 0xf5, 0x62, 0x81, 0xe4, 0xcc, 0x51, 0x51, 0x1f, 0x57, 0xe2, 0xae, 0x73, 0x19, 0xb5, + 0xc2, 0xae, 0x60, 0xb6, 0xae, 0x75, 0xb0, 0x2b, 0xea, 0xed, 0x05, 0xff, 0xfa, 0xe5, 0xb7, 0xa1, + 0xb0, 0x57, 0x12, 0x5c, 0xa5, 0x2f, 0x56, 0x70, 0x95, 0x9f, 0x8f, 0xe0, 0xfa, 0x1e, 0xa8, 0xb9, + 0x81, 0xd4, 0xaa, 0x70, 0xca, 0x6b, 0x85, 0x36, 0xb6, 0x50, 0x59, 0x21, 0x75, 0xa8, 0xaf, 0x42, + 0xba, 0x2c, 0x65, 0x55, 0xfd, 0x32, 0x95, 0x15, 0x4b, 0x74, 0x1b, 0x7b, 0x2e, 0xe9, 0xf2, 0x4d, + 0x55, 0x8b, 0x12, 0x7d, 0x8f, 0xb7, 0x22, 0xd1, 0x0b, 0x0f, 0xc0, 0xa2, 0xed, 0x58, 0x3d, 0x87, + 0xb8, 0xee, 0x26, 0xc1, 0x5d, 0x5d, 0x33, 0x49, 0x10, 0x80, 0x5f, 0x13, 0x2f, 0x0f, 0x4f, 0x96, + 0x17, 0xf7, 0xb2, 0x21, 0x28, 0xcf, 0xb6, 0xf9, 0xc7, 0x0a, 0xb8, 0x98, 0x3c, 0x1b, 0x73, 0x64, + 0x8a, 0x72, 0x26, 0x99, 0x72, 0x3d, 0x96, 0xa7, 0xbe, 0x86, 0x8b, 0x3d, 0x15, 0xa4, 0x72, 0x75, + 0x1d, 0xcc, 0x0a, 0x59, 0x12, 0x74, 0x0a, 0xa1, 0x16, 0x2e, 0xcf, 0x81, 0xdc, 0x8d, 0x92, 0x78, + 0x78, 0x1b, 0x4c, 0x3b, 0x5c, 0x79, 0x05, 0x04, 0xbe, 0x7a, 0xf9, 0x3f, 0x41, 0x30, 0x8d, 0xe2, + 0x9d, 0x48, 0xc6, 0x32, 0xe5, 0x12, 0x09, 0x92, 0x80, 0xa0, 0x22, 0x2b, 0x97, 0xf5, 0x24, 0x00, + 0xa5, 0x6d, 0xe0, 0x36, 0x98, 0xf3, 0xcc, 0x34, 0x95, 0x9f, 0x6b, 0x97, 0x05, 0xd5, 0xdc, 0x41, + 0x1a, 0x82, 0xb2, 0xec, 0xe0, 0x43, 0x49, 0xcc, 0x8c, 0xf3, 0xf3, 0xe4, 0x7a, 0x81, 0x3d, 0x51, + 0x58, 0xcd, 0x64, 0x48, 0xad, 0x5a, 0x51, 0xa9, 0xd5, 0xfc, 
0x83, 0x02, 0x60, 0x7a, 0x1f, 0x8e, + 0x7c, 0x09, 0x48, 0x59, 0xc4, 0x2a, 0xa6, 0x96, 0xad, 0x7f, 0x6e, 0x14, 0xd4, 0x3f, 0xd1, 0x81, + 0x5a, 0x4c, 0x00, 0x89, 0x89, 0x3e, 0x9f, 0x47, 0x9d, 0xa2, 0x02, 0x28, 0x72, 0xea, 0x39, 0x08, + 0xa0, 0x18, 0xd9, 0xb3, 0x05, 0xd0, 0xdf, 0x4b, 0x60, 0x2e, 0x02, 0x17, 0x16, 0x40, 0x19, 0x26, + 0x5f, 0x3f, 0xec, 0x14, 0x13, 0x25, 0xd1, 0xd4, 0xfd, 0x37, 0x89, 0x92, 0xc8, 0xab, 0x1c, 0x51, + 0xf2, 0x9b, 0x52, 0xdc, 0xf5, 0x53, 0x8a, 0x92, 0xe7, 0xf0, 0xc2, 0xf1, 0x95, 0xd3, 0x35, 0xcd, + 0x3f, 0x95, 0xc1, 0xc5, 0xe4, 0x3e, 0x94, 0x0a, 0xa4, 0x32, 0xb2, 0x40, 0xee, 0x81, 0xf9, 0x47, + 0x9e, 0xae, 0x0f, 0x78, 0x0c, 0xb1, 0x2a, 0xe9, 0x97, 0xd6, 0xff, 0x17, 0x96, 0xf3, 0xdf, 0xcd, + 0xc0, 0xa0, 0x4c, 0xcb, 0x74, 0xbd, 0xac, 0x7c, 0xde, 0x7a, 0x59, 0x3d, 0x43, 0xbd, 0xcc, 0x96, + 0x1c, 0xe5, 0x33, 0x49, 0x8e, 0xd3, 0x15, 0xcb, 0x8c, 0x83, 0x6b, 0xe4, 0xd5, 0xff, 0xa7, 0x0a, + 0x58, 0xc8, 0xbe, 0x70, 0x43, 0x1d, 0xcc, 0x18, 0xf8, 0x71, 0xfc, 0xe1, 0x63, 0x54, 0x11, 0xf1, + 0xa8, 0xa6, 0xab, 0xfe, 0x27, 0x23, 0xf5, 0x9e, 0x49, 0x77, 0x9d, 0x7d, 0xea, 0x68, 0x66, 0xcf, + 0xaf, 0xbc, 0xdb, 0x12, 0x17, 0x4a, 0x70, 0x37, 0x3f, 0x55, 0xc0, 0x62, 0x4e, 0xe5, 0x3b, 0x5f, + 0x4f, 0xe0, 0xfb, 0xa0, 0x66, 0xe0, 0xc7, 0xfb, 0x9e, 0xd3, 0xcb, 0xaa, 0xd5, 0xc5, 0xc6, 0xe1, + 0x5b, 0x71, 0x5b, 0xb0, 0xa0, 0x90, 0xaf, 0xb9, 0x0b, 0xae, 0x48, 0x41, 0xb2, 0x9d, 0x43, 0x1e, + 0x79, 0x3a, 0xdf, 0x44, 0x42, 0x6c, 0x5c, 0x03, 0x93, 0x36, 0x76, 0xa8, 0x16, 0x4a, 0xd5, 0x6a, + 0x7b, 0x7a, 0x78, 0xb2, 0x3c, 0xb9, 0x17, 0x34, 0xa2, 0xa8, 0xbf, 0xf9, 0x6f, 0x05, 0x54, 0xf7, + 0x3b, 0x58, 0x27, 0xe7, 0x50, 0xed, 0x37, 0xa5, 0x6a, 0x9f, 0xff, 0x92, 0xce, 0xfd, 0xc9, 0x2d, + 0xf4, 0x5b, 0x89, 0x42, 0xff, 0xd2, 0x08, 0x9e, 0x67, 0xd7, 0xf8, 0xb7, 0xc1, 0x64, 0x38, 0xdc, + 0xe9, 0x0e, 0xa0, 0xe6, 0xaf, 0x4b, 0x60, 0x2a, 0x36, 0xc4, 0x29, 0x8f, 0xaf, 0x87, 0xd2, 0x99, + 0xcd, 0x36, 0xe6, 0x5a, 0x91, 0x40, 0xd4, 0xe0, 0x7c, 0x7e, 0xc7, 0xa4, 0x4e, 0xfc, 0x82, 0x97, + 0x3e, 0xb6, 0xbf, 0x05, 0x66, 0x28, 0x76, 0x7a, 0x84, 0x06, 0x7d, 0x7c, 0xc2, 0x26, 0xa3, 0x07, + 0x8f, 0xfb, 0x52, 0x2f, 0x4a, 0xa0, 0x97, 0x6e, 0x83, 0x69, 0x69, 0x30, 0x78, 0x11, 0x94, 0x8f, + 0xc9, 0xc0, 0x97, 0x3d, 0x88, 0xfd, 0x09, 0xe7, 0x41, 0xb5, 0x8f, 0x75, 0xcf, 0xcf, 0xf3, 0x49, + 0xe4, 0xff, 0xb8, 0x55, 0x7a, 0x4b, 0x69, 0xfe, 0x82, 0x4d, 0x4e, 0x94, 0x9c, 0xe7, 0x90, 0x5d, + 0xef, 0x4a, 0xd9, 0x95, 0xff, 0x51, 0x2f, 0xbe, 0x65, 0xf2, 0x72, 0x0c, 0x25, 0x72, 0xec, 0xd5, + 0x42, 0x6c, 0xcf, 0xce, 0xb4, 0x7f, 0x94, 0xc0, 0x7c, 0x0c, 0x1d, 0xc9, 0xc9, 0x6f, 0x4a, 0x72, + 0x72, 0x25, 0x21, 0x27, 0xeb, 0x59, 0x36, 0x5f, 0xeb, 0xc9, 0xd1, 0x7a, 0xf2, 0xf7, 0x0a, 0x98, + 0x8d, 0xcd, 0xdd, 0x39, 0x08, 0xca, 0x7b, 0xb2, 0xa0, 0x7c, 0xa9, 0x48, 0xd2, 0xe4, 0x28, 0xca, + 0x3f, 0x57, 0x25, 0xe7, 0xff, 0xe7, 0xdf, 0xb9, 0x7e, 0x0c, 0xe6, 0xfb, 0x96, 0xee, 0x19, 0x64, + 0x43, 0xc7, 0x9a, 0x11, 0x00, 0x98, 0x02, 0x2b, 0x27, 0xef, 0x72, 0x21, 0x3d, 0x71, 0x5c, 0xcd, + 0xa5, 0xc4, 0xa4, 0x0f, 0x22, 0xcb, 0x48, 0xf7, 0x3d, 0xc8, 0xa0, 0x43, 0x99, 0x83, 0xc0, 0x37, + 0xc0, 0x14, 0x53, 0x4e, 0x5a, 0x87, 0xec, 0x60, 0x23, 0x48, 0xac, 0xf0, 0x13, 0xd6, 0x7e, 0xd4, + 0x85, 0xe2, 0x38, 0x78, 0x04, 0xe6, 0x6c, 0xab, 0xbb, 0x8d, 0x4d, 0xdc, 0x23, 0x4c, 0x66, 0xec, + 0x59, 0xba, 0xd6, 0x19, 0xf0, 0xc7, 0xaf, 0xc9, 0xf6, 0x9b, 0xc1, 0xc3, 0xc6, 0x5e, 0x1a, 0xc2, + 0x2e, 0x89, 0x19, 0xcd, 0x7c, 0x53, 0x67, 0x51, 0x42, 0x27, 0xf5, 0xd9, 0xd5, 0x7f, 0x76, 0x5e, + 0x2b, 0x92, 0x61, 0x67, 0xfc, 0xf0, 0x9a, 0xf7, 0xb6, 0x57, 0x3b, 0xd3, 0x57, 0xd3, 
0x7f, 0x56, + 0xc0, 0xa5, 0xd4, 0x51, 0xf9, 0x25, 0xbe, 0xae, 0xa5, 0xa4, 0x7e, 0xf9, 0x14, 0x52, 0x7f, 0x1d, + 0xcc, 0x8a, 0x0f, 0xb6, 0x89, 0x9b, 0x42, 0x78, 0x63, 0xdb, 0x90, 0xbb, 0x51, 0x12, 0x9f, 0xf5, + 0xba, 0x57, 0x3d, 0xe5, 0xeb, 0x5e, 0xdc, 0x0b, 0xf1, 0x0f, 0x48, 0x7e, 0xea, 0xa5, 0xbd, 0x10, + 0xff, 0x87, 0x94, 0xc4, 0x33, 0x85, 0xe0, 0xb3, 0x86, 0x0c, 0x13, 0xb2, 0x42, 0x38, 0x90, 0x7a, + 0x51, 0x02, 0xfd, 0xb9, 0x3e, 0x4a, 0xe2, 0x8c, 0x8f, 0x92, 0xab, 0x45, 0xf2, 0xb9, 0xf8, 0xdd, + 0xe4, 0x2f, 0x0a, 0x78, 0x21, 0x77, 0x23, 0xc0, 0x75, 0xa9, 0xec, 0xae, 0x26, 0xca, 0xee, 0x37, + 0x72, 0x0d, 0x63, 0xb5, 0xd7, 0xc9, 0x7e, 0x9a, 0x7b, 0xbb, 0xd8, 0xd3, 0x5c, 0x86, 0x76, 0x1f, + 0xfd, 0x46, 0xd7, 0x5e, 0x7d, 0xf2, 0xb4, 0x31, 0xf6, 0xd1, 0xd3, 0xc6, 0xd8, 0x27, 0x4f, 0x1b, + 0x63, 0x3f, 0x19, 0x36, 0x94, 0x27, 0xc3, 0x86, 0xf2, 0xd1, 0xb0, 0xa1, 0x7c, 0x32, 0x6c, 0x28, + 0x7f, 0x1d, 0x36, 0x94, 0x5f, 0x7e, 0xda, 0x18, 0x7b, 0x7f, 0x42, 0x8c, 0xf8, 0x9f, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x70, 0x2d, 0x26, 0x9d, 0xec, 0x28, 0x00, 0x00, +} + func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -259,36 +1107,45 @@ func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + i-- + dAtA[i] = 0x18 + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Data.Size())) - n2, err := m.Data.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -296,37 +1153,46 @@ func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevisionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; 
iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -334,41 +1200,52 @@ func (m *DaemonSet) Marshal() (dAtA []byte, err error) { } func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n6, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -376,41 +1253,52 @@ func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n7, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 
0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -418,37 +1306,46 @@ func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n8, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -456,51 +1353,62 @@ func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Selector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n9, err := m.Selector.MarshalTo(dAtA[i:]) + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n9 - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n10, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n11, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) 
} - i += n11 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x12 + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -508,58 +1416,65 @@ func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x52 } } - return i, nil + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) + i-- + dAtA[i] = 0x40 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) + i-- + dAtA[i] = 0x38 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -567,31 +1482,39 @@ func (m 
*DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n12, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n12 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Deployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -599,41 +1522,52 @@ func (m *Deployment) Marshal() (dAtA []byte, err error) { } func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n13, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n14, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n14 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n15, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n15 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -641,49 +1575,62 @@ func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { } func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n16, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n16 + i-- dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n17, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n17 - return i, nil + i-- + dAtA[i] = 0x32 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -691,37 +1638,46 @@ func (m *DeploymentList) Marshal() (dAtA []byte, err error) { } func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n18, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -729,69 +1685,80 @@ func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { } func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.ProgressDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + i-- + dAtA[i] = 0x48 + } + i-- + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n19, err := m.Selector.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x38 + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x28 + { + size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n19 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n20, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n20 + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n21, err := m.Strategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n21 - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x38 - i++ - if m.Paused { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i++ - if m.ProgressDeadlineSeconds != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -799,52 +1766,59 @@ func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x40 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x38 if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := 
msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - if m.CollisionCount != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -852,31 +1826,39 @@ func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n22, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n22 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -884,41 +1866,52 @@ func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n23, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n24, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n24 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n25, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := 
m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n25 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -926,41 +1919,52 @@ func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n26, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -968,37 +1972,46 @@ func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n27, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n27 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 
+ } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1006,43 +2019,52 @@ func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n28, err := m.Selector.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n28 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n29, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i += n29 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - return i, nil + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1050,44 +2072,51 @@ func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 
0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1095,27 +2124,34 @@ func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n30, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n30 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1123,37 +2159,46 @@ func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n31, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n31 - } if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n32, err := m.MaxSurge.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n32 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1161,22 +2206,27 @@ func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateStatefulSetStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.Partition 
!= nil { - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *Scale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1184,41 +2234,52 @@ func (m *Scale) Marshal() (dAtA []byte, err error) { } func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n33, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n33 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n34, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n34 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n35, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n35 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1226,20 +2287,25 @@ func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { } func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1247,46 +2313,54 @@ func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { } func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i -= len(m.TargetSelector) + copy(dAtA[i:], m.TargetSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) + i-- + dAtA[i] = 0x1a if len(m.Selector) > 0 { keysForSelector := make([]string, 0, len(m.Selector)) for k := range m.Selector { keysForSelector = append(keysForSelector, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for _, k := range keysForSelector { + for iNdEx := 
len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.Selector[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) - i += copy(dAtA[i:], m.TargetSelector) - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *StatefulSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1294,41 +2368,52 @@ func (m *StatefulSet) Marshal() (dAtA []byte, err error) { } func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n36, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n36 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n37, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n37 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n38, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n38 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1336,41 +2421,52 @@ func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) 
- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n39, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n39 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1378,37 +2474,46 @@ func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n40, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n40 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1416,73 +2521,88 @@ func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x40 } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n41, err := m.Selector.MarshalTo(dAtA[i:]) + { + size, 
err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n41 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n42, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n42 + i-- + dAtA[i] = 0x3a + i -= len(m.PodManagementPolicy) + copy(dAtA[i:], m.PodManagementPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) + i-- + dAtA[i] = 0x32 + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0x2a if len(m.VolumeClaimTemplates) > 0 { - for _, msg := range m.VolumeClaimTemplates { + for iNdEx := len(m.VolumeClaimTemplates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeClaimTemplates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + } + } + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i += copy(dAtA[i:], m.ServiceName) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) - i += copy(dAtA[i:], m.PodManagementPolicy) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n43, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n43 - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1490,57 +2610,66 @@ func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) - i += copy(dAtA[i:], m.CurrentRevision) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) - i += copy(dAtA[i:], 
m.UpdateRevision) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x52 } } - return i, nil + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 + } + i -= len(m.UpdateRevision) + copy(dAtA[i:], m.UpdateRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) + i-- + dAtA[i] = 0x3a + i -= len(m.CurrentRevision) + copy(dAtA[i:], m.CurrentRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1548,37 +2677,50 @@ func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n44, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n44 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *ControllerRevision) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1590,6 +2732,9 @@ func (m *ControllerRevision) Size() (n int) { } func (m *ControllerRevisionList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1604,6 +2749,9 @@ func (m *ControllerRevisionList) Size() (n int) { } func (m *DaemonSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = 
m.ObjectMeta.Size() @@ -1616,6 +2764,9 @@ func (m *DaemonSet) Size() (n int) { } func (m *DaemonSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1632,6 +2783,9 @@ func (m *DaemonSetCondition) Size() (n int) { } func (m *DaemonSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1646,6 +2800,9 @@ func (m *DaemonSetList) Size() (n int) { } func (m *DaemonSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Selector != nil { @@ -1664,6 +2821,9 @@ func (m *DaemonSetSpec) Size() (n int) { } func (m *DaemonSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) @@ -1687,6 +2847,9 @@ func (m *DaemonSetStatus) Size() (n int) { } func (m *DaemonSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1699,6 +2862,9 @@ func (m *DaemonSetUpdateStrategy) Size() (n int) { } func (m *Deployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1711,6 +2877,9 @@ func (m *Deployment) Size() (n int) { } func (m *DeploymentCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1729,6 +2898,9 @@ func (m *DeploymentCondition) Size() (n int) { } func (m *DeploymentList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1743,6 +2915,9 @@ func (m *DeploymentList) Size() (n int) { } func (m *DeploymentSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -1768,6 +2943,9 @@ func (m *DeploymentSpec) Size() (n int) { } func (m *DeploymentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -1789,6 +2967,9 @@ func (m *DeploymentStatus) Size() (n int) { } func (m *DeploymentStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1801,6 +2982,9 @@ func (m *DeploymentStrategy) Size() (n int) { } func (m *ReplicaSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1813,6 +2997,9 @@ func (m *ReplicaSet) Size() (n int) { } func (m *ReplicaSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1829,6 +3016,9 @@ func (m *ReplicaSetCondition) Size() (n int) { } func (m *ReplicaSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1843,6 +3033,9 @@ func (m *ReplicaSetList) Size() (n int) { } func (m *ReplicaSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -1859,6 +3052,9 @@ func (m *ReplicaSetSpec) Size() (n int) { } func (m *ReplicaSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1876,6 +3072,9 @@ func (m *ReplicaSetStatus) Size() (n int) { } func (m *RollingUpdateDaemonSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MaxUnavailable != nil { @@ -1886,6 +3085,9 @@ func (m *RollingUpdateDaemonSet) Size() (n int) { } func (m *RollingUpdateDeployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MaxUnavailable != nil { @@ -1900,6 +3102,9 @@ func (m *RollingUpdateDeployment) Size() (n int) { } func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Partition != nil { @@ 
-1909,6 +3114,9 @@ func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { } func (m *Scale) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1921,6 +3129,9 @@ func (m *Scale) Size() (n int) { } func (m *ScaleSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1928,6 +3139,9 @@ func (m *ScaleSpec) Size() (n int) { } func (m *ScaleStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1945,6 +3159,9 @@ func (m *ScaleStatus) Size() (n int) { } func (m *StatefulSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1957,6 +3174,9 @@ func (m *StatefulSet) Size() (n int) { } func (m *StatefulSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1973,6 +3193,9 @@ func (m *StatefulSetCondition) Size() (n int) { } func (m *StatefulSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1987,6 +3210,9 @@ func (m *StatefulSetList) Size() (n int) { } func (m *StatefulSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -2017,6 +3243,9 @@ func (m *StatefulSetSpec) Size() (n int) { } func (m *StatefulSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -2041,6 +3270,9 @@ func (m *StatefulSetStatus) Size() (n int) { } func (m *StatefulSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -2053,14 +3285,7 @@ func (m *StatefulSetUpdateStrategy) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -2070,8 +3295,8 @@ func (this *ControllerRevision) String() string { return "nil" } s := strings.Join([]string{`&ControllerRevision{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + strings.Replace(strings.Replace(this.Data.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, `}`, }, "") @@ -2081,9 +3306,14 @@ func (this *ControllerRevisionList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ControllerRevision{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ControllerRevisionList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + `,`, + `ListMeta:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2093,7 +3323,7 @@ func (this *DaemonSet) String() string { return "nil" } s := strings.Join([]string{`&DaemonSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2107,7 +3337,7 @@ func (this *DaemonSetCondition) String() string { s := strings.Join([]string{`&DaemonSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -2118,9 +3348,14 @@ func (this *DaemonSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]DaemonSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&DaemonSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2130,8 +3365,8 @@ func (this *DaemonSetSpec) String() string { return "nil" } s := strings.Join([]string{`&DaemonSetSpec{`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, @@ -2143,6 +3378,11 @@ func (this *DaemonSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]DaemonSetCondition{" + for _, f := range this.Conditions { + 
repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DaemonSetStatus{`, `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, @@ -2153,7 +3393,7 @@ func (this *DaemonSetStatus) String() string { `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -2164,7 +3404,7 @@ func (this *DaemonSetUpdateStrategy) String() string { } s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, `}`, }, "") return s @@ -2174,7 +3414,7 @@ func (this *Deployment) String() string { return "nil" } s := strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2190,8 +3430,8 @@ func (this *DeploymentCondition) String() string { `Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2200,9 +3440,14 @@ func (this *DeploymentList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Deployment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) 
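// Editor's note (illustration, not part of the vendored diff): the regenerated String()
// methods above change in two ways. Repeated fields (Items, Conditions, VolumeClaimTemplates)
// are now rendered by an explicit repeatedStringFor... loop calling each element's String()
// instead of a single fmt.Sprintf("%v", slice), and the long k8s_io_... package qualifiers are
// shortened to the file's import aliases (v1, v11, intstr). A minimal sketch of the loop
// pattern, using a hypothetical Item/List pair rather than the real generated types:
package main

import (
	"fmt"
	"strings"
)

type Item struct{ Name string }

func (i Item) String() string { return "&Item{Name:" + i.Name + ",}" }

type List struct{ Items []Item }

func (l *List) String() string {
	if l == nil {
		return "nil"
	}
	// Same shape as repeatedStringForItems in the generated code: build the slice
	// text element by element, stripping the leading "&" from each element.
	repeated := "[]Item{"
	for _, f := range l.Items {
		repeated += strings.Replace(f.String(), "&", "", 1) + ","
	}
	repeated += "}"
	return strings.Join([]string{`&List{`, `Items:` + repeated + `,`, `}`}, "")
}

func main() {
	fmt.Println(&List{Items: []Item{{Name: "a"}, {Name: "b"}}})
}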
+ `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2213,8 +3458,8 @@ func (this *DeploymentSpec) String() string { } s := strings.Join([]string{`&DeploymentSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, @@ -2228,13 +3473,18 @@ func (this *DeploymentStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DeploymentStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, `}`, @@ -2247,7 +3497,7 @@ func (this *DeploymentStrategy) String() string { } s := strings.Join([]string{`&DeploymentStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, `}`, }, "") return s @@ -2257,7 +3507,7 @@ func (this *ReplicaSet) String() string { return "nil" } s := strings.Join([]string{`&ReplicaSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2271,7 +3521,7 @@ func (this *ReplicaSetCondition) 
String() string { s := strings.Join([]string{`&ReplicaSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -2282,9 +3532,14 @@ func (this *ReplicaSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ReplicaSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ReplicaSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2295,8 +3550,8 @@ func (this *ReplicaSetSpec) String() string { } s := strings.Join([]string{`&ReplicaSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `}`, }, "") @@ -2306,13 +3561,18 @@ func (this *ReplicaSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]ReplicaSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&ReplicaSetStatus{`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -2322,7 +3582,7 @@ func (this *RollingUpdateDaemonSet) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDaemonSet{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", 
"k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -2332,8 +3592,8 @@ func (this *RollingUpdateDeployment) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -2353,7 +3613,7 @@ func (this *Scale) String() string { return "nil" } s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2397,7 +3657,7 @@ func (this *StatefulSet) String() string { return "nil" } s := strings.Join([]string{`&StatefulSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2411,7 +3671,7 @@ func (this *StatefulSetCondition) String() string { s := strings.Join([]string{`&StatefulSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -2422,9 +3682,14 @@ func (this *StatefulSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]StatefulSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&StatefulSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StatefulSet", "StatefulSet", 1), `&`, ``, 
1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2433,11 +3698,16 @@ func (this *StatefulSetSpec) String() string { if this == nil { return "nil" } + repeatedStringForVolumeClaimTemplates := "[]PersistentVolumeClaim{" + for _, f := range this.VolumeClaimTemplates { + repeatedStringForVolumeClaimTemplates += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumeClaimTemplates += "}" s := strings.Join([]string{`&StatefulSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_api_core_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `VolumeClaimTemplates:` + repeatedStringForVolumeClaimTemplates + `,`, `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, @@ -2450,6 +3720,11 @@ func (this *StatefulSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]StatefulSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&StatefulSetStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, @@ -2459,7 +3734,7 @@ func (this *StatefulSetStatus) String() string { `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -2470,7 +3745,7 @@ func (this *StatefulSetUpdateStrategy) String() string { } s := strings.Join([]string{`&StatefulSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, `}`, }, "") return s @@ -2498,7 +3773,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if 
b < 0x80 { break } @@ -2526,7 +3801,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2535,6 +3810,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2556,7 +3834,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2565,6 +3843,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2586,7 +3867,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2600,6 +3881,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2627,7 +3911,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2655,7 +3939,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2664,6 +3948,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2685,7 +3972,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2694,6 +3981,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2711,6 +4001,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2738,7 +4031,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2766,7 +4059,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2775,6 +4068,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2796,7 +4092,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + 
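// Editor's note (illustration, not part of the vendored diff): throughout the regenerated
// Unmarshal methods the varint accumulation changes from (uint64(b) & 0x7F) << shift to
// uint64(b&0x7F) << shift - the byte is masked before being widened, which is the form newer
// gogo/protobuf codegen emits; the decoded value is identical. A minimal standalone base-128
// varint decoder in that style (the function name decodeUvarint is ours, not the generator's):
package main

import (
	"errors"
	"fmt"
)

// decodeUvarint reads one protobuf varint from dAtA and returns the value and bytes consumed.
func decodeUvarint(dAtA []byte) (uint64, int, error) {
	var x uint64
	var shift uint
	for iNdEx := 0; iNdEx < len(dAtA); iNdEx++ {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow")
		}
		b := dAtA[iNdEx]
		x |= uint64(b&0x7F) << shift // mask the continuation bit, then widen
		if b < 0x80 {                // high bit clear: last byte of this varint
			return x, iNdEx + 1, nil
		}
		shift += 7
	}
	return 0, 0, errors.New("unexpected EOF")
}

func main() {
	v, n, err := decodeUvarint([]byte{0x96, 0x01}) // the canonical example, encodes 150
	fmt.Println(v, n, err)
}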
msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2805,6 +4101,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2826,7 +4125,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2835,6 +4134,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2851,6 +4153,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2878,7 +4183,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2906,7 +4211,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2916,6 +4221,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2935,7 +4243,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2945,6 +4253,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2964,7 +4275,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2973,6 +4284,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2994,7 +4308,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3004,6 +4318,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3023,7 +4340,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3033,6 +4350,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3047,6 +4367,9 @@ func (m 
*DaemonSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3074,7 +4397,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3102,7 +4425,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3111,6 +4434,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3132,7 +4458,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3141,6 +4467,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3158,6 +4487,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3185,7 +4517,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3213,7 +4545,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3222,11 +4554,14 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3246,7 +4581,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3255,6 +4590,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3276,7 +4614,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3285,6 +4623,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3306,7 +4647,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3325,7 +4666,7 @@ func (m 
*DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3340,6 +4681,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3367,7 +4711,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3395,7 +4739,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift + m.CurrentNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3414,7 +4758,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberMisscheduled |= (int32(b) & 0x7F) << shift + m.NumberMisscheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3433,7 +4777,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift + m.DesiredNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3452,7 +4796,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberReady |= (int32(b) & 0x7F) << shift + m.NumberReady |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3471,7 +4815,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -3490,7 +4834,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedNumberScheduled |= (int32(b) & 0x7F) << shift + m.UpdatedNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3509,7 +4853,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberAvailable |= (int32(b) & 0x7F) << shift + m.NumberAvailable |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3528,7 +4872,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberUnavailable |= (int32(b) & 0x7F) << shift + m.NumberUnavailable |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3547,7 +4891,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3567,7 +4911,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3576,6 +4920,9 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3593,6 +4940,9 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3620,7 +4970,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-3648,7 +4998,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3658,6 +5008,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3677,7 +5030,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3686,6 +5039,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3705,6 +5061,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3732,7 +5091,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3760,7 +5119,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3769,6 +5128,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3790,7 +5152,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3799,6 +5161,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3820,7 +5185,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3829,6 +5194,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3845,6 +5213,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3872,7 +5243,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3900,7 +5271,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3910,6 +5281,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3929,7 +5303,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3939,6 +5313,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3958,7 +5335,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3968,6 +5345,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3987,7 +5367,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3997,6 +5377,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4016,7 +5399,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4025,6 +5408,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4046,7 +5432,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4055,6 +5441,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4071,6 +5460,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4098,7 +5490,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4126,7 +5518,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4135,6 +5527,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4156,7 +5551,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4165,6 +5560,9 @@ func (m 
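// Editor's note (illustration, not part of the vendored diff): the repeated additions of
// "if postIndex < 0" and "if (iNdEx + skippy) < 0" above harden the regenerated Unmarshal
// code against crafted lengths. A huge msglen can overflow the int addition, wrap negative,
// slip past the existing "postIndex > l" check, and then panic on the slice expression; the
// new guards turn that into a clean ErrInvalidLengthGenerated. A minimal sketch of the
// guarded pattern, with a hypothetical errInvalidLength standing in for the generated error:
package main

import (
	"errors"
	"fmt"
	"math"
)

var errInvalidLength = errors.New("proto: negative length found during unmarshaling")

// boundedSlice returns dAtA[iNdEx:iNdEx+msglen] with the same checks the generated code
// performs before handing a sub-message to its Unmarshal.
func boundedSlice(dAtA []byte, iNdEx, msglen int) ([]byte, error) {
	if msglen < 0 {
		return nil, errInvalidLength
	}
	postIndex := iNdEx + msglen
	if postIndex < 0 { // the addition overflowed: reject instead of panicking below
		return nil, errInvalidLength
	}
	if postIndex > len(dAtA) {
		return nil, errors.New("unexpected EOF")
	}
	return dAtA[iNdEx:postIndex], nil
}

func main() {
	_, err := boundedSlice(make([]byte, 8), 4, math.MaxInt) // 4 + MaxInt wraps negative
	fmt.Println(err)
}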
*DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4182,6 +5580,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4209,7 +5610,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4237,7 +5638,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4257,7 +5658,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4266,11 +5667,14 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4290,7 +5694,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4299,6 +5703,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4320,7 +5727,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4329,6 +5736,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4350,7 +5760,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4369,7 +5779,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4389,7 +5799,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4409,7 +5819,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4424,6 +5834,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4451,7 +5864,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4479,7 +5892,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -4498,7 +5911,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4517,7 +5930,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4536,7 +5949,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4555,7 +5968,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + m.UnavailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4574,7 +5987,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4583,6 +5996,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4605,7 +6021,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4624,7 +6040,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4639,6 +6055,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4666,7 +6085,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4694,7 +6113,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4704,6 +6123,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4723,7 +6145,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4732,6 +6154,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4751,6 +6176,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) 
error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4778,7 +6206,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4806,7 +6234,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4815,6 +6243,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4836,7 +6267,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4845,6 +6276,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4866,7 +6300,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4875,6 +6309,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4891,6 +6328,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4918,7 +6358,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4946,7 +6386,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4956,6 +6396,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4975,7 +6418,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4985,6 +6428,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5004,7 +6450,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5013,6 +6459,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ 
-5034,7 +6483,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5044,6 +6493,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5063,7 +6515,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5073,6 +6525,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5087,6 +6542,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5114,7 +6572,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5142,7 +6600,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5151,6 +6609,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5172,7 +6633,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5181,6 +6642,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5198,6 +6662,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5225,7 +6692,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5253,7 +6720,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5273,7 +6740,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5282,11 +6749,14 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := 
m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5306,7 +6776,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5315,6 +6785,9 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5336,7 +6809,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5350,6 +6823,9 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5377,7 +6853,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5405,7 +6881,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5424,7 +6900,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift + m.FullyLabeledReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5443,7 +6919,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5462,7 +6938,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5481,7 +6957,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5500,7 +6976,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5509,6 +6985,9 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5526,6 +7005,9 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5553,7 +7035,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5581,7 +7063,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5590,11 +7072,14 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } 
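// Editor's note (illustration, not part of the vendored diff): the substitutions above such as
// k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector -> v1.LabelSelector and
// k8s_io_apimachinery_pkg_util_intstr.IntOrString -> intstr.IntOrString are purely the
// regenerated file switching to short import aliases; behaviour is unchanged. The alias names
// v1, v11 and intstr are taken from the diff, but the import block itself is not shown in these
// hunks, so the one below is an assumption about the file header, sketched for orientation:
package main

import (
	"fmt"

	v11 "k8s.io/api/core/v1"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// The same types the generated code references through its aliases.
	sel := &v1.LabelSelector{MatchLabels: map[string]string{"app": "etcd"}}
	maxUnavailable := intstr.FromInt(1)
	var tpl v11.PodTemplateSpec
	fmt.Println(sel.MatchLabels, maxUnavailable.String(), tpl.Name)
}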
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5609,6 +7094,9 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5636,7 +7124,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5664,7 +7152,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5673,11 +7161,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5697,7 +7188,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5706,11 +7197,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxSurge == nil { - m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxSurge = &intstr.IntOrString{} } if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5725,6 +7219,9 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5752,7 +7249,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5780,7 +7277,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5795,6 +7292,9 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5822,7 +7322,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5850,7 +7350,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5859,6 
+7359,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5880,7 +7383,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5889,6 +7392,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5910,7 +7416,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5919,6 +7425,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5935,6 +7444,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5962,7 +7474,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5990,7 +7502,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6004,6 +7516,9 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6031,7 +7546,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6059,7 +7574,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6078,7 +7593,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6087,6 +7602,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6107,7 +7625,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6124,7 +7642,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6134,6 +7652,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -6150,7 +7671,7 @@ func (m 
*ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6160,6 +7681,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -6196,7 +7720,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6206,6 +7730,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6220,6 +7747,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6247,7 +7777,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6275,7 +7805,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6284,6 +7814,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6305,7 +7838,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6314,6 +7847,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6335,7 +7871,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6344,6 +7880,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6360,6 +7899,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6387,7 +7929,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6415,7 +7957,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6425,6 +7967,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 
0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6444,7 +7989,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6454,6 +7999,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6473,7 +8021,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6482,6 +8030,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6503,7 +8054,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6513,6 +8064,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6532,7 +8086,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6542,6 +8096,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6556,6 +8113,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6583,7 +8143,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6611,7 +8171,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6620,6 +8180,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6641,7 +8204,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6650,6 +8213,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6667,6 +8233,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { 
return io.ErrUnexpectedEOF } @@ -6694,7 +8263,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6722,7 +8291,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6742,7 +8311,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6751,11 +8320,14 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -6775,7 +8347,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6784,6 +8356,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6805,7 +8380,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6814,10 +8389,13 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_api_core_v1.PersistentVolumeClaim{}) + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, v11.PersistentVolumeClaim{}) if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -6836,7 +8414,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6846,6 +8424,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6865,7 +8446,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6875,6 +8456,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6894,7 +8478,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6903,6 +8487,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + 
msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6924,7 +8511,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6939,6 +8526,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6966,7 +8556,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6994,7 +8584,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -7013,7 +8603,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7032,7 +8622,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7051,7 +8641,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift + m.CurrentReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7070,7 +8660,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7089,7 +8679,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7099,6 +8689,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7118,7 +8711,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7128,6 +8721,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7147,7 +8743,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7167,7 +8763,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7176,6 +8772,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7193,6 +8792,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { if 
skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7220,7 +8822,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7248,7 +8850,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7258,6 +8860,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7277,7 +8882,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7286,6 +8891,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7305,6 +8913,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7371,10 +8982,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -7403,6 +9017,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -7421,147 +9038,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 2176 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcb, 0x6f, 0x1c, 0xb7, - 0x19, 0xd7, 0xec, 0x43, 0x5a, 0x51, 0x91, 0x64, 0x53, 0xaa, 0xb4, 0x91, 0xdb, 0x95, 0xb1, 0x09, - 0x1c, 0x25, 0xb6, 0x66, 0x6d, 0xe5, 0x81, 0xc4, 0x2e, 0xda, 0x6a, 0xa5, 0xd4, 0x76, 0xa0, 0x57, - 0x28, 0xcb, 0x40, 0x83, 0x16, 0x35, 0xb5, 0x4b, 0xaf, 0x26, 0x9a, 0x17, 0x66, 0x38, 0x5b, 0x2f, - 0x7a, 0xe9, 0xa9, 0x40, 0x81, 0x02, 0x6d, 0xaf, 0xfd, 0x27, 0x7a, 0x2b, 0x8a, 0xf6, 0x56, 0x04, - 0x85, 0x2f, 0x05, 0x82, 0x5e, 0x92, 0x93, 0x50, 0x6f, 0x4e, 0x45, 0xd1, 0x4b, 0x81, 0x5e, 0x02, - 0x14, 0x28, 0xc8, 0xe1, 0x3c, 0x38, 0x0f, 0xef, 0x48, 0xb1, 0x95, 0x22, 0xc8, 0x6d, 0x87, 0xfc, - 0x7d, 0x3f, 0x7e, 0x24, 0xbf, 0x8f, 0xdf, 0x6f, 0x38, 0x0b, 0xbe, 0x77, 0xfc, 0xb6, 0xab, 0x6a, - 0x56, 0xeb, 0xd8, 0x3b, 0x24, 0x8e, 0x49, 0x28, 0x71, 0x5b, 0x7d, 0x62, 0x76, 0x2d, 0xa7, 0x25, - 0x3a, 0xb0, 0xad, 0xb5, 0xb0, 0x6d, 0xbb, 0xad, 0xfe, 0x8d, 0x43, 0x42, 0xf1, 0x5a, 0xab, 0x47, - 0x4c, 0xe2, 0x60, 0x4a, 0xba, 0xaa, 0xed, 0x58, 0xd4, 0x82, 0x8b, 0x3e, 0x50, 0xc5, 0xb6, 0xa6, - 
0x32, 0xa0, 0x2a, 0x80, 0x4b, 0xab, 0x3d, 0x8d, 0x1e, 0x79, 0x87, 0x6a, 0xc7, 0x32, 0x5a, 0x3d, - 0xab, 0x67, 0xb5, 0x38, 0xfe, 0xd0, 0x7b, 0xc8, 0x9f, 0xf8, 0x03, 0xff, 0xe5, 0xf3, 0x2c, 0x35, - 0x63, 0x03, 0x76, 0x2c, 0x87, 0xb4, 0xfa, 0x37, 0x92, 0x63, 0x2d, 0xbd, 0x11, 0x61, 0x0c, 0xdc, - 0x39, 0xd2, 0x4c, 0xe2, 0x0c, 0x5a, 0xf6, 0x71, 0x8f, 0x35, 0xb8, 0x2d, 0x83, 0x50, 0x9c, 0x65, - 0xd5, 0xca, 0xb3, 0x72, 0x3c, 0x93, 0x6a, 0x06, 0x49, 0x19, 0xbc, 0x35, 0xca, 0xc0, 0xed, 0x1c, - 0x11, 0x03, 0xa7, 0xec, 0x5e, 0xcf, 0xb3, 0xf3, 0xa8, 0xa6, 0xb7, 0x34, 0x93, 0xba, 0xd4, 0x49, - 0x1a, 0x35, 0xff, 0xa3, 0x00, 0xb8, 0x61, 0x99, 0xd4, 0xb1, 0x74, 0x9d, 0x38, 0x88, 0xf4, 0x35, - 0x57, 0xb3, 0x4c, 0xf8, 0x00, 0xd4, 0xd8, 0x7c, 0xba, 0x98, 0xe2, 0xba, 0x72, 0x59, 0x59, 0x99, - 0x5a, 0xbb, 0xae, 0x46, 0x2b, 0x1d, 0xd2, 0xab, 0xf6, 0x71, 0x8f, 0x35, 0xb8, 0x2a, 0x43, 0xab, - 0xfd, 0x1b, 0xea, 0xee, 0xe1, 0x87, 0xa4, 0x43, 0xb7, 0x09, 0xc5, 0x6d, 0xf8, 0xf8, 0x64, 0x79, - 0x6c, 0x78, 0xb2, 0x0c, 0xa2, 0x36, 0x14, 0xb2, 0xc2, 0x5d, 0x50, 0xe1, 0xec, 0x25, 0xce, 0xbe, - 0x9a, 0xcb, 0x2e, 0x26, 0xad, 0x22, 0xfc, 0x93, 0x77, 0x1f, 0x51, 0x62, 0x32, 0xf7, 0xda, 0x2f, - 0x08, 0xea, 0xca, 0x26, 0xa6, 0x18, 0x71, 0x22, 0x78, 0x0d, 0xd4, 0x1c, 0xe1, 0x7e, 0xbd, 0x7c, - 0x59, 0x59, 0x29, 0xb7, 0x2f, 0x08, 0x54, 0x2d, 0x98, 0x16, 0x0a, 0x11, 0xcd, 0xc7, 0x0a, 0x58, - 0x48, 0xcf, 0x7b, 0x4b, 0x73, 0x29, 0xfc, 0x61, 0x6a, 0xee, 0x6a, 0xb1, 0xb9, 0x33, 0x6b, 0x3e, - 0xf3, 0x70, 0xe0, 0xa0, 0x25, 0x36, 0xef, 0x3d, 0x50, 0xd5, 0x28, 0x31, 0xdc, 0x7a, 0xe9, 0x72, - 0x79, 0x65, 0x6a, 0xed, 0xaa, 0x9a, 0x13, 0xc0, 0x6a, 0xda, 0xbb, 0xf6, 0xb4, 0xe0, 0xad, 0xde, - 0x65, 0x0c, 0xc8, 0x27, 0x6a, 0xfe, 0xa2, 0x04, 0x26, 0x37, 0x31, 0x31, 0x2c, 0x73, 0x9f, 0xd0, - 0x73, 0xd8, 0xb9, 0x3b, 0xa0, 0xe2, 0xda, 0xa4, 0x23, 0x76, 0xee, 0x4a, 0xee, 0x04, 0x42, 0x9f, - 0xf6, 0x6d, 0xd2, 0x89, 0xb6, 0x8c, 0x3d, 0x21, 0xce, 0x00, 0xf7, 0xc0, 0xb8, 0x4b, 0x31, 0xf5, - 0x5c, 0xbe, 0x61, 0x53, 0x6b, 0x2b, 0x05, 0xb8, 0x38, 0xbe, 0x3d, 0x23, 0xd8, 0xc6, 0xfd, 0x67, - 0x24, 0x78, 0x9a, 0xff, 0x28, 0x01, 0x18, 0x62, 0x37, 0x2c, 0xb3, 0xab, 0x51, 0x16, 0xce, 0x37, - 0x41, 0x85, 0x0e, 0x6c, 0xc2, 0x17, 0x64, 0xb2, 0x7d, 0x25, 0x70, 0xe5, 0xde, 0xc0, 0x26, 0x9f, - 0x9f, 0x2c, 0x2f, 0xa4, 0x2d, 0x58, 0x0f, 0xe2, 0x36, 0x70, 0x2b, 0x74, 0xb2, 0xc4, 0xad, 0xdf, - 0x90, 0x87, 0xfe, 0xfc, 0x64, 0x39, 0xe3, 0xec, 0x50, 0x43, 0x26, 0xd9, 0x41, 0xd8, 0x07, 0x50, - 0xc7, 0x2e, 0xbd, 0xe7, 0x60, 0xd3, 0xf5, 0x47, 0xd2, 0x0c, 0x22, 0xa6, 0xff, 0x5a, 0xb1, 0x8d, - 0x62, 0x16, 0xed, 0x25, 0xe1, 0x05, 0xdc, 0x4a, 0xb1, 0xa1, 0x8c, 0x11, 0xe0, 0x15, 0x30, 0xee, - 0x10, 0xec, 0x5a, 0x66, 0xbd, 0xc2, 0x67, 0x11, 0x2e, 0x20, 0xe2, 0xad, 0x48, 0xf4, 0xc2, 0x57, - 0xc1, 0x84, 0x41, 0x5c, 0x17, 0xf7, 0x48, 0xbd, 0xca, 0x81, 0xb3, 0x02, 0x38, 0xb1, 0xed, 0x37, - 0xa3, 0xa0, 0xbf, 0xf9, 0x7b, 0x05, 0x4c, 0x87, 0x2b, 0x77, 0x0e, 0x99, 0x73, 0x5b, 0xce, 0x9c, - 0xe6, 0xe8, 0x60, 0xc9, 0x49, 0x98, 0x8f, 0xca, 0x31, 0xc7, 0x59, 0x38, 0xc2, 0x1f, 0x81, 0x9a, - 0x4b, 0x74, 0xd2, 0xa1, 0x96, 0x23, 0x1c, 0x7f, 0xbd, 0xa0, 0xe3, 0xf8, 0x90, 0xe8, 0xfb, 0xc2, - 0xb4, 0xfd, 0x02, 0xf3, 0x3c, 0x78, 0x42, 0x21, 0x25, 0x7c, 0x1f, 0xd4, 0x28, 0x31, 0x6c, 0x1d, - 0x53, 0x22, 0xb2, 0xe6, 0xa5, 0xb8, 0xf3, 0x2c, 0x66, 0x18, 0xd9, 0x9e, 0xd5, 0xbd, 0x27, 0x60, - 0x3c, 0x65, 0xc2, 0xc5, 0x08, 0x5a, 0x51, 0x48, 0x03, 0x6d, 0x30, 0xe3, 0xd9, 0x5d, 0x86, 0xa4, - 0xec, 0x38, 0xef, 0x0d, 0x44, 0x0c, 0x5d, 0x1f, 0xbd, 0x2a, 0x07, 0x92, 0x5d, 0x7b, 0x41, 0x8c, - 0x32, 0x23, 0xb7, 0xa3, 
0x04, 0x3f, 0x5c, 0x07, 0xb3, 0x86, 0x66, 0x22, 0x82, 0xbb, 0x83, 0x7d, - 0xd2, 0xb1, 0xcc, 0xae, 0xcb, 0x43, 0xa9, 0xda, 0x5e, 0x14, 0x04, 0xb3, 0xdb, 0x72, 0x37, 0x4a, - 0xe2, 0xe1, 0x16, 0x98, 0x0f, 0x0e, 0xe0, 0x3b, 0x9a, 0x4b, 0x2d, 0x67, 0xb0, 0xa5, 0x19, 0x1a, - 0xad, 0x8f, 0x73, 0x9e, 0xfa, 0xf0, 0x64, 0x79, 0x1e, 0x65, 0xf4, 0xa3, 0x4c, 0xab, 0xe6, 0x6f, - 0xc6, 0xc1, 0x6c, 0xe2, 0x5c, 0x80, 0xf7, 0xc1, 0x42, 0xc7, 0x73, 0x1c, 0x62, 0xd2, 0x1d, 0xcf, - 0x38, 0x24, 0xce, 0x7e, 0xe7, 0x88, 0x74, 0x3d, 0x9d, 0x74, 0xf9, 0xb6, 0x56, 0xdb, 0x0d, 0xe1, - 0xeb, 0xc2, 0x46, 0x26, 0x0a, 0xe5, 0x58, 0xc3, 0xf7, 0x00, 0x34, 0x79, 0xd3, 0xb6, 0xe6, 0xba, - 0x21, 0x67, 0x89, 0x73, 0x86, 0xa9, 0xb8, 0x93, 0x42, 0xa0, 0x0c, 0x2b, 0xe6, 0x63, 0x97, 0xb8, - 0x9a, 0x43, 0xba, 0x49, 0x1f, 0xcb, 0xb2, 0x8f, 0x9b, 0x99, 0x28, 0x94, 0x63, 0x0d, 0xdf, 0x04, - 0x53, 0xfe, 0x68, 0x7c, 0xcd, 0xc5, 0xe6, 0xcc, 0x09, 0xb2, 0xa9, 0x9d, 0xa8, 0x0b, 0xc5, 0x71, - 0x6c, 0x6a, 0xd6, 0xa1, 0x4b, 0x9c, 0x3e, 0xe9, 0xde, 0xf6, 0xc5, 0x01, 0xab, 0xa0, 0x55, 0x5e, - 0x41, 0xc3, 0xa9, 0xed, 0xa6, 0x10, 0x28, 0xc3, 0x8a, 0x4d, 0xcd, 0x8f, 0x9a, 0xd4, 0xd4, 0xc6, - 0xe5, 0xa9, 0x1d, 0x64, 0xa2, 0x50, 0x8e, 0x35, 0x8b, 0x3d, 0xdf, 0xe5, 0xf5, 0x3e, 0xd6, 0x74, - 0x7c, 0xa8, 0x93, 0xfa, 0x84, 0x1c, 0x7b, 0x3b, 0x72, 0x37, 0x4a, 0xe2, 0xe1, 0x6d, 0x70, 0xd1, - 0x6f, 0x3a, 0x30, 0x71, 0x48, 0x52, 0xe3, 0x24, 0x2f, 0x0a, 0x92, 0x8b, 0x3b, 0x49, 0x00, 0x4a, - 0xdb, 0xc0, 0x9b, 0x60, 0xa6, 0x63, 0xe9, 0x3a, 0x8f, 0xc7, 0x0d, 0xcb, 0x33, 0x69, 0x7d, 0x92, - 0xb3, 0x40, 0x96, 0x43, 0x1b, 0x52, 0x0f, 0x4a, 0x20, 0xe1, 0x8f, 0x01, 0xe8, 0x04, 0x85, 0xc1, - 0xad, 0x83, 0x11, 0x0a, 0x20, 0x5d, 0x96, 0xa2, 0xca, 0x1c, 0x36, 0xb9, 0x28, 0x46, 0xd9, 0xfc, - 0x48, 0x01, 0x8b, 0x39, 0x89, 0x0e, 0xbf, 0x2b, 0x15, 0xc1, 0xab, 0x89, 0x22, 0x78, 0x29, 0xc7, - 0x2c, 0x56, 0x09, 0x8f, 0xc0, 0x34, 0x13, 0x24, 0x9a, 0xd9, 0xf3, 0x21, 0xe2, 0x2c, 0x6b, 0xe5, - 0x4e, 0x00, 0xc5, 0xd1, 0xd1, 0xa9, 0x7c, 0x71, 0x78, 0xb2, 0x3c, 0x2d, 0xf5, 0x21, 0x99, 0xb8, - 0xf9, 0xcb, 0x12, 0x00, 0x9b, 0xc4, 0xd6, 0xad, 0x81, 0x41, 0xcc, 0xf3, 0xd0, 0x34, 0x77, 0x25, - 0x4d, 0xf3, 0x4a, 0xfe, 0x96, 0x84, 0x4e, 0xe5, 0x8a, 0x9a, 0xf7, 0x13, 0xa2, 0xe6, 0xd5, 0x22, - 0x64, 0x4f, 0x57, 0x35, 0x9f, 0x94, 0xc1, 0x5c, 0x04, 0x8e, 0x64, 0xcd, 0x2d, 0x69, 0x47, 0x5f, - 0x49, 0xec, 0xe8, 0x62, 0x86, 0xc9, 0x73, 0xd3, 0x35, 0xcf, 0x5e, 0x5f, 0xc0, 0x0f, 0xc1, 0x0c, - 0x13, 0x32, 0x7e, 0x48, 0x70, 0x99, 0x34, 0x7e, 0x6a, 0x99, 0x14, 0x16, 0xb7, 0x2d, 0x89, 0x09, - 0x25, 0x98, 0x73, 0x64, 0xd9, 0xc4, 0xf3, 0x96, 0x65, 0xcd, 0x3f, 0x28, 0x60, 0x26, 0xda, 0xa6, - 0x73, 0x10, 0x51, 0x77, 0x64, 0x11, 0xf5, 0x52, 0x81, 0xe0, 0xcc, 0x51, 0x51, 0x9f, 0x54, 0xe2, - 0xae, 0x73, 0x19, 0xb5, 0xc2, 0x5e, 0xc1, 0x6c, 0x5d, 0xeb, 0x60, 0x57, 0xd4, 0xdb, 0x17, 0xfc, - 0xd7, 0x2f, 0xbf, 0x0d, 0x85, 0xbd, 0x92, 0xe0, 0x2a, 0x3d, 0x5f, 0xc1, 0x55, 0x7e, 0x36, 0x82, - 0xeb, 0x07, 0xa0, 0xe6, 0x06, 0x52, 0xab, 0xc2, 0x29, 0xaf, 0x16, 0x4a, 0x6c, 0xa1, 0xb2, 0x42, - 0xea, 0x50, 0x5f, 0x85, 0x74, 0x59, 0xca, 0xaa, 0xfa, 0x65, 0x2a, 0x2b, 0x96, 0xcc, 0x36, 0xf6, - 0x5c, 0xd2, 0xe5, 0x19, 0x50, 0x8b, 0x92, 0x79, 0x8f, 0xb7, 0x22, 0xd1, 0x0b, 0x0f, 0xc0, 0xa2, - 0xed, 0x58, 0x3d, 0x87, 0xb8, 0xee, 0x26, 0xc1, 0x5d, 0x5d, 0x33, 0x49, 0x30, 0x01, 0xbf, 0x26, - 0x5e, 0x1a, 0x9e, 0x2c, 0x2f, 0xee, 0x65, 0x43, 0x50, 0x9e, 0x6d, 0xf3, 0xcf, 0x15, 0x70, 0x21, - 0x79, 0x36, 0xe6, 0xc8, 0x14, 0xe5, 0x4c, 0x32, 0xe5, 0x5a, 0x2c, 0x4e, 0x7d, 0x0d, 0x17, 0xbb, - 0x2a, 0x48, 0xc5, 0xea, 0x3a, 0x98, 0x15, 0xb2, 
0x24, 0xe8, 0x14, 0x42, 0x2d, 0xdc, 0x9e, 0x03, - 0xb9, 0x1b, 0x25, 0xf1, 0x4c, 0x7c, 0x44, 0x9a, 0x22, 0x20, 0xa9, 0xc8, 0xe2, 0x63, 0x3d, 0x09, - 0x40, 0x69, 0x1b, 0xb8, 0x0d, 0xe6, 0x3c, 0x33, 0x4d, 0xe5, 0x87, 0xcb, 0x25, 0x41, 0x35, 0x77, - 0x90, 0x86, 0xa0, 0x2c, 0x3b, 0xf8, 0x40, 0xd2, 0x23, 0xe3, 0xfc, 0x48, 0xb8, 0x56, 0x20, 0xac, - 0x0b, 0x0b, 0x12, 0x78, 0x0b, 0x4c, 0x3b, 0x5c, 0x73, 0x06, 0xae, 0xfa, 0xba, 0xed, 0x1b, 0xc2, - 0x6c, 0x1a, 0xc5, 0x3b, 0x91, 0x8c, 0xcd, 0x90, 0x5a, 0xb5, 0xa2, 0x52, 0xab, 0xf9, 0x27, 0x05, - 0xc0, 0x74, 0x1e, 0x8e, 0xbc, 0x09, 0x48, 0x59, 0xc4, 0x2a, 0xa6, 0x96, 0xad, 0x7f, 0xae, 0x17, - 0xd4, 0x3f, 0xd1, 0x81, 0x5a, 0x4c, 0x00, 0x89, 0x65, 0x38, 0x9f, 0x4b, 0x9d, 0xa2, 0x02, 0x28, - 0x72, 0xea, 0x19, 0x08, 0xa0, 0x18, 0xd9, 0xd3, 0x05, 0xd0, 0x3f, 0x4b, 0x60, 0x2e, 0x02, 0x17, - 0x16, 0x40, 0x19, 0x26, 0x5f, 0x5f, 0xec, 0x8c, 0xbe, 0xd8, 0x61, 0xa2, 0x24, 0x5a, 0xba, 0xff, - 0x27, 0x51, 0x12, 0x79, 0x95, 0x23, 0x4a, 0x7e, 0x57, 0x8a, 0xbb, 0xfe, 0x95, 0x17, 0x25, 0x5f, - 0xfc, 0x4e, 0xa6, 0xf9, 0x97, 0x32, 0xb8, 0x90, 0xcc, 0x43, 0xa9, 0x40, 0x2a, 0x23, 0x0b, 0xe4, - 0x1e, 0x98, 0x7f, 0xe8, 0xe9, 0xfa, 0x80, 0x2f, 0x43, 0xac, 0x4a, 0xfa, 0xa5, 0xf5, 0x9b, 0xc2, - 0x72, 0xfe, 0xfb, 0x19, 0x18, 0x94, 0x69, 0x99, 0x53, 0xec, 0xcb, 0x67, 0x2a, 0xf6, 0xa9, 0x0a, - 0x54, 0x39, 0x45, 0x05, 0xca, 0x2c, 0xdc, 0xd5, 0x33, 0x14, 0xee, 0xd3, 0x55, 0xda, 0x8c, 0x83, - 0x6b, 0xe4, 0xab, 0xff, 0xcf, 0x15, 0xb0, 0x90, 0xfd, 0xc2, 0x0d, 0x75, 0x30, 0x63, 0xe0, 0x47, - 0xf1, 0x8b, 0x8f, 0x51, 0x45, 0xc4, 0xa3, 0x9a, 0xae, 0xfa, 0x9f, 0x8c, 0xd4, 0xbb, 0x26, 0xdd, - 0x75, 0xf6, 0xa9, 0xa3, 0x99, 0x3d, 0xbf, 0xf2, 0x6e, 0x4b, 0x5c, 0x28, 0xc1, 0xdd, 0xfc, 0x4c, - 0x01, 0x8b, 0x39, 0x95, 0xef, 0x7c, 0x3d, 0x81, 0x1f, 0x80, 0x9a, 0x81, 0x1f, 0xed, 0x7b, 0x4e, - 0x2f, 0xab, 0x56, 0x17, 0x1b, 0x87, 0x67, 0xf3, 0xb6, 0x60, 0x41, 0x21, 0x5f, 0x73, 0x17, 0x5c, - 0x96, 0x26, 0xc9, 0x32, 0x87, 0x3c, 0xf4, 0x74, 0x9e, 0x44, 0x42, 0x6c, 0x5c, 0x05, 0x93, 0x36, - 0x76, 0xa8, 0x16, 0x4a, 0xd5, 0x6a, 0x7b, 0x7a, 0x78, 0xb2, 0x3c, 0xb9, 0x17, 0x34, 0xa2, 0xa8, - 0xbf, 0xf9, 0x5f, 0x05, 0x54, 0xf7, 0x3b, 0x58, 0x27, 0xe7, 0x50, 0xed, 0x37, 0xa5, 0x6a, 0x9f, - 0x7f, 0x93, 0xce, 0xfd, 0xc9, 0x2d, 0xf4, 0x5b, 0x89, 0x42, 0xff, 0xf2, 0x08, 0x9e, 0xa7, 0xd7, - 0xf8, 0x77, 0xc0, 0x64, 0x38, 0xdc, 0xe9, 0x0e, 0xa0, 0xe6, 0x6f, 0x4b, 0x60, 0x2a, 0x36, 0xc4, - 0x29, 0x8f, 0xaf, 0x07, 0xd2, 0xb1, 0xcf, 0x12, 0x73, 0xad, 0xc8, 0x44, 0xd4, 0xe0, 0x88, 0x7f, - 0xd7, 0xa4, 0x4e, 0xfc, 0x05, 0x2f, 0x7d, 0xf2, 0x7f, 0x07, 0xcc, 0x50, 0xec, 0xf4, 0x08, 0x0d, - 0xfa, 0xf8, 0x82, 0x4d, 0x46, 0xb7, 0x13, 0xf7, 0xa4, 0x5e, 0x94, 0x40, 0x2f, 0xdd, 0x02, 0xd3, - 0xd2, 0x60, 0xf0, 0x02, 0x28, 0x1f, 0x93, 0x81, 0x2f, 0x7b, 0x10, 0xfb, 0x09, 0xe7, 0x41, 0xb5, - 0x8f, 0x75, 0xcf, 0x8f, 0xf3, 0x49, 0xe4, 0x3f, 0xdc, 0x2c, 0xbd, 0xad, 0x34, 0x7f, 0xc5, 0x16, - 0x27, 0x0a, 0xce, 0x73, 0x88, 0xae, 0xf7, 0xa4, 0xe8, 0xca, 0xff, 0xa8, 0x17, 0x4f, 0x99, 0xbc, - 0x18, 0x43, 0x89, 0x18, 0x7b, 0xad, 0x10, 0xdb, 0xd3, 0x23, 0xed, 0x5f, 0x25, 0x30, 0x1f, 0x43, - 0x47, 0x72, 0xf2, 0xdb, 0x92, 0x9c, 0x5c, 0x49, 0xc8, 0xc9, 0x7a, 0x96, 0xcd, 0xd7, 0x7a, 0x72, - 0xb4, 0x9e, 0xfc, 0xa3, 0x02, 0x66, 0x63, 0x6b, 0x77, 0x0e, 0x82, 0xf2, 0xae, 0x2c, 0x28, 0x5f, - 0x2e, 0x12, 0x34, 0x39, 0x8a, 0xf2, 0xaf, 0x55, 0xc9, 0xf9, 0xaf, 0xbc, 0xa4, 0xfc, 0x29, 0x98, - 0xef, 0x5b, 0xba, 0x67, 0x90, 0x0d, 0x1d, 0x6b, 0x46, 0x00, 0x60, 0xaa, 0xa9, 0x9c, 0x7c, 0x97, - 0x0b, 0xe9, 0x89, 0xe3, 0x6a, 0x2e, 0x25, 0x26, 0xbd, 0x1f, 0x59, 0x46, 
0xba, 0xef, 0x7e, 0x06, - 0x1d, 0xca, 0x1c, 0x04, 0xbe, 0x09, 0xa6, 0x98, 0x7e, 0xd3, 0x3a, 0x64, 0x07, 0x1b, 0x41, 0x60, - 0x85, 0x9f, 0xb0, 0xf6, 0xa3, 0x2e, 0x14, 0xc7, 0xc1, 0x23, 0x30, 0x67, 0x5b, 0xdd, 0x6d, 0x6c, - 0xe2, 0x1e, 0x61, 0x32, 0x63, 0xcf, 0xd2, 0xb5, 0xce, 0x80, 0x5f, 0x7e, 0x4d, 0xb6, 0xdf, 0x0a, - 0x6e, 0x45, 0xf6, 0xd2, 0x10, 0xf6, 0x92, 0x98, 0xd1, 0xcc, 0x93, 0x3a, 0x8b, 0x12, 0x3a, 0xa9, - 0xcf, 0xae, 0xfe, 0x1d, 0xf1, 0x5a, 0x91, 0x08, 0x3b, 0xe3, 0x87, 0xd7, 0xbc, 0xbb, 0xbd, 0xda, - 0x99, 0xbe, 0x9a, 0xfe, 0xbb, 0x02, 0x2e, 0xa6, 0x8e, 0xca, 0x2f, 0xf1, 0x76, 0x2d, 0x25, 0xcf, - 0xcb, 0xa7, 0x90, 0xe7, 0xeb, 0x60, 0x56, 0x7c, 0xb0, 0x4d, 0xa8, 0xfb, 0xf0, 0xfd, 0x67, 0x43, - 0xee, 0x46, 0x49, 0x7c, 0xd6, 0xed, 0x5e, 0xf5, 0x94, 0xb7, 0x7b, 0x71, 0x2f, 0xc4, 0x1f, 0x90, - 0xfc, 0xd0, 0x4b, 0x7b, 0x21, 0xfe, 0x87, 0x94, 0xc4, 0x33, 0x85, 0xe0, 0xb3, 0x86, 0x0c, 0x13, - 0xb2, 0x42, 0x38, 0x90, 0x7a, 0x51, 0x02, 0xfd, 0x85, 0x3e, 0x4a, 0xe2, 0x8c, 0x8f, 0x92, 0xab, - 0x45, 0xe2, 0xb9, 0xf8, 0xbb, 0xc9, 0xdf, 0x14, 0xf0, 0x62, 0x6e, 0x22, 0xc0, 0x75, 0xa9, 0xec, - 0xae, 0x26, 0xca, 0xee, 0xb7, 0x72, 0x0d, 0x63, 0xb5, 0xd7, 0xc9, 0xbe, 0x9a, 0x7b, 0xa7, 0xd8, - 0xd5, 0x5c, 0x86, 0x76, 0x1f, 0x7d, 0x47, 0xd7, 0x5e, 0x7d, 0xfc, 0xa4, 0x31, 0xf6, 0xf1, 0x93, - 0xc6, 0xd8, 0xa7, 0x4f, 0x1a, 0x63, 0x3f, 0x1b, 0x36, 0x94, 0xc7, 0xc3, 0x86, 0xf2, 0xf1, 0xb0, - 0xa1, 0x7c, 0x3a, 0x6c, 0x28, 0x7f, 0x1f, 0x36, 0x94, 0x5f, 0x7f, 0xd6, 0x18, 0xfb, 0x60, 0x42, - 0x8c, 0xf8, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x80, 0x85, 0x43, 0x0a, 0xec, 0x28, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go index 39e07e278..d358455f0 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types.go +++ b/vendor/k8s.io/api/apps/v1beta2/types.go @@ -141,13 +141,13 @@ const ( // ordering constraints. When a scale operation is performed with this // strategy, new Pods will be created from the specification version indicated // by the StatefulSet's updateRevision. - RollingUpdateStatefulSetStrategyType = "RollingUpdate" + RollingUpdateStatefulSetStrategyType StatefulSetUpdateStrategyType = "RollingUpdate" // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version // tracking and ordered rolling restarts are disabled. Pods are recreated // from the StatefulSetSpec when they are manually deleted. When a scale // operation is performed with this strategy,specification version indicated // by the StatefulSet's currentRevision. - OnDeleteStatefulSetStrategyType = "OnDelete" + OnDeleteStatefulSetStrategyType StatefulSetUpdateStrategyType = "OnDelete" ) // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. 
diff --git a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go index 8a0bad22e..127bf095f 100644 --- a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go @@ -58,7 +58,7 @@ func (in *ControllerRevision) DeepCopyObject() runtime.Object { func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ControllerRevision, len(*in)) @@ -136,7 +136,7 @@ func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]DaemonSet, len(*in)) @@ -292,7 +292,7 @@ func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Deployment, len(*in)) @@ -457,7 +457,7 @@ func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ReplicaSet, len(*in)) @@ -720,7 +720,7 @@ func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]StatefulSet, len(*in)) diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go index 399d92b38..003cc30bf 100644 --- a/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go @@ -17,32 +17,20 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto - - It has these top-level messages: - AuditSink - AuditSinkList - AuditSinkSpec - Policy - ServiceReference - Webhook - WebhookClientConfig - WebhookThrottleConfig -*/ package v1alpha1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import strings "strings" -import reflect "reflect" + io "io" -import io "io" + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -55,37 +43,229 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AuditSink) Reset() { *m = AuditSink{} } -func (*AuditSink) ProtoMessage() {} -func (*AuditSink) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AuditSink) Reset() { *m = AuditSink{} } +func (*AuditSink) ProtoMessage() {} +func (*AuditSink) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{0} +} +func (m *AuditSink) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditSink) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AuditSink) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditSink.Merge(m, src) +} +func (m *AuditSink) XXX_Size() int { + return m.Size() +} +func (m *AuditSink) XXX_DiscardUnknown() { + xxx_messageInfo_AuditSink.DiscardUnknown(m) +} -func (m *AuditSinkList) Reset() { *m = AuditSinkList{} } -func (*AuditSinkList) ProtoMessage() {} -func (*AuditSinkList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_AuditSink proto.InternalMessageInfo -func (m *AuditSinkSpec) Reset() { *m = AuditSinkSpec{} } -func (*AuditSinkSpec) ProtoMessage() {} -func (*AuditSinkSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *AuditSinkList) Reset() { *m = AuditSinkList{} } +func (*AuditSinkList) ProtoMessage() {} +func (*AuditSinkList) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{1} +} +func (m *AuditSinkList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditSinkList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AuditSinkList) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditSinkList.Merge(m, src) +} +func (m *AuditSinkList) XXX_Size() int { + return m.Size() +} +func (m *AuditSinkList) XXX_DiscardUnknown() { + xxx_messageInfo_AuditSinkList.DiscardUnknown(m) +} -func (m *Policy) Reset() { *m = Policy{} } -func (*Policy) ProtoMessage() {} -func (*Policy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_AuditSinkList proto.InternalMessageInfo -func (m *ServiceReference) Reset() { *m = ServiceReference{} } -func (*ServiceReference) ProtoMessage() {} -func (*ServiceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *AuditSinkSpec) Reset() { *m = AuditSinkSpec{} } +func (*AuditSinkSpec) ProtoMessage() {} +func (*AuditSinkSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{2} +} +func (m *AuditSinkSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditSinkSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AuditSinkSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditSinkSpec.Merge(m, src) +} +func (m *AuditSinkSpec) XXX_Size() int { + return m.Size() +} +func (m *AuditSinkSpec) XXX_DiscardUnknown() { + xxx_messageInfo_AuditSinkSpec.DiscardUnknown(m) +} -func (m *Webhook) Reset() { *m = Webhook{} } -func (*Webhook) ProtoMessage() {} -func (*Webhook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{5} } +var xxx_messageInfo_AuditSinkSpec proto.InternalMessageInfo -func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } -func (*WebhookClientConfig) ProtoMessage() {} -func (*WebhookClientConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *Policy) Reset() { *m = Policy{} } +func (*Policy) ProtoMessage() {} +func (*Policy) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{3} +} +func (m *Policy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Policy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Policy) XXX_Merge(src proto.Message) { + xxx_messageInfo_Policy.Merge(m, src) +} +func (m *Policy) XXX_Size() int { + return m.Size() +} +func (m *Policy) XXX_DiscardUnknown() { + xxx_messageInfo_Policy.DiscardUnknown(m) +} -func (m *WebhookThrottleConfig) Reset() { *m = WebhookThrottleConfig{} } -func (*WebhookThrottleConfig) ProtoMessage() {} -func (*WebhookThrottleConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_Policy proto.InternalMessageInfo + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{4} +} +func (m *ServiceReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceReference.Merge(m, src) +} +func (m *ServiceReference) XXX_Size() int { + return m.Size() +} +func (m *ServiceReference) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceReference proto.InternalMessageInfo + +func (m *Webhook) Reset() { *m = Webhook{} } +func (*Webhook) ProtoMessage() {} +func (*Webhook) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{5} +} +func (m *Webhook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Webhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Webhook) XXX_Merge(src proto.Message) { + xxx_messageInfo_Webhook.Merge(m, src) +} +func (m *Webhook) XXX_Size() int { + return m.Size() +} +func (m *Webhook) XXX_DiscardUnknown() { + xxx_messageInfo_Webhook.DiscardUnknown(m) +} + +var xxx_messageInfo_Webhook proto.InternalMessageInfo + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{6} +} +func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookClientConfig.Merge(m, src) +} +func (m 
*WebhookClientConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookClientConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo + +func (m *WebhookThrottleConfig) Reset() { *m = WebhookThrottleConfig{} } +func (*WebhookThrottleConfig) ProtoMessage() {} +func (*WebhookThrottleConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{7} +} +func (m *WebhookThrottleConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookThrottleConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookThrottleConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookThrottleConfig.Merge(m, src) +} +func (m *WebhookThrottleConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookThrottleConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookThrottleConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_WebhookThrottleConfig proto.InternalMessageInfo func init() { proto.RegisterType((*AuditSink)(nil), "k8s.io.api.auditregistration.v1alpha1.AuditSink") @@ -97,10 +277,67 @@ func init() { proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.auditregistration.v1alpha1.WebhookClientConfig") proto.RegisterType((*WebhookThrottleConfig)(nil), "k8s.io.api.auditregistration.v1alpha1.WebhookThrottleConfig") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto", fileDescriptor_642d3597c6afa8ba) +} + +var fileDescriptor_642d3597c6afa8ba = []byte{ + // 765 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x41, 0x6f, 0x13, 0x47, + 0x14, 0xf6, 0xc6, 0x76, 0x6c, 0x4f, 0x9c, 0x36, 0x9d, 0xb4, 0x95, 0x1b, 0x55, 0x6b, 0x6b, 0xa5, + 0x4a, 0x91, 0xda, 0xcc, 0x36, 0x55, 0xd4, 0x56, 0x88, 0x4b, 0x36, 0x27, 0xa4, 0x10, 0xc2, 0x98, + 0x80, 0x40, 0x08, 0x31, 0x5e, 0x3f, 0xef, 0x0e, 0xb6, 0x77, 0x97, 0xdd, 0x59, 0xa3, 0xdc, 0xf8, + 0x09, 0xfc, 0x05, 0xfe, 0x06, 0x37, 0x24, 0x90, 0x72, 0xcc, 0x31, 0xa7, 0x88, 0x98, 0x03, 0xff, + 0x81, 0x13, 0x9a, 0xd9, 0x59, 0xdb, 0xc4, 0x41, 0x38, 0xb7, 0x79, 0xdf, 0x7b, 0xdf, 0xf7, 0xbe, + 0xf7, 0xde, 0xa0, 0x83, 0xfe, 0xff, 0x09, 0xe1, 0xa1, 0xdd, 0x4f, 0x3b, 0x10, 0x07, 0x20, 0x20, + 0xb1, 0x47, 0x10, 0x74, 0xc3, 0xd8, 0xd6, 0x09, 0x16, 0x71, 0x9b, 0xa5, 0x5d, 0x2e, 0x62, 0xf0, + 0x78, 0x22, 0x62, 0x26, 0x78, 0x18, 0xd8, 0xa3, 0x6d, 0x36, 0x88, 0x7c, 0xb6, 0x6d, 0x7b, 0x10, + 0x40, 0xcc, 0x04, 0x74, 0x49, 0x14, 0x87, 0x22, 0xc4, 0x7f, 0x64, 0x34, 0xc2, 0x22, 0x4e, 0xe6, + 0x68, 0x24, 0xa7, 0x6d, 0x6c, 0x79, 0x5c, 0xf8, 0x69, 0x87, 0xb8, 0xe1, 0xd0, 0xf6, 0x42, 0x2f, + 0xb4, 0x15, 0xbb, 0x93, 0xf6, 0x54, 0xa4, 0x02, 0xf5, 0xca, 0x54, 0x37, 0x76, 0xa6, 0x66, 0x86, + 0xcc, 0xf5, 0x79, 0x00, 0xf1, 0xb1, 0x1d, 0xf5, 0x3d, 0x09, 0x24, 0xf6, 0x10, 0x04, 0xb3, 0x47, + 0x73, 0x5e, 0x36, 0xec, 0x6f, 0xb1, 0xe2, 0x34, 0x10, 0x7c, 0x08, 0x73, 0x84, 0x7f, 0xbf, 0x47, + 0x48, 0x5c, 0x1f, 0x86, 0xec, 0x32, 0xcf, 0x7a, 0x6f, 0xa0, 0xda, 0xae, 0x1c, 0xb6, 0xcd, 0x83, + 0x3e, 0x7e, 0x8a, 0xaa, 0xd2, 0x51, 0x97, 0x09, 0xd6, 0x30, 0x5a, 0xc6, 0xe6, 0xca, 0x3f, 0x7f, + 0x93, 0xe9, 0x56, 0x26, 0xc2, 0x24, 0xea, 0x7b, 0x12, 0x48, 0x88, 0xac, 0x26, 0xa3, 0x6d, 0x72, + 0xa7, 0xf3, 0x0c, 0x5c, 0x71, 0x1b, 0x04, 0x73, 0xf0, 0xc9, 0x79, 0xb3, 0x30, 0x3e, 0x6f, 0xa2, + 0x29, 
0x46, 0x27, 0xaa, 0xf8, 0x3e, 0x2a, 0x25, 0x11, 0xb8, 0x8d, 0x25, 0xa5, 0xbe, 0x43, 0x16, + 0xda, 0x39, 0x99, 0x38, 0x6c, 0x47, 0xe0, 0x3a, 0x75, 0xdd, 0xa1, 0x24, 0x23, 0xaa, 0xf4, 0xac, + 0x77, 0x06, 0x5a, 0x9d, 0x54, 0xed, 0xf3, 0x44, 0xe0, 0xc7, 0x73, 0xb3, 0x90, 0xc5, 0x66, 0x91, + 0x6c, 0x35, 0xc9, 0x9a, 0xee, 0x53, 0xcd, 0x91, 0x99, 0x39, 0x8e, 0x50, 0x99, 0x0b, 0x18, 0x26, + 0x8d, 0xa5, 0x56, 0xf1, 0xd2, 0x9a, 0x16, 0x1a, 0xc4, 0x59, 0xd5, 0xe2, 0xe5, 0x5b, 0x52, 0x86, + 0x66, 0x6a, 0xd6, 0xdb, 0xd9, 0x31, 0xe4, 0x78, 0xf8, 0x08, 0x2d, 0x47, 0xe1, 0x80, 0xbb, 0xc7, + 0x7a, 0x88, 0xad, 0x05, 0x3b, 0x1d, 0x2a, 0x92, 0xf3, 0x83, 0x6e, 0xb3, 0x9c, 0xc5, 0x54, 0x8b, + 0xe1, 0x87, 0xa8, 0xf2, 0x02, 0x3a, 0x7e, 0x18, 0xf6, 0xf5, 0x29, 0xc8, 0x82, 0xba, 0x0f, 0x32, + 0x96, 0xf3, 0xa3, 0x16, 0xae, 0x68, 0x80, 0xe6, 0x7a, 0x96, 0x8b, 0x74, 0x33, 0xfc, 0x17, 0x2a, + 0x0f, 0x60, 0x04, 0x03, 0x65, 0xbd, 0xe6, 0xfc, 0x9a, 0x8f, 0xbc, 0x2f, 0xc1, 0xcf, 0xf9, 0x83, + 0x66, 0x45, 0xf8, 0x4f, 0xb4, 0x9c, 0x08, 0xe6, 0x41, 0xb6, 0xd3, 0x9a, 0xb3, 0x2e, 0x6d, 0xb7, + 0x15, 0x22, 0x6b, 0xd5, 0x8b, 0xea, 0x12, 0xeb, 0xb5, 0x81, 0xd6, 0xda, 0x10, 0x8f, 0xb8, 0x0b, + 0x14, 0x7a, 0x10, 0x43, 0xe0, 0x02, 0xb6, 0x51, 0x2d, 0x60, 0x43, 0x48, 0x22, 0xe6, 0x82, 0xee, + 0xf9, 0x93, 0xee, 0x59, 0x3b, 0xc8, 0x13, 0x74, 0x5a, 0x83, 0x5b, 0xa8, 0x24, 0x03, 0xb5, 0x82, + 0xda, 0xf4, 0x5f, 0xc9, 0x5a, 0xaa, 0x32, 0xf8, 0x77, 0x54, 0x8a, 0x98, 0xf0, 0x1b, 0x45, 0x55, + 0x51, 0x95, 0xd9, 0x43, 0x26, 0x7c, 0xaa, 0x50, 0x95, 0x0d, 0x63, 0xd1, 0x28, 0xb5, 0x8c, 0xcd, + 0xb2, 0xce, 0x86, 0xb1, 0xa0, 0x0a, 0xb5, 0x3e, 0x19, 0x28, 0xdf, 0x0e, 0xee, 0xa1, 0xaa, 0xf0, + 0xe3, 0x50, 0x88, 0x01, 0xe8, 0x43, 0xde, 0xbc, 0xde, 0xc2, 0xef, 0x69, 0xf6, 0x5e, 0x18, 0xf4, + 0xb8, 0xe7, 0xd4, 0xe5, 0xbf, 0xcc, 0x31, 0x3a, 0xd1, 0xc6, 0x02, 0xd5, 0xdd, 0x01, 0x87, 0x40, + 0x64, 0x75, 0xfa, 0xb8, 0x37, 0xae, 0xd7, 0x6b, 0x6f, 0x46, 0xc1, 0xf9, 0x59, 0x6f, 0xa5, 0x3e, + 0x8b, 0xd2, 0xaf, 0xba, 0x58, 0x6f, 0x0c, 0xb4, 0x7e, 0x05, 0x17, 0xff, 0x86, 0x8a, 0x69, 0x9c, + 0x9f, 0xbf, 0x32, 0x3e, 0x6f, 0x16, 0x8f, 0xe8, 0x3e, 0x95, 0x18, 0x7e, 0x82, 0x2a, 0x49, 0x76, + 0x3f, 0xed, 0xf1, 0xbf, 0x05, 0x3d, 0x5e, 0xbe, 0xba, 0xb3, 0x22, 0x7f, 0x61, 0x8e, 0xe6, 0xa2, + 0x78, 0x13, 0x55, 0x5d, 0xe6, 0xa4, 0x41, 0x77, 0x00, 0xea, 0x78, 0xf5, 0x6c, 0x65, 0x7b, 0xbb, + 0x19, 0x46, 0x27, 0x59, 0xab, 0x8d, 0x7e, 0xb9, 0x72, 0xc7, 0xd2, 0xfd, 0xf3, 0x28, 0x51, 0xee, + 0x8b, 0x99, 0xfb, 0xbb, 0x87, 0x6d, 0x2a, 0x31, 0xdc, 0x44, 0xe5, 0x4e, 0x1a, 0x27, 0x42, 0x79, + 0x2f, 0x3a, 0x35, 0xf9, 0xab, 0x1d, 0x09, 0xd0, 0x0c, 0x77, 0xc8, 0xc9, 0x85, 0x59, 0x38, 0xbd, + 0x30, 0x0b, 0x67, 0x17, 0x66, 0xe1, 0xe5, 0xd8, 0x34, 0x4e, 0xc6, 0xa6, 0x71, 0x3a, 0x36, 0x8d, + 0xb3, 0xb1, 0x69, 0x7c, 0x18, 0x9b, 0xc6, 0xab, 0x8f, 0x66, 0xe1, 0x51, 0x35, 0x9f, 0xea, 0x4b, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x0a, 0x6c, 0xff, 0x86, 0xcd, 0x06, 0x00, 0x00, +} + func (m *AuditSink) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -108,33 +345,42 @@ func (m *AuditSink) Marshal() (dAtA []byte, err error) { } func (m *AuditSink) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuditSink) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, 
err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *AuditSinkList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -142,37 +388,46 @@ func (m *AuditSinkList) Marshal() (dAtA []byte, err error) { } func (m *AuditSinkList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuditSinkList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *AuditSinkSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -180,33 +435,42 @@ func (m *AuditSinkSpec) Marshal() (dAtA []byte, err error) { } func (m *AuditSinkSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuditSinkSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Policy.Size())) - n4, err := m.Policy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Webhook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Webhook.Size())) - n5, err := m.Webhook.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Policy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Policy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) 
if err != nil { return nil, err } @@ -214,36 +478,36 @@ func (m *Policy) Marshal() (dAtA []byte, err error) { } func (m *Policy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Policy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) - i += copy(dAtA[i:], m.Level) if len(m.Stages) > 0 { - for _, s := range m.Stages { + for iNdEx := len(m.Stages) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Stages[iNdEx]) + copy(dAtA[i:], m.Stages[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Stages[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + i -= len(m.Level) + copy(dAtA[i:], m.Level) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ServiceReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -251,31 +515,44 @@ func (m *ServiceReference) Marshal() (dAtA []byte, err error) { } func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x20 + } if m.Path != nil { - dAtA[i] = 0x1a - i++ + i -= len(*m.Path) + copy(dAtA[i:], *m.Path) i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) - i += copy(dAtA[i:], *m.Path) + i-- + dAtA[i] = 0x1a } - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Webhook) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -283,35 +560,44 @@ func (m *Webhook) Marshal() (dAtA []byte, err error) { } func (m *Webhook) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Webhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Throttle != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Throttle.Size())) - n6, err := m.Throttle.MarshalTo(dAtA[i:]) + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n6 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ClientConfig.Size())) - n7, err := m.ClientConfig.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Throttle != nil { + { + size, err := 
m.Throttle.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n7 - return i, nil + return len(dAtA) - i, nil } func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -319,39 +605,48 @@ func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { } func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.URL != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) - i += copy(dAtA[i:], *m.URL) + if m.CABundle != nil { + i -= len(m.CABundle) + copy(dAtA[i:], m.CABundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i-- + dAtA[i] = 0x1a } if m.Service != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Service.Size())) - n8, err := m.Service.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0x12 } - if m.CABundle != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) - i += copy(dAtA[i:], m.CABundle) + if m.URL != nil { + i -= len(*m.URL) + copy(dAtA[i:], *m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *WebhookThrottleConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -359,33 +654,43 @@ func (m *WebhookThrottleConfig) Marshal() (dAtA []byte, err error) { } func (m *WebhookThrottleConfig) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookThrottleConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.QPS != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.QPS)) - } if m.Burst != nil { - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.Burst)) + i-- + dAtA[i] = 0x10 } - return i, nil + if m.QPS != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.QPS)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *AuditSink) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -396,6 +701,9 @@ func (m *AuditSink) Size() (n int) { } func (m *AuditSinkList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -410,6 +718,9 @@ func (m *AuditSinkList) Size() (n int) { } func (m *AuditSinkSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Policy.Size() @@ -420,6 +731,9 @@ func (m *AuditSinkSpec) Size() (n int) { } func (m *Policy) Size() 
(n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Level) @@ -434,6 +748,9 @@ func (m *Policy) Size() (n int) { } func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Namespace) @@ -444,10 +761,16 @@ func (m *ServiceReference) Size() (n int) { l = len(*m.Path) n += 1 + l + sovGenerated(uint64(l)) } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } return n } func (m *Webhook) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Throttle != nil { @@ -460,6 +783,9 @@ func (m *Webhook) Size() (n int) { } func (m *WebhookClientConfig) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.URL != nil { @@ -478,6 +804,9 @@ func (m *WebhookClientConfig) Size() (n int) { } func (m *WebhookThrottleConfig) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.QPS != nil { @@ -490,14 +819,7 @@ func (m *WebhookThrottleConfig) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -507,7 +829,7 @@ func (this *AuditSink) String() string { return "nil" } s := strings.Join([]string{`&AuditSink{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "AuditSinkSpec", "AuditSinkSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -517,9 +839,14 @@ func (this *AuditSinkList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]AuditSink{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "AuditSink", "AuditSink", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&AuditSinkList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "AuditSink", "AuditSink", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -554,6 +881,7 @@ func (this *ServiceReference) String() string { `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Path:` + valueToStringGenerated(this.Path) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, `}`, }, "") return s @@ -563,7 +891,7 @@ func (this *Webhook) String() string { return "nil" } s := strings.Join([]string{`&Webhook{`, - `Throttle:` + strings.Replace(fmt.Sprintf("%v", this.Throttle), "WebhookThrottleConfig", "WebhookThrottleConfig", 1) + `,`, + `Throttle:` + strings.Replace(this.Throttle.String(), "WebhookThrottleConfig", "WebhookThrottleConfig", 1) + `,`, `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -575,7 +903,7 @@ func (this *WebhookClientConfig) String() string { } s := 
strings.Join([]string{`&WebhookClientConfig{`, `URL:` + valueToStringGenerated(this.URL) + `,`, - `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "ServiceReference", "ServiceReference", 1) + `,`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, `}`, }, "") @@ -615,7 +943,7 @@ func (m *AuditSink) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -643,7 +971,7 @@ func (m *AuditSink) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -652,6 +980,9 @@ func (m *AuditSink) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -673,7 +1004,7 @@ func (m *AuditSink) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -682,6 +1013,9 @@ func (m *AuditSink) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -698,6 +1032,9 @@ func (m *AuditSink) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -725,7 +1062,7 @@ func (m *AuditSinkList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -753,7 +1090,7 @@ func (m *AuditSinkList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -762,6 +1099,9 @@ func (m *AuditSinkList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -783,7 +1123,7 @@ func (m *AuditSinkList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -792,6 +1132,9 @@ func (m *AuditSinkList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -809,6 +1152,9 @@ func (m *AuditSinkList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -836,7 +1182,7 @@ func (m *AuditSinkSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -864,7 +1210,7 @@ func (m *AuditSinkSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -873,6 +1219,9 @@ func (m *AuditSinkSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -894,7 +1243,7 @@ func (m *AuditSinkSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -903,6 +1252,9 @@ func (m *AuditSinkSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -919,6 +1271,9 @@ func (m *AuditSinkSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -946,7 +1301,7 @@ func (m *Policy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -974,7 +1329,7 @@ func (m *Policy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -984,6 +1339,9 @@ func (m *Policy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1003,7 +1361,7 @@ func (m *Policy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1013,6 +1371,9 @@ func (m *Policy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1027,6 +1388,9 @@ func (m *Policy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1054,7 +1418,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1082,7 +1446,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1092,6 +1456,9 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1111,7 +1478,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1121,6 +1488,9 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1140,7 +1510,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1150,12 +1520,35 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { 
return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) m.Path = &s iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1165,6 +1558,9 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1192,7 +1588,7 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1220,7 +1616,7 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1229,6 +1625,9 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1253,7 +1652,7 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1262,6 +1661,9 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1278,6 +1680,9 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1305,7 +1710,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1333,7 +1738,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1343,6 +1748,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1363,7 +1771,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1372,6 +1780,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1396,7 +1807,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift 
if b < 0x80 { break } @@ -1405,6 +1816,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1422,6 +1836,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1449,7 +1866,7 @@ func (m *WebhookThrottleConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1477,7 +1894,7 @@ func (m *WebhookThrottleConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -1497,7 +1914,7 @@ func (m *WebhookThrottleConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -1512,6 +1929,9 @@ func (m *WebhookThrottleConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1578,10 +1998,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1610,6 +2033,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1628,58 +2054,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 747 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x41, 0x6f, 0xd3, 0x48, - 0x14, 0x8e, 0x9b, 0xa4, 0x49, 0xa6, 0xe9, 0x6e, 0x77, 0xba, 0xbb, 0xca, 0x56, 0x2b, 0xa7, 0xb2, - 0xb4, 0x52, 0xa5, 0xdd, 0x8e, 0xb7, 0xa8, 0x02, 0x84, 0xb8, 0xd4, 0x3d, 0x21, 0x95, 0x52, 0x26, - 0x14, 0x04, 0x42, 0x88, 0x89, 0xf3, 0x62, 0x0f, 0x49, 0x6c, 0x63, 0x8f, 0x83, 0x7a, 0x43, 0xe2, - 0x0f, 0xf0, 0x7b, 0xb8, 0x21, 0x81, 0xd4, 0x63, 0x8f, 0x3d, 0x55, 0x34, 0x1c, 0xf8, 0x0f, 0x9c, - 0xd0, 0x8c, 0xc7, 0x49, 0x68, 0x8a, 0x48, 0x6f, 0x33, 0xdf, 0xbc, 0xef, 0x7b, 0xdf, 0xf7, 0xde, - 0xa0, 0xfd, 0xde, 0xcd, 0x84, 0xf0, 0xd0, 0xee, 0xa5, 0x6d, 0x88, 0x03, 0x10, 0x90, 0xd8, 0x43, - 0x08, 0x3a, 0x61, 0x6c, 0xeb, 0x07, 0x16, 0x71, 0x9b, 0xa5, 0x1d, 0x2e, 0x62, 0xf0, 0x78, 0x22, - 0x62, 0x26, 0x78, 0x18, 0xd8, 0xc3, 0x2d, 0xd6, 0x8f, 0x7c, 0xb6, 0x65, 0x7b, 0x10, 0x40, 0xcc, - 0x04, 0x74, 0x48, 0x14, 0x87, 0x22, 0xc4, 0xff, 0x64, 0x34, 0xc2, 0x22, 0x4e, 0x66, 0x68, 0x24, - 0xa7, 0xad, 0x6d, 0x7a, 0x5c, 0xf8, 0x69, 0x9b, 0xb8, 0xe1, 0xc0, 0xf6, 0x42, 0x2f, 0xb4, 0x15, - 0xbb, 0x9d, 0x76, 0xd5, 0x4d, 0x5d, 0xd4, 0x29, 0x53, 0x5d, 0xdb, 0x9e, 0x98, 0x19, 0x30, 0xd7, - 0xe7, 0x01, 0xc4, 0x47, 0x76, 0xd4, 0xf3, 0x24, 0x90, 0xd8, 0x03, 0x10, 0xcc, 0x1e, 0xce, 
0x78, - 0x59, 0xb3, 0x7f, 0xc4, 0x8a, 0xd3, 0x40, 0xf0, 0x01, 0xcc, 0x10, 0xae, 0xff, 0x8c, 0x90, 0xb8, - 0x3e, 0x0c, 0xd8, 0x45, 0x9e, 0xf5, 0xd1, 0x40, 0xb5, 0x1d, 0x19, 0xb6, 0xc5, 0x83, 0x1e, 0x7e, - 0x8e, 0xaa, 0xd2, 0x51, 0x87, 0x09, 0xd6, 0x30, 0xd6, 0x8d, 0x8d, 0xa5, 0x6b, 0xff, 0x93, 0xc9, - 0x54, 0xc6, 0xc2, 0x24, 0xea, 0x79, 0x12, 0x48, 0x88, 0xac, 0x26, 0xc3, 0x2d, 0x72, 0xaf, 0xfd, - 0x02, 0x5c, 0x71, 0x17, 0x04, 0x73, 0xf0, 0xf1, 0x59, 0xb3, 0x30, 0x3a, 0x6b, 0xa2, 0x09, 0x46, - 0xc7, 0xaa, 0xf8, 0x21, 0x2a, 0x25, 0x11, 0xb8, 0x8d, 0x05, 0xa5, 0xbe, 0x4d, 0xe6, 0x9a, 0x39, - 0x19, 0x3b, 0x6c, 0x45, 0xe0, 0x3a, 0x75, 0xdd, 0xa1, 0x24, 0x6f, 0x54, 0xe9, 0x59, 0x1f, 0x0c, - 0xb4, 0x3c, 0xae, 0xda, 0xe3, 0x89, 0xc0, 0x4f, 0x67, 0xb2, 0x90, 0xf9, 0xb2, 0x48, 0xb6, 0x4a, - 0xb2, 0xa2, 0xfb, 0x54, 0x73, 0x64, 0x2a, 0xc7, 0x21, 0x2a, 0x73, 0x01, 0x83, 0xa4, 0xb1, 0xb0, - 0x5e, 0xbc, 0x30, 0xa6, 0xb9, 0x82, 0x38, 0xcb, 0x5a, 0xbc, 0x7c, 0x47, 0xca, 0xd0, 0x4c, 0xcd, - 0x7a, 0x3f, 0x1d, 0x43, 0xc6, 0xc3, 0x87, 0x68, 0x31, 0x0a, 0xfb, 0xdc, 0x3d, 0xd2, 0x21, 0x36, - 0xe7, 0xec, 0x74, 0xa0, 0x48, 0xce, 0x2f, 0xba, 0xcd, 0x62, 0x76, 0xa7, 0x5a, 0x0c, 0x3f, 0x46, - 0x95, 0x57, 0xd0, 0xf6, 0xc3, 0xb0, 0xa7, 0x57, 0x41, 0xe6, 0xd4, 0x7d, 0x94, 0xb1, 0x9c, 0x5f, - 0xb5, 0x70, 0x45, 0x03, 0x34, 0xd7, 0xb3, 0x5c, 0xa4, 0x9b, 0xe1, 0xff, 0x50, 0xb9, 0x0f, 0x43, - 0xe8, 0x2b, 0xeb, 0x35, 0xe7, 0xcf, 0x3c, 0xf2, 0x9e, 0x04, 0xbf, 0xe6, 0x07, 0x9a, 0x15, 0xe1, - 0x7f, 0xd1, 0x62, 0x22, 0x98, 0x07, 0xd9, 0x4c, 0x6b, 0xce, 0xaa, 0xb4, 0xdd, 0x52, 0x88, 0xac, - 0x55, 0x27, 0xaa, 0x4b, 0xac, 0x37, 0x06, 0x5a, 0x69, 0x41, 0x3c, 0xe4, 0x2e, 0x50, 0xe8, 0x42, - 0x0c, 0x81, 0x0b, 0xd8, 0x46, 0xb5, 0x80, 0x0d, 0x20, 0x89, 0x98, 0x0b, 0xba, 0xe7, 0x6f, 0xba, - 0x67, 0x6d, 0x3f, 0x7f, 0xa0, 0x93, 0x1a, 0xbc, 0x8e, 0x4a, 0xf2, 0xa2, 0x46, 0x50, 0x9b, 0xfc, - 0x2b, 0x59, 0x4b, 0xd5, 0x0b, 0xfe, 0x1b, 0x95, 0x22, 0x26, 0xfc, 0x46, 0x51, 0x55, 0x54, 0xe5, - 0xeb, 0x01, 0x13, 0x3e, 0x55, 0xa8, 0xf5, 0xc5, 0x40, 0x79, 0x7e, 0xdc, 0x45, 0x55, 0xe1, 0xc7, - 0xa1, 0x10, 0x7d, 0xd0, 0xab, 0xba, 0x7d, 0xb5, 0x91, 0x3e, 0xd0, 0xec, 0xdd, 0x30, 0xe8, 0x72, - 0xcf, 0xa9, 0xcb, 0x9f, 0x97, 0x63, 0x74, 0xac, 0x8d, 0x05, 0xaa, 0xbb, 0x7d, 0x0e, 0x81, 0xc8, - 0xea, 0xf4, 0xfa, 0x6e, 0x5d, 0xad, 0xd7, 0xee, 0x94, 0x82, 0xf3, 0xbb, 0xce, 0x5d, 0x9f, 0x46, - 0xe9, 0x77, 0x5d, 0xac, 0x77, 0x06, 0x5a, 0xbd, 0x84, 0x8b, 0xff, 0x42, 0xc5, 0x34, 0xce, 0x17, - 0x5c, 0x19, 0x9d, 0x35, 0x8b, 0x87, 0x74, 0x8f, 0x4a, 0x0c, 0x3f, 0x43, 0x95, 0x24, 0xdb, 0x90, - 0xf6, 0x78, 0x63, 0x4e, 0x8f, 0x17, 0xf7, 0xea, 0x2c, 0xc9, 0x7f, 0x96, 0xa3, 0xb9, 0x28, 0xde, - 0x40, 0x55, 0x97, 0x39, 0x69, 0xd0, 0xe9, 0x83, 0x5a, 0x4f, 0x3d, 0x1b, 0xd9, 0xee, 0x4e, 0x86, - 0xd1, 0xf1, 0xab, 0xd5, 0x42, 0x7f, 0x5c, 0x3a, 0x63, 0xe9, 0xfe, 0x65, 0x94, 0x28, 0xf7, 0xc5, - 0xcc, 0xfd, 0xfd, 0x83, 0x16, 0x95, 0x18, 0x6e, 0xa2, 0x72, 0x3b, 0x8d, 0x13, 0xa1, 0xbc, 0x17, - 0x9d, 0x9a, 0xfc, 0xb7, 0x8e, 0x04, 0x68, 0x86, 0x3b, 0xe4, 0xf8, 0xdc, 0x2c, 0x9c, 0x9c, 0x9b, - 0x85, 0xd3, 0x73, 0xb3, 0xf0, 0x7a, 0x64, 0x1a, 0xc7, 0x23, 0xd3, 0x38, 0x19, 0x99, 0xc6, 0xe9, - 0xc8, 0x34, 0x3e, 0x8d, 0x4c, 0xe3, 0xed, 0x67, 0xb3, 0xf0, 0xa4, 0x9a, 0xa7, 0xfa, 0x16, 0x00, - 0x00, 0xff, 0xff, 0x55, 0x1b, 0x03, 0x56, 0xaf, 0x06, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto index 70801a6c5..674debee4 100644 --- a/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto +++ 
b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto @@ -83,6 +83,12 @@ message ServiceReference { // this service. // +optional optional string path = 3; + + // If specified, the port on the service that hosting webhook. + // Default to 443 for backward compatibility. + // `port` should be a valid port number (1-65535, inclusive). + // +optional + optional int32 port = 4; } // Webhook holds the configuration of the webhook @@ -132,8 +138,6 @@ message WebhookClientConfig { // // If the webhook is running within the cluster, then you should use `service`. // - // Port 443 will be used if it is open, otherwise it is an error. - // // +optional optional ServiceReference service = 2; diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/types.go b/vendor/k8s.io/api/auditregistration/v1alpha1/types.go index af31cfe27..a0fb48c30 100644 --- a/vendor/k8s.io/api/auditregistration/v1alpha1/types.go +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/types.go @@ -166,8 +166,6 @@ type WebhookClientConfig struct { // // If the webhook is running within the cluster, then you should use `service`. // - // Port 443 will be used if it is open, otherwise it is an error. - // // +optional Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,2,opt,name=service"` @@ -191,4 +189,10 @@ type ServiceReference struct { // this service. // +optional Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` + + // If specified, the port on the service that hosting webhook. + // Default to 443 for backward compatibility. + // `port` should be a valid port number (1-65535, inclusive). + // +optional + Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` } diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go index edd608f3b..1a86f4da5 100644 --- a/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go @@ -70,6 +70,7 @@ var map_ServiceReference = map[string]string{ "namespace": "`namespace` is the namespace of the service. Required", "name": "`name` is the name of the service. Required", "path": "`path` is an optional URL path which will be sent in any request to this service.", + "port": "If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive).", } func (ServiceReference) SwaggerDoc() map[string]string { @@ -89,7 +90,7 @@ func (Webhook) SwaggerDoc() map[string]string { var map_WebhookClientConfig = map[string]string{ "": "WebhookClientConfig contains the information to make a connection with the webhook", "url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. 
Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", - "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.\n\nPort 443 will be used if it is open, otherwise it is an error.", + "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.", "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.", } diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go index e71deffad..621a19e83 100644 --- a/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go @@ -55,7 +55,7 @@ func (in *AuditSink) DeepCopyObject() runtime.Object { func (in *AuditSinkList) DeepCopyInto(out *AuditSinkList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]AuditSink, len(*in)) @@ -131,6 +131,11 @@ func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { *out = new(string) **out = **in } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } return } diff --git a/vendor/k8s.io/api/authentication/v1/generated.pb.go b/vendor/k8s.io/api/authentication/v1/generated.pb.go index 4e7f28d8c..02be20dec 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1/generated.pb.go @@ -17,37 +17,23 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto - - It has these top-level messages: - BoundObjectReference - ExtraValue - TokenRequest - TokenRequestSpec - TokenRequestStatus - TokenReview - TokenReviewSpec - TokenReviewStatus - UserInfo -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + io "io" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" -import strings "strings" -import reflect "reflect" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import io "io" + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal @@ -60,41 +46,257 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *BoundObjectReference) Reset() { *m = BoundObjectReference{} } -func (*BoundObjectReference) ProtoMessage() {} -func (*BoundObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *BoundObjectReference) Reset() { *m = BoundObjectReference{} } +func (*BoundObjectReference) ProtoMessage() {} +func (*BoundObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{0} +} +func (m *BoundObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BoundObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BoundObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoundObjectReference.Merge(m, src) +} +func (m *BoundObjectReference) XXX_Size() int { + return m.Size() +} +func (m *BoundObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_BoundObjectReference.DiscardUnknown(m) +} -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_BoundObjectReference proto.InternalMessageInfo -func (m *TokenRequest) Reset() { *m = TokenRequest{} } -func (*TokenRequest) ProtoMessage() {} -func (*TokenRequest) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{1} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) +} -func (m *TokenRequestSpec) Reset() { *m = TokenRequestSpec{} } -func (*TokenRequestSpec) ProtoMessage() {} -func (*TokenRequestSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo -func (m *TokenRequestStatus) Reset() { *m = TokenRequestStatus{} } -func (*TokenRequestStatus) ProtoMessage() {} -func (*TokenRequestStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *TokenRequest) Reset() { *m = TokenRequest{} } +func (*TokenRequest) ProtoMessage() {} +func (*TokenRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{2} +} +func (m *TokenRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenRequest.Merge(m, src) +} +func (m *TokenRequest) XXX_Size() int { + return m.Size() +} 
+func (m *TokenRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TokenRequest.DiscardUnknown(m) +} -func (m *TokenReview) Reset() { *m = TokenReview{} } -func (*TokenReview) ProtoMessage() {} -func (*TokenReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_TokenRequest proto.InternalMessageInfo -func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } -func (*TokenReviewSpec) ProtoMessage() {} -func (*TokenReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *TokenRequestSpec) Reset() { *m = TokenRequestSpec{} } +func (*TokenRequestSpec) ProtoMessage() {} +func (*TokenRequestSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{3} +} +func (m *TokenRequestSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenRequestSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenRequestSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenRequestSpec.Merge(m, src) +} +func (m *TokenRequestSpec) XXX_Size() int { + return m.Size() +} +func (m *TokenRequestSpec) XXX_DiscardUnknown() { + xxx_messageInfo_TokenRequestSpec.DiscardUnknown(m) +} -func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } -func (*TokenReviewStatus) ProtoMessage() {} -func (*TokenReviewStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_TokenRequestSpec proto.InternalMessageInfo + +func (m *TokenRequestStatus) Reset() { *m = TokenRequestStatus{} } +func (*TokenRequestStatus) ProtoMessage() {} +func (*TokenRequestStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{4} +} +func (m *TokenRequestStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenRequestStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenRequestStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenRequestStatus.Merge(m, src) +} +func (m *TokenRequestStatus) XXX_Size() int { + return m.Size() +} +func (m *TokenRequestStatus) XXX_DiscardUnknown() { + xxx_messageInfo_TokenRequestStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenRequestStatus proto.InternalMessageInfo + +func (m *TokenReview) Reset() { *m = TokenReview{} } +func (*TokenReview) ProtoMessage() {} +func (*TokenReview) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{5} +} +func (m *TokenReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReview.Merge(m, src) +} +func (m *TokenReview) XXX_Size() int { + return m.Size() +} +func (m *TokenReview) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReview.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenReview proto.InternalMessageInfo + +func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } +func (*TokenReviewSpec) ProtoMessage() {} +func (*TokenReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{6} +} +func (m 
*TokenReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReviewSpec.Merge(m, src) +} +func (m *TokenReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *TokenReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenReviewSpec proto.InternalMessageInfo + +func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } +func (*TokenReviewStatus) ProtoMessage() {} +func (*TokenReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{7} +} +func (m *TokenReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReviewStatus.Merge(m, src) +} +func (m *TokenReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *TokenReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenReviewStatus proto.InternalMessageInfo + +func (m *UserInfo) Reset() { *m = UserInfo{} } +func (*UserInfo) ProtoMessage() {} +func (*UserInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{8} +} +func (m *UserInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserInfo.Merge(m, src) +} +func (m *UserInfo) XXX_Size() int { + return m.Size() +} +func (m *UserInfo) XXX_DiscardUnknown() { + xxx_messageInfo_UserInfo.DiscardUnknown(m) +} -func (m *UserInfo) Reset() { *m = UserInfo{} } -func (*UserInfo) ProtoMessage() {} -func (*UserInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +var xxx_messageInfo_UserInfo proto.InternalMessageInfo func init() { proto.RegisterType((*BoundObjectReference)(nil), "k8s.io.api.authentication.v1.BoundObjectReference") @@ -106,11 +308,78 @@ func init() { proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1.TokenReviewSpec") proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.api.authentication.v1.TokenReviewStatus") proto.RegisterType((*UserInfo)(nil), "k8s.io.api.authentication.v1.UserInfo") + proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authentication.v1.UserInfo.ExtraEntry") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto", fileDescriptor_2953ea822e7ffe1e) +} + +var fileDescriptor_2953ea822e7ffe1e = []byte{ + // 903 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0x8e, 0xf3, 0xa3, 0x4a, 0x26, 0xdb, 0xd2, 0xce, 0xb2, 0x52, 0x54, 0x20, 0x2e, 0x5e, 0x09, + 0x55, 0xc0, 0xda, 0x9b, 0x08, 0xc1, 0x6a, 0x91, 0x90, 0x6a, 0x1a, 0x41, 0x84, 0x60, 0x57, 0xb3, + 0xdb, 0x82, 0x38, 0x31, 0xb1, 0x5f, 0x53, 
0x13, 0x3c, 0x36, 0xf6, 0x38, 0x6c, 0x6e, 0xfb, 0x27, + 0x70, 0x04, 0x89, 0x03, 0x7f, 0x04, 0x12, 0xff, 0x42, 0x8f, 0x2b, 0x4e, 0x3d, 0xa0, 0x88, 0x9a, + 0x2b, 0x47, 0x4e, 0x9c, 0xd0, 0x8c, 0xa7, 0x71, 0x9c, 0xb4, 0x69, 0x4e, 0x7b, 0x8b, 0xdf, 0xfb, + 0xde, 0xf7, 0xde, 0xfb, 0xe6, 0xcb, 0x0c, 0xea, 0x8d, 0x1e, 0xc4, 0xa6, 0x17, 0x58, 0xa3, 0x64, + 0x00, 0x11, 0x03, 0x0e, 0xb1, 0x35, 0x06, 0xe6, 0x06, 0x91, 0xa5, 0x12, 0x34, 0xf4, 0x2c, 0x9a, + 0xf0, 0x53, 0x60, 0xdc, 0x73, 0x28, 0xf7, 0x02, 0x66, 0x8d, 0x3b, 0xd6, 0x10, 0x18, 0x44, 0x94, + 0x83, 0x6b, 0x86, 0x51, 0xc0, 0x03, 0xfc, 0x7a, 0x86, 0x36, 0x69, 0xe8, 0x99, 0x45, 0xb4, 0x39, + 0xee, 0xec, 0xde, 0x1b, 0x7a, 0xfc, 0x34, 0x19, 0x98, 0x4e, 0xe0, 0x5b, 0xc3, 0x60, 0x18, 0x58, + 0xb2, 0x68, 0x90, 0x9c, 0xc8, 0x2f, 0xf9, 0x21, 0x7f, 0x65, 0x64, 0xbb, 0xef, 0xe5, 0xad, 0x7d, + 0xea, 0x9c, 0x7a, 0x0c, 0xa2, 0x89, 0x15, 0x8e, 0x86, 0x22, 0x10, 0x5b, 0x3e, 0x70, 0x7a, 0xc5, + 0x08, 0xbb, 0xd6, 0x75, 0x55, 0x51, 0xc2, 0xb8, 0xe7, 0xc3, 0x52, 0xc1, 0xfb, 0x37, 0x15, 0xc4, + 0xce, 0x29, 0xf8, 0x74, 0xb1, 0xce, 0xf8, 0x43, 0x43, 0xaf, 0xda, 0x41, 0xc2, 0xdc, 0x47, 0x83, + 0x6f, 0xc1, 0xe1, 0x04, 0x4e, 0x20, 0x02, 0xe6, 0x00, 0xde, 0x43, 0xd5, 0x91, 0xc7, 0xdc, 0x96, + 0xb6, 0xa7, 0xed, 0x37, 0xec, 0x5b, 0x67, 0x53, 0xbd, 0x94, 0x4e, 0xf5, 0xea, 0x67, 0x1e, 0x73, + 0x89, 0xcc, 0xe0, 0x2e, 0x42, 0xf4, 0x71, 0xff, 0x18, 0xa2, 0xd8, 0x0b, 0x58, 0xab, 0x2c, 0x71, + 0x58, 0xe1, 0xd0, 0xc1, 0x2c, 0x43, 0xe6, 0x50, 0x82, 0x95, 0x51, 0x1f, 0x5a, 0x95, 0x22, 0xeb, + 0x17, 0xd4, 0x07, 0x22, 0x33, 0xd8, 0x46, 0x95, 0xa4, 0x7f, 0xd8, 0xaa, 0x4a, 0xc0, 0x7d, 0x05, + 0xa8, 0x1c, 0xf5, 0x0f, 0xff, 0x9b, 0xea, 0x6f, 0x5e, 0xb7, 0x24, 0x9f, 0x84, 0x10, 0x9b, 0x47, + 0xfd, 0x43, 0x22, 0x8a, 0x8d, 0x0f, 0x10, 0xea, 0x3d, 0xe3, 0x11, 0x3d, 0xa6, 0xdf, 0x25, 0x80, + 0x75, 0x54, 0xf3, 0x38, 0xf8, 0x71, 0x4b, 0xdb, 0xab, 0xec, 0x37, 0xec, 0x46, 0x3a, 0xd5, 0x6b, + 0x7d, 0x11, 0x20, 0x59, 0xfc, 0x61, 0xfd, 0xa7, 0x5f, 0xf5, 0xd2, 0xf3, 0x3f, 0xf7, 0x4a, 0xc6, + 0x2f, 0x65, 0x74, 0xeb, 0x69, 0x30, 0x02, 0x46, 0xe0, 0xfb, 0x04, 0x62, 0x8e, 0xbf, 0x41, 0x75, + 0x71, 0x44, 0x2e, 0xe5, 0x54, 0x2a, 0xd1, 0xec, 0xde, 0x37, 0x73, 0x77, 0xcc, 0x86, 0x30, 0xc3, + 0xd1, 0x50, 0x04, 0x62, 0x53, 0xa0, 0xcd, 0x71, 0xc7, 0xcc, 0xe4, 0xfc, 0x1c, 0x38, 0xcd, 0x35, + 0xc9, 0x63, 0x64, 0xc6, 0x8a, 0x1f, 0xa3, 0x6a, 0x1c, 0x82, 0x23, 0xf5, 0x6b, 0x76, 0x4d, 0x73, + 0x95, 0xf7, 0xcc, 0xf9, 0xd9, 0x9e, 0x84, 0xe0, 0xe4, 0x0a, 0x8a, 0x2f, 0x22, 0x99, 0xf0, 0x57, + 0x68, 0x23, 0xe6, 0x94, 0x27, 0xb1, 0x54, 0xb9, 0x38, 0xf1, 0x4d, 0x9c, 0xb2, 0xce, 0xde, 0x52, + 0xac, 0x1b, 0xd9, 0x37, 0x51, 0x7c, 0xc6, 0xbf, 0x1a, 0xda, 0x5e, 0x1c, 0x01, 0xbf, 0x83, 0x1a, + 0x34, 0x71, 0x3d, 0x61, 0x9a, 0x4b, 0x89, 0x37, 0xd3, 0xa9, 0xde, 0x38, 0xb8, 0x0c, 0x92, 0x3c, + 0x8f, 0x3f, 0x46, 0x3b, 0xf0, 0x2c, 0xf4, 0x22, 0xd9, 0xfd, 0x09, 0x38, 0x01, 0x73, 0x63, 0x79, + 0xd6, 0x15, 0xfb, 0x4e, 0x3a, 0xd5, 0x77, 0x7a, 0x8b, 0x49, 0xb2, 0x8c, 0xc7, 0x0c, 0x6d, 0x0d, + 0x0a, 0x96, 0x55, 0x8b, 0x76, 0x57, 0x2f, 0x7a, 0x95, 0xcd, 0x6d, 0x9c, 0x4e, 0xf5, 0xad, 0x62, + 0x86, 0x2c, 0xb0, 0x1b, 0xbf, 0x69, 0x08, 0x2f, 0xab, 0x84, 0xef, 0xa2, 0x1a, 0x17, 0x51, 0xf5, + 0x17, 0xd9, 0x54, 0xa2, 0xd5, 0x32, 0x68, 0x96, 0xc3, 0x13, 0x74, 0x3b, 0x5f, 0xe0, 0xa9, 0xe7, + 0x43, 0xcc, 0xa9, 0x1f, 0xaa, 0xd3, 0x7e, 0x7b, 0x3d, 0x2f, 0x89, 0x32, 0xfb, 0x35, 0x45, 0x7f, + 0xbb, 0xb7, 0x4c, 0x47, 0xae, 0xea, 0x61, 0xfc, 0x5c, 0x46, 0x4d, 0x35, 0xf6, 0xd8, 0x83, 0x1f, + 0x5e, 0x82, 0x97, 0x1f, 0x15, 0xbc, 0x7c, 0x6f, 0x2d, 0xdf, 0x89, 
0xd1, 0xae, 0xb5, 0xf2, 0x97, + 0x0b, 0x56, 0xb6, 0xd6, 0xa7, 0x5c, 0xed, 0x64, 0x07, 0xbd, 0xb2, 0xd0, 0x7f, 0xbd, 0xe3, 0x2c, + 0x98, 0xbd, 0xbc, 0xda, 0xec, 0xc6, 0x3f, 0x1a, 0xda, 0x59, 0x1a, 0x09, 0x7f, 0x88, 0x36, 0xe7, + 0x26, 0x87, 0xec, 0x86, 0xad, 0xdb, 0x77, 0x54, 0xbf, 0xcd, 0x83, 0xf9, 0x24, 0x29, 0x62, 0xf1, + 0xa7, 0xa8, 0x9a, 0xc4, 0x10, 0x29, 0x85, 0xdf, 0x5a, 0x2d, 0xc7, 0x51, 0x0c, 0x51, 0x9f, 0x9d, + 0x04, 0xb9, 0xb4, 0x22, 0x42, 0x24, 0x43, 0x71, 0x93, 0xea, 0x0d, 0x7f, 0xdb, 0xbb, 0xa8, 0x06, + 0x51, 0x14, 0x44, 0xea, 0xde, 0x9e, 0x69, 0xd3, 0x13, 0x41, 0x92, 0xe5, 0x8c, 0xdf, 0xcb, 0xa8, + 0x7e, 0xd9, 0x12, 0xbf, 0x8b, 0xea, 0xa2, 0x8d, 0xbc, 0xec, 0x33, 0x41, 0xb7, 0x55, 0x91, 0xc4, + 0x88, 0x38, 0x99, 0x21, 0xf0, 0x1b, 0xa8, 0x92, 0x78, 0xae, 0x7a, 0x43, 0x9a, 0x73, 0x97, 0x3e, + 0x11, 0x71, 0x6c, 0xa0, 0x8d, 0x61, 0x14, 0x24, 0xa1, 0xb0, 0x81, 0x18, 0x14, 0x89, 0x13, 0xfd, + 0x44, 0x46, 0x88, 0xca, 0xe0, 0x63, 0x54, 0x03, 0x71, 0xe7, 0xcb, 0x5d, 0x9a, 0xdd, 0xce, 0x7a, + 0xd2, 0x98, 0xf2, 0x9d, 0xe8, 0x31, 0x1e, 0x4d, 0xe6, 0xb6, 0x12, 0x31, 0x92, 0xd1, 0xed, 0x0e, + 0xd4, 0x5b, 0x22, 0x31, 0x78, 0x1b, 0x55, 0x46, 0x30, 0xc9, 0x36, 0x22, 0xe2, 0x27, 0xfe, 0x08, + 0xd5, 0xc6, 0xe2, 0x99, 0x51, 0x47, 0xb2, 0xbf, 0xba, 0x6f, 0xfe, 0x2c, 0x91, 0xac, 0xec, 0x61, + 0xf9, 0x81, 0x66, 0xef, 0x9f, 0x5d, 0xb4, 0x4b, 0x2f, 0x2e, 0xda, 0xa5, 0xf3, 0x8b, 0x76, 0xe9, + 0x79, 0xda, 0xd6, 0xce, 0xd2, 0xb6, 0xf6, 0x22, 0x6d, 0x6b, 0xe7, 0x69, 0x5b, 0xfb, 0x2b, 0x6d, + 0x6b, 0x3f, 0xfe, 0xdd, 0x2e, 0x7d, 0x5d, 0x1e, 0x77, 0xfe, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x8c, + 0x44, 0x87, 0xd0, 0xe2, 0x08, 0x00, 0x00, +} + func (m *BoundObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -118,33 +387,42 @@ func (m *BoundObjectReference) Marshal() (dAtA []byte, err error) { } func (m *BoundObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BoundObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ + i -= len(m.UID) + copy(dAtA[i:], m.UID) i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - return i, nil + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -152,32 +430,31 @@ func (m ExtraValue) Marshal() (dAtA []byte, err error) { } func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - var i int + 
size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m) > 0 { - for _, s := range m { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *TokenRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -185,41 +462,52 @@ func (m *TokenRequest) Marshal() (dAtA []byte, err error) { } func (m *TokenRequest) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TokenRequestSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -227,47 +515,48 @@ func (m *TokenRequestSpec) Marshal() (dAtA []byte, err error) { } func (m *TokenRequestSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Audiences) > 0 { - for _, s := range m.Audiences { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } + if m.ExpirationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + i-- + dAtA[i] = 0x20 } if m.BoundObjectRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.BoundObjectRef.Size())) - n4, err := m.BoundObjectRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.BoundObjectRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } 
- i += n4 + i-- + dAtA[i] = 0x1a } - if m.ExpirationSeconds != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + if len(m.Audiences) > 0 { + for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Audiences[iNdEx]) + copy(dAtA[i:], m.Audiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) + i-- + dAtA[i] = 0xa + } } - return i, nil + return len(dAtA) - i, nil } func (m *TokenRequestStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -275,29 +564,37 @@ func (m *TokenRequestStatus) Marshal() (dAtA []byte, err error) { } func (m *TokenRequestStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenRequestStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) - i += copy(dAtA[i:], m.Token) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ExpirationTimestamp.Size())) - n5, err := m.ExpirationTimestamp.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ExpirationTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Token) + copy(dAtA[i:], m.Token) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TokenReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -305,41 +602,52 @@ func (m *TokenReview) Marshal() (dAtA []byte, err error) { } func (m *TokenReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n6, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n7, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n8, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) 
if err != nil { return nil, err } @@ -347,36 +655,36 @@ func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) - i += copy(dAtA[i:], m.Token) if len(m.Audiences) > 0 { - for _, s := range m.Audiences { + for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Audiences[iNdEx]) + copy(dAtA[i:], m.Audiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + i -= len(m.Token) + copy(dAtA[i:], m.Token) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -384,52 +692,54 @@ func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ + if len(m.Audiences) > 0 { + for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Audiences[iNdEx]) + copy(dAtA[i:], m.Audiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x1a + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i-- if m.Authenticated { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.User.Size())) - n9, err := m.User.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) - i += copy(dAtA[i:], m.Error) - if len(m.Audiences) > 0 { - for _, s := range m.Audiences { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *UserInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -437,77 +747,81 @@ func (m *UserInfo) Marshal() (dAtA []byte, err error) { } func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) - i += 
copy(dAtA[i:], m.Username) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } if len(m.Extra) > 0 { keysForExtra := make([]string, 0, len(m.Extra)) for k := range m.Extra { keysForExtra = append(keysForExtra, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for _, k := range keysForExtra { - dAtA[i] = 0x22 - i++ - v := m.Extra[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n10, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 + i-- + dAtA[i] = 0x12 + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 } } - return i, nil + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x12 + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *BoundObjectReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -522,6 +836,9 @@ func (m *BoundObjectReference) Size() (n int) { } func (m ExtraValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m) > 0 { @@ -534,6 +851,9 @@ func (m ExtraValue) Size() (n int) { } func (m *TokenRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -546,6 +866,9 @@ func (m *TokenRequest) Size() (n int) { } func (m *TokenRequestSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Audiences) > 0 { @@ -565,6 +888,9 @@ func (m *TokenRequestSpec) Size() (n int) { } func (m *TokenRequestStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Token) @@ -575,6 +901,9 @@ func (m *TokenRequestStatus) Size() (n int) { } func (m *TokenReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -587,6 +916,9 @@ func (m *TokenReview) Size() (n int) { } func (m *TokenReviewSpec) Size() 
(n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Token) @@ -601,6 +933,9 @@ func (m *TokenReviewSpec) Size() (n int) { } func (m *TokenReviewStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -618,6 +953,9 @@ func (m *TokenReviewStatus) Size() (n int) { } func (m *UserInfo) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Username) @@ -643,14 +981,7 @@ func (m *UserInfo) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -673,7 +1004,7 @@ func (this *TokenRequest) String() string { return "nil" } s := strings.Join([]string{`&TokenRequest{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenRequestSpec", "TokenRequestSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenRequestStatus", "TokenRequestStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -686,7 +1017,7 @@ func (this *TokenRequestSpec) String() string { } s := strings.Join([]string{`&TokenRequestSpec{`, `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, - `BoundObjectRef:` + strings.Replace(fmt.Sprintf("%v", this.BoundObjectRef), "BoundObjectReference", "BoundObjectReference", 1) + `,`, + `BoundObjectRef:` + strings.Replace(this.BoundObjectRef.String(), "BoundObjectReference", "BoundObjectReference", 1) + `,`, `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, `}`, }, "") @@ -698,7 +1029,7 @@ func (this *TokenRequestStatus) String() string { } s := strings.Join([]string{`&TokenRequestStatus{`, `Token:` + fmt.Sprintf("%v", this.Token) + `,`, - `ExpirationTimestamp:` + strings.Replace(strings.Replace(this.ExpirationTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `ExpirationTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExpirationTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -708,7 +1039,7 @@ func (this *TokenReview) String() string { return "nil" } s := strings.Join([]string{`&TokenReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenReviewSpec", "TokenReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenReviewStatus", "TokenReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -785,7 +1116,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -813,7 +1144,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-823,6 +1154,9 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -842,7 +1176,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -852,6 +1186,9 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -871,7 +1208,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -881,6 +1218,9 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -900,7 +1240,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -910,6 +1250,9 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -924,6 +1267,9 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -951,7 +1297,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -979,7 +1325,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -989,6 +1335,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1003,6 +1352,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1030,7 +1382,7 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1058,7 +1410,7 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1067,6 +1419,9 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1088,7 +1443,7 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1097,6 +1452,9 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1118,7 +1476,7 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1127,6 +1485,9 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1143,6 +1504,9 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1170,7 +1534,7 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1198,7 +1562,7 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1208,6 +1572,9 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1227,7 +1594,7 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1236,6 +1603,9 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1260,7 +1630,7 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -1275,6 +1645,9 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1302,7 +1675,7 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1330,7 +1703,7 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1340,6 +1713,9 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1359,7 +1735,7 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1368,6 +1744,9 @@ func (m *TokenRequestStatus) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1384,6 +1763,9 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1411,7 +1793,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1439,7 +1821,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1448,6 +1830,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1469,7 +1854,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1478,6 +1863,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1499,7 +1887,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1508,6 +1896,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1524,6 +1915,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1551,7 +1945,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1579,7 +1973,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1589,6 +1983,9 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1608,7 +2005,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1618,6 +2015,9 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1632,6 +2032,9 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if 
(iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1659,7 +2062,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1687,7 +2090,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1707,7 +2110,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1716,6 +2119,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1737,7 +2143,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1747,6 +2153,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1766,7 +2175,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1776,6 +2185,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1790,6 +2202,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1817,7 +2232,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1845,7 +2260,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1855,6 +2270,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1874,7 +2292,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1884,6 +2302,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1903,7 +2324,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1913,6 +2334,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1932,7 +2356,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1941,6 +2365,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1961,7 +2388,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1978,7 +2405,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1988,6 +2415,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -2004,7 +2434,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2013,7 +2443,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { @@ -2050,6 +2480,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2116,10 +2549,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -2148,6 +2584,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -2166,68 +2605,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 900 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0x8e, 0xf3, 0xa3, 0x4a, 0x26, 0xdb, 0xd2, 0xce, 0xb2, 0x52, 0x54, 0xc0, 0x2e, 0x41, 0x42, - 0x15, 0xb0, 0xf6, 0x26, 0x42, 0xb0, 0x5a, 0x24, 0xa4, 0x9a, 0x46, 0x10, 0x21, 0xd8, 0xd5, 0xec, - 0xb6, 0x20, 0x4e, 0x4c, 0xec, 0xd7, 0xc4, 0x04, 0x8f, 0x8d, 0x3d, 0x0e, 0x9b, 0xdb, 0xfe, 0x09, - 0x1c, 0x41, 0xe2, 0xc0, 0x1f, 0x81, 0xc4, 0xbf, 0xd0, 0xe3, 0x8a, 0xd3, 0x1e, 0x50, 0x44, 0xcd, - 0x95, 0x23, 0x27, 0x4e, 0x68, 0xc6, 0xd3, 0x38, 0x4e, 0xda, 0x34, 0x27, 0x6e, 0x9e, 0xf7, 0xbe, - 0xf7, 0xbd, 0x37, 0xdf, 0x7c, 0x9e, 0x41, 0xbd, 0xf1, 0xfd, 0xd8, 0xf4, 0x02, 0x6b, 0x9c, 0x0c, - 0x20, 0x62, 0xc0, 0x21, 0xb6, 0x26, 0xc0, 0xdc, 0x20, 0xb2, 0x54, 0x82, 0x86, 
0x9e, 0x45, 0x13, - 0x3e, 0x02, 0xc6, 0x3d, 0x87, 0x72, 0x2f, 0x60, 0xd6, 0xa4, 0x63, 0x0d, 0x81, 0x41, 0x44, 0x39, - 0xb8, 0x66, 0x18, 0x05, 0x3c, 0xc0, 0xaf, 0x66, 0x68, 0x93, 0x86, 0x9e, 0x59, 0x44, 0x9b, 0x93, - 0xce, 0xfe, 0xdd, 0xa1, 0xc7, 0x47, 0xc9, 0xc0, 0x74, 0x02, 0xdf, 0x1a, 0x06, 0xc3, 0xc0, 0x92, - 0x45, 0x83, 0xe4, 0x4c, 0xae, 0xe4, 0x42, 0x7e, 0x65, 0x64, 0xfb, 0xef, 0xe6, 0xad, 0x7d, 0xea, - 0x8c, 0x3c, 0x06, 0xd1, 0xd4, 0x0a, 0xc7, 0x43, 0x11, 0x88, 0x2d, 0x1f, 0x38, 0xbd, 0x62, 0x84, - 0x7d, 0xeb, 0xba, 0xaa, 0x28, 0x61, 0xdc, 0xf3, 0x61, 0xa5, 0xe0, 0xbd, 0x9b, 0x0a, 0x62, 0x67, - 0x04, 0x3e, 0x5d, 0xae, 0x6b, 0xff, 0xae, 0xa1, 0x97, 0xed, 0x20, 0x61, 0xee, 0xc3, 0xc1, 0x37, - 0xe0, 0x70, 0x02, 0x67, 0x10, 0x01, 0x73, 0x00, 0x1f, 0xa0, 0xea, 0xd8, 0x63, 0x6e, 0x4b, 0x3b, - 0xd0, 0x0e, 0x1b, 0xf6, 0xad, 0xf3, 0x99, 0x51, 0x4a, 0x67, 0x46, 0xf5, 0x53, 0x8f, 0xb9, 0x44, - 0x66, 0x70, 0x17, 0x21, 0xfa, 0xa8, 0x7f, 0x0a, 0x51, 0xec, 0x05, 0xac, 0x55, 0x96, 0x38, 0xac, - 0x70, 0xe8, 0x68, 0x9e, 0x21, 0x0b, 0x28, 0xc1, 0xca, 0xa8, 0x0f, 0xad, 0x4a, 0x91, 0xf5, 0x73, - 0xea, 0x03, 0x91, 0x19, 0x6c, 0xa3, 0x4a, 0xd2, 0x3f, 0x6e, 0x55, 0x25, 0xe0, 0x9e, 0x02, 0x54, - 0x4e, 0xfa, 0xc7, 0xff, 0xce, 0x8c, 0xd7, 0xaf, 0xdb, 0x24, 0x9f, 0x86, 0x10, 0x9b, 0x27, 0xfd, - 0x63, 0x22, 0x8a, 0xdb, 0xef, 0x23, 0xd4, 0x7b, 0xca, 0x23, 0x7a, 0x4a, 0xbf, 0x4d, 0x00, 0x1b, - 0xa8, 0xe6, 0x71, 0xf0, 0xe3, 0x96, 0x76, 0x50, 0x39, 0x6c, 0xd8, 0x8d, 0x74, 0x66, 0xd4, 0xfa, - 0x22, 0x40, 0xb2, 0xf8, 0x83, 0xfa, 0x8f, 0xbf, 0x18, 0xa5, 0x67, 0x7f, 0x1c, 0x94, 0xda, 0x3f, - 0x97, 0xd1, 0xad, 0x27, 0xc1, 0x18, 0x18, 0x81, 0xef, 0x12, 0x88, 0x39, 0xfe, 0x1a, 0xd5, 0xc5, - 0x11, 0xb9, 0x94, 0x53, 0xa9, 0x44, 0xb3, 0x7b, 0xcf, 0xcc, 0xdd, 0x31, 0x1f, 0xc2, 0x0c, 0xc7, - 0x43, 0x11, 0x88, 0x4d, 0x81, 0x36, 0x27, 0x1d, 0x33, 0x93, 0xf3, 0x33, 0xe0, 0x34, 0xd7, 0x24, - 0x8f, 0x91, 0x39, 0x2b, 0x7e, 0x84, 0xaa, 0x71, 0x08, 0x8e, 0xd4, 0xaf, 0xd9, 0x35, 0xcd, 0x75, - 0xde, 0x33, 0x17, 0x67, 0x7b, 0x1c, 0x82, 0x93, 0x2b, 0x28, 0x56, 0x44, 0x32, 0xe1, 0x2f, 0xd1, - 0x56, 0xcc, 0x29, 0x4f, 0x62, 0xa9, 0x72, 0x71, 0xe2, 0x9b, 0x38, 0x65, 0x9d, 0xbd, 0xa3, 0x58, - 0xb7, 0xb2, 0x35, 0x51, 0x7c, 0xed, 0x7f, 0x34, 0xb4, 0xbb, 0x3c, 0x02, 0x7e, 0x1b, 0x35, 0x68, - 0xe2, 0x7a, 0xc2, 0x34, 0x97, 0x12, 0x6f, 0xa7, 0x33, 0xa3, 0x71, 0x74, 0x19, 0x24, 0x79, 0x1e, - 0x33, 0xb4, 0x33, 0x28, 0xb8, 0x4d, 0xcd, 0xd8, 0x5d, 0x3f, 0xe3, 0x55, 0x0e, 0xb5, 0x71, 0x3a, - 0x33, 0x76, 0x8a, 0x19, 0xb2, 0xc4, 0x8e, 0x3f, 0x42, 0x7b, 0xf0, 0x34, 0xf4, 0x22, 0xc9, 0xf4, - 0x18, 0x9c, 0x80, 0xb9, 0xb1, 0xf4, 0x56, 0xc5, 0xbe, 0x93, 0xce, 0x8c, 0xbd, 0xde, 0x72, 0x92, - 0xac, 0xe2, 0xdb, 0xbf, 0x6a, 0x08, 0xaf, 0xaa, 0x84, 0xdf, 0x40, 0x35, 0x2e, 0xa2, 0xea, 0x17, - 0xd9, 0x56, 0xa2, 0xd5, 0x32, 0x68, 0x96, 0xc3, 0x53, 0x74, 0x3b, 0x27, 0x7c, 0xe2, 0xf9, 0x10, - 0x73, 0xea, 0x87, 0xea, 0xb4, 0xdf, 0xda, 0xcc, 0x4b, 0xa2, 0xcc, 0x7e, 0x45, 0xd1, 0xdf, 0xee, - 0xad, 0xd2, 0x91, 0xab, 0x7a, 0xb4, 0x7f, 0x2a, 0xa3, 0xa6, 0x1a, 0x7b, 0xe2, 0xc1, 0xf7, 0xff, - 0x83, 0x97, 0x1f, 0x16, 0xbc, 0x7c, 0x77, 0x23, 0xdf, 0x89, 0xd1, 0xae, 0xb5, 0xf2, 0x17, 0x4b, - 0x56, 0xb6, 0x36, 0xa7, 0x5c, 0xef, 0x64, 0x07, 0xbd, 0xb4, 0xd4, 0x7f, 0xb3, 0xe3, 0x2c, 0x98, - 0xbd, 0xbc, 0xde, 0xec, 0xed, 0xbf, 0x35, 0xb4, 0xb7, 0x32, 0x12, 0xfe, 0x00, 0x6d, 0x2f, 0x4c, - 0x0e, 0xd9, 0x0d, 0x5b, 0xb7, 0xef, 0xa8, 0x7e, 0xdb, 0x47, 0x8b, 0x49, 0x52, 0xc4, 0xe2, 0x4f, - 0x50, 0x35, 0x89, 0x21, 0x52, 0x0a, 0xbf, 0xb9, 0x5e, 0x8e, 0x93, 0x18, 0xa2, 0x3e, 0x3b, 0x0b, - 0x72, 
0x69, 0x45, 0x84, 0x48, 0x06, 0xb1, 0x5d, 0x88, 0xa2, 0x20, 0x52, 0x57, 0xf1, 0x7c, 0xbb, - 0x3d, 0x11, 0x24, 0x59, 0xae, 0xb8, 0xdd, 0xea, 0x0d, 0xdb, 0xfd, 0xad, 0x8c, 0xea, 0x97, 0x2d, - 0xf1, 0x3b, 0xa8, 0x2e, 0xda, 0xc8, 0xcb, 0x3e, 0x13, 0x74, 0x57, 0x75, 0x90, 0x18, 0x11, 0x27, - 0x73, 0x04, 0x7e, 0x0d, 0x55, 0x12, 0xcf, 0x55, 0x6f, 0x48, 0x73, 0xe1, 0xd2, 0x27, 0x22, 0x8e, - 0xdb, 0x68, 0x6b, 0x18, 0x05, 0x49, 0x28, 0x6c, 0x20, 0x66, 0x40, 0xe2, 0x44, 0x3f, 0x96, 0x11, - 0xa2, 0x32, 0xf8, 0x14, 0xd5, 0x40, 0xdc, 0xf9, 0x72, 0xcc, 0x66, 0xb7, 0xb3, 0x99, 0x34, 0xa6, - 0x7c, 0x27, 0x7a, 0x8c, 0x47, 0xd3, 0x05, 0x09, 0x44, 0x8c, 0x64, 0x74, 0xfb, 0x03, 0xf5, 0x96, - 0x48, 0x0c, 0xde, 0x45, 0x95, 0x31, 0x4c, 0xb3, 0x1d, 0x11, 0xf1, 0x89, 0x3f, 0x44, 0xb5, 0x89, - 0x78, 0x66, 0xd4, 0x91, 0x1c, 0xae, 0xef, 0x9b, 0x3f, 0x4b, 0x24, 0x2b, 0x7b, 0x50, 0xbe, 0xaf, - 0xd9, 0x87, 0xe7, 0x17, 0x7a, 0xe9, 0xf9, 0x85, 0x5e, 0x7a, 0x71, 0xa1, 0x97, 0x9e, 0xa5, 0xba, - 0x76, 0x9e, 0xea, 0xda, 0xf3, 0x54, 0xd7, 0x5e, 0xa4, 0xba, 0xf6, 0x67, 0xaa, 0x6b, 0x3f, 0xfc, - 0xa5, 0x97, 0xbe, 0x2a, 0x4f, 0x3a, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x04, 0x81, 0x6f, - 0xe2, 0x08, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go index 5f34e76a9..0721bda87 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go @@ -17,31 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto - - It has these top-level messages: - ExtraValue - TokenReview - TokenReviewSpec - TokenReviewStatus - UserInfo -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -54,25 +44,145 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{0} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) +} -func (m *TokenReview) Reset() { *m = TokenReview{} } -func (*TokenReview) ProtoMessage() {} -func (*TokenReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo -func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } -func (*TokenReviewSpec) ProtoMessage() {} -func (*TokenReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *TokenReview) Reset() { *m = TokenReview{} } +func (*TokenReview) ProtoMessage() {} +func (*TokenReview) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{1} +} +func (m *TokenReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReview.Merge(m, src) +} +func (m *TokenReview) XXX_Size() int { + return m.Size() +} +func (m *TokenReview) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReview.DiscardUnknown(m) +} -func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } -func (*TokenReviewStatus) ProtoMessage() {} -func (*TokenReviewStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_TokenReview proto.InternalMessageInfo -func (m *UserInfo) Reset() { *m = UserInfo{} } -func (*UserInfo) ProtoMessage() {} -func (*UserInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } +func (*TokenReviewSpec) ProtoMessage() {} +func (*TokenReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{2} +} +func (m *TokenReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReviewSpec.Merge(m, src) +} +func (m *TokenReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *TokenReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenReviewSpec proto.InternalMessageInfo + +func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } +func 
(*TokenReviewStatus) ProtoMessage() {} +func (*TokenReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{3} +} +func (m *TokenReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReviewStatus.Merge(m, src) +} +func (m *TokenReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *TokenReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenReviewStatus proto.InternalMessageInfo + +func (m *UserInfo) Reset() { *m = UserInfo{} } +func (*UserInfo) ProtoMessage() {} +func (*UserInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{4} +} +func (m *UserInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserInfo.Merge(m, src) +} +func (m *UserInfo) XXX_Size() int { + return m.Size() +} +func (m *UserInfo) XXX_DiscardUnknown() { + xxx_messageInfo_UserInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_UserInfo proto.InternalMessageInfo func init() { proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authentication.v1beta1.ExtraValue") @@ -80,11 +190,63 @@ func init() { proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewSpec") proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewStatus") proto.RegisterType((*UserInfo)(nil), "k8s.io.api.authentication.v1beta1.UserInfo") + proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authentication.v1beta1.UserInfo.ExtraEntry") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto", fileDescriptor_77c9b20d3ad27844) +} + +var fileDescriptor_77c9b20d3ad27844 = []byte{ + // 663 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xb6, 0xf3, 0x53, 0x92, 0x0d, 0x81, 0xb2, 0x12, 0x52, 0x14, 0x09, 0xa7, 0x84, 0x4b, 0xa5, + 0xd2, 0x35, 0xad, 0xaa, 0x52, 0x95, 0x53, 0x0d, 0x15, 0x2a, 0x52, 0x85, 0xb4, 0xb4, 0x1c, 0x80, + 0x03, 0x1b, 0x67, 0xea, 0x98, 0xe0, 0x1f, 0xad, 0xd7, 0x81, 0xde, 0xfa, 0x08, 0x1c, 0x39, 0x22, + 0xf1, 0x24, 0xdc, 0x7a, 0xec, 0xb1, 0x07, 0x14, 0x51, 0xf3, 0x04, 0xbc, 0x01, 0xda, 0xf5, 0xb6, + 0x4e, 0x1b, 0x41, 0xdb, 0x9b, 0xf7, 0x9b, 0xf9, 0xbe, 0x99, 0xf9, 0xc6, 0x83, 0x5e, 0x0c, 0xd7, + 0x12, 0xe2, 0x47, 0xf6, 0x30, 0xed, 0x01, 0x0f, 0x41, 0x40, 0x62, 0x8f, 0x20, 0xec, 0x47, 0xdc, + 0xd6, 0x01, 0x16, 0xfb, 0x36, 0x4b, 0xc5, 0x00, 0x42, 0xe1, 0xbb, 0x4c, 0xf8, 0x51, 0x68, 0x8f, + 0x96, 0x7a, 0x20, 0xd8, 0x92, 0xed, 0x41, 0x08, 0x9c, 0x09, 0xe8, 0x93, 0x98, 0x47, 0x22, 0xc2, + 0xf7, 0x73, 0x0a, 0x61, 0xb1, 0x4f, 0xce, 0x53, 0x88, 0xa6, 0xb4, 0x17, 0x3d, 0x5f, 0x0c, 0xd2, + 0x1e, 0x71, 0xa3, 0xc0, 0xf6, 0x22, 0x2f, 0xb2, 0x15, 0xb3, 0x97, 0xee, 0xa9, 0x97, 0x7a, 0xa8, + 0xaf, 0x5c, 0xb1, 0xbd, 0x52, 0x34, 0x11, 0x30, 0x77, 0xe0, 0x87, 0xc0, 0xf7, 0xed, 0x78, 0xe8, + 0x49, 0x20, 0xb1, 0x03, 
0x10, 0xcc, 0x1e, 0x4d, 0xf5, 0xd1, 0xb6, 0xff, 0xc5, 0xe2, 0x69, 0x28, + 0xfc, 0x00, 0xa6, 0x08, 0xab, 0x97, 0x11, 0x12, 0x77, 0x00, 0x01, 0xbb, 0xc8, 0xeb, 0x3e, 0x46, + 0x68, 0xf3, 0xb3, 0xe0, 0xec, 0x35, 0xfb, 0x98, 0x02, 0xee, 0xa0, 0xaa, 0x2f, 0x20, 0x48, 0x5a, + 0xe6, 0x5c, 0x79, 0xbe, 0xee, 0xd4, 0xb3, 0x71, 0xa7, 0xba, 0x25, 0x01, 0x9a, 0xe3, 0xeb, 0xb5, + 0xaf, 0xdf, 0x3a, 0xc6, 0xc1, 0xcf, 0x39, 0xa3, 0xfb, 0xbd, 0x84, 0x1a, 0x3b, 0xd1, 0x10, 0x42, + 0x0a, 0x23, 0x1f, 0x3e, 0xe1, 0xf7, 0xa8, 0x26, 0x87, 0xe9, 0x33, 0xc1, 0x5a, 0xe6, 0x9c, 0x39, + 0xdf, 0x58, 0x7e, 0x44, 0x0a, 0x33, 0xcf, 0x7a, 0x22, 0xf1, 0xd0, 0x93, 0x40, 0x42, 0x64, 0x36, + 0x19, 0x2d, 0x91, 0x97, 0xbd, 0x0f, 0xe0, 0x8a, 0x6d, 0x10, 0xcc, 0xc1, 0x87, 0xe3, 0x8e, 0x91, + 0x8d, 0x3b, 0xa8, 0xc0, 0xe8, 0x99, 0x2a, 0xde, 0x41, 0x95, 0x24, 0x06, 0xb7, 0x55, 0x52, 0xea, + 0xcb, 0xe4, 0xd2, 0x55, 0x91, 0x89, 0xfe, 0x5e, 0xc5, 0xe0, 0x3a, 0x37, 0xb5, 0x7e, 0x45, 0xbe, + 0xa8, 0x52, 0xc3, 0xef, 0xd0, 0x4c, 0x22, 0x98, 0x48, 0x93, 0x56, 0x59, 0xe9, 0xae, 0x5c, 0x53, + 0x57, 0x71, 0x9d, 0x5b, 0x5a, 0x79, 0x26, 0x7f, 0x53, 0xad, 0xd9, 0x75, 0xd1, 0xed, 0x0b, 0x4d, + 0xe0, 0x07, 0xa8, 0x2a, 0x24, 0xa4, 0x5c, 0xaa, 0x3b, 0x4d, 0xcd, 0xac, 0xe6, 0x79, 0x79, 0x0c, + 0x2f, 0xa0, 0x3a, 0x4b, 0xfb, 0x3e, 0x84, 0x2e, 0x24, 0xad, 0x92, 0x5a, 0x46, 0x33, 0x1b, 0x77, + 0xea, 0x1b, 0xa7, 0x20, 0x2d, 0xe2, 0xdd, 0x3f, 0x26, 0xba, 0x33, 0xd5, 0x12, 0x7e, 0x82, 0x9a, + 0x13, 0xed, 0x43, 0x5f, 0xd5, 0xab, 0x39, 0x77, 0x75, 0xbd, 0xe6, 0xc6, 0x64, 0x90, 0x9e, 0xcf, + 0xc5, 0xdb, 0xa8, 0x92, 0x26, 0xc0, 0xb5, 0xd7, 0x0b, 0x57, 0xf0, 0x64, 0x37, 0x01, 0xbe, 0x15, + 0xee, 0x45, 0x85, 0xc9, 0x12, 0xa1, 0x4a, 0xe6, 0xfc, 0x38, 0x95, 0xff, 0x8f, 0x23, 0x0d, 0x02, + 0xce, 0x23, 0xae, 0x16, 0x32, 0x61, 0xd0, 0xa6, 0x04, 0x69, 0x1e, 0xeb, 0xfe, 0x28, 0xa1, 0xda, + 0x69, 0x49, 0xfc, 0x10, 0xd5, 0x64, 0x99, 0x90, 0x05, 0xa0, 0x5d, 0x9d, 0xd5, 0x24, 0x95, 0x23, + 0x71, 0x7a, 0x96, 0x81, 0xef, 0xa1, 0x72, 0xea, 0xf7, 0xd5, 0x68, 0x75, 0xa7, 0xa1, 0x13, 0xcb, + 0xbb, 0x5b, 0xcf, 0xa8, 0xc4, 0x71, 0x17, 0xcd, 0x78, 0x3c, 0x4a, 0x63, 0xf9, 0x43, 0xc8, 0x46, + 0x91, 0x5c, 0xeb, 0x73, 0x85, 0x50, 0x1d, 0xc1, 0x6f, 0x51, 0x15, 0xe4, 0xd5, 0xa8, 0x59, 0x1a, + 0xcb, 0xab, 0xd7, 0xf0, 0x87, 0xa8, 0x73, 0xdb, 0x0c, 0x05, 0xdf, 0x9f, 0x18, 0x4d, 0x62, 0x34, + 0xd7, 0x6c, 0x7b, 0xfa, 0x24, 0x55, 0x0e, 0x9e, 0x45, 0xe5, 0x21, 0xec, 0xe7, 0x63, 0x51, 0xf9, + 0x89, 0x9f, 0xa2, 0xea, 0x48, 0x5e, 0xab, 0x5e, 0xce, 0xe2, 0x15, 0x8a, 0x17, 0x27, 0x4e, 0x73, + 0xee, 0x7a, 0x69, 0xcd, 0x74, 0x16, 0x0f, 0x4f, 0x2c, 0xe3, 0xe8, 0xc4, 0x32, 0x8e, 0x4f, 0x2c, + 0xe3, 0x20, 0xb3, 0xcc, 0xc3, 0xcc, 0x32, 0x8f, 0x32, 0xcb, 0x3c, 0xce, 0x2c, 0xf3, 0x57, 0x66, + 0x99, 0x5f, 0x7e, 0x5b, 0xc6, 0x9b, 0x1b, 0x5a, 0xe4, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x66, + 0xbb, 0x89, 0x53, 0x68, 0x05, 0x00, 0x00, +} + func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -92,32 +254,31 @@ func (m ExtraValue) Marshal() (dAtA []byte, err error) { } func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m) > 0 { - for _, s := range m { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *TokenReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -125,41 +286,52 @@ func (m *TokenReview) Marshal() (dAtA []byte, err error) { } func (m *TokenReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -167,36 +339,36 @@ func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) - i += copy(dAtA[i:], m.Token) if len(m.Audiences) > 0 { - for _, s := range m.Audiences { + for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Audiences[iNdEx]) + copy(dAtA[i:], m.Audiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + i -= len(m.Token) + copy(dAtA[i:], m.Token) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -204,52 +376,54 @@ func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *TokenReviewStatus) 
MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ + if len(m.Audiences) > 0 { + for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Audiences[iNdEx]) + copy(dAtA[i:], m.Audiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x1a + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i-- if m.Authenticated { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.User.Size())) - n4, err := m.User.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) - i += copy(dAtA[i:], m.Error) - if len(m.Audiences) > 0 { - for _, s := range m.Audiences { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *UserInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -257,77 +431,81 @@ func (m *UserInfo) Marshal() (dAtA []byte, err error) { } func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) - i += copy(dAtA[i:], m.Username) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } if len(m.Extra) > 0 { keysForExtra := make([]string, 0, len(m.Extra)) for k := range m.Extra { keysForExtra = append(keysForExtra, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for _, k := range keysForExtra { - dAtA[i] = 0x22 - i++ - v := m.Extra[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n5, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- + dAtA[i] = 0x12 + 
i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 } } - return i, nil + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x12 + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m ExtraValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m) > 0 { @@ -340,6 +518,9 @@ func (m ExtraValue) Size() (n int) { } func (m *TokenReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -352,6 +533,9 @@ func (m *TokenReview) Size() (n int) { } func (m *TokenReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Token) @@ -366,6 +550,9 @@ func (m *TokenReviewSpec) Size() (n int) { } func (m *TokenReviewStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -383,6 +570,9 @@ func (m *TokenReviewStatus) Size() (n int) { } func (m *UserInfo) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Username) @@ -408,14 +598,7 @@ func (m *UserInfo) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -425,7 +608,7 @@ func (this *TokenReview) String() string { return "nil" } s := strings.Join([]string{`&TokenReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenReviewSpec", "TokenReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenReviewStatus", "TokenReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -502,7 +685,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -530,7 +713,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -540,6 +723,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -554,6 +740,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return 
ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -581,7 +770,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -609,7 +798,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -618,6 +807,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -639,7 +831,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -648,6 +840,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -669,7 +864,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -678,6 +873,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -694,6 +892,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -721,7 +922,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -749,7 +950,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -759,6 +960,9 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -778,7 +982,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -788,6 +992,9 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -802,6 +1009,9 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -829,7 +1039,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -857,7 +1067,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } 
b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -877,7 +1087,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -886,6 +1096,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -907,7 +1120,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -917,6 +1130,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -936,7 +1152,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -946,6 +1162,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -960,6 +1179,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -987,7 +1209,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1015,7 +1237,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1025,6 +1247,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1044,7 +1269,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1054,6 +1279,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1073,7 +1301,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1083,6 +1311,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1102,7 +1333,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1111,6 +1342,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error 
{ return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1131,7 +1365,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1148,7 +1382,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1158,6 +1392,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -1174,7 +1411,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1183,7 +1420,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { @@ -1220,6 +1457,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1286,10 +1526,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1318,6 +1561,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1336,53 +1582,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 663 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xcd, 0x4e, 0x14, 0x4d, - 0x14, 0xed, 0x9e, 0x1f, 0xbe, 0x99, 0x9a, 0x6f, 0x14, 0x2b, 0x31, 0x99, 0x4c, 0x62, 0x0f, 0x8e, - 0x1b, 0x12, 0xa4, 0x5a, 0x08, 0x41, 0x82, 0x2b, 0x5a, 0x89, 0xc1, 0x84, 0x98, 0x94, 0xe0, 0x42, - 0x5d, 0x58, 0xd3, 0x73, 0xe9, 0x69, 0xc7, 0xfe, 0x49, 0x55, 0xf5, 0x28, 0x3b, 0x1e, 0xc1, 0xa5, - 0x4b, 0x13, 0x9f, 0xc4, 0x1d, 0x4b, 0x96, 0x2c, 0xcc, 0x44, 0xda, 0x27, 0xf0, 0x0d, 0x4c, 0x55, - 0x17, 0xcc, 0x00, 0x31, 0xc0, 0xae, 0xeb, 0xdc, 0x7b, 0xce, 0x3d, 0xf7, 0x54, 0x17, 0x7a, 0x31, - 0x5c, 0x13, 0x24, 0x4c, 0xdc, 0x61, 0xd6, 0x03, 0x1e, 0x83, 0x04, 0xe1, 0x8e, 0x20, 0xee, 0x27, - 0xdc, 0x35, 0x05, 0x96, 0x86, 0x2e, 0xcb, 0xe4, 0x00, 0x62, 0x19, 0xfa, 0x4c, 0x86, 0x49, 0xec, - 0x8e, 0x96, 0x7a, 0x20, 0xd9, 0x92, 0x1b, 0x40, 0x0c, 0x9c, 0x49, 0xe8, 0x93, 0x94, 0x27, 0x32, - 0xc1, 0xf7, 0x0b, 0x0a, 0x61, 0x69, 0x48, 0xce, 0x53, 0x88, 0xa1, 0xb4, 0x17, 0x83, 0x50, 0x0e, - 0xb2, 0x1e, 0xf1, 0x93, 0xc8, 0x0d, 0x92, 0x20, 0x71, 0x35, 0xb3, 0x97, 0xed, 0xe9, 0x93, 0x3e, - 0xe8, 0xaf, 0x42, 0xb1, 0xbd, 
0x32, 0x31, 0x11, 0x31, 0x7f, 0x10, 0xc6, 0xc0, 0xf7, 0xdd, 0x74, - 0x18, 0x28, 0x40, 0xb8, 0x11, 0x48, 0xe6, 0x8e, 0x2e, 0xf9, 0x68, 0xbb, 0xff, 0x62, 0xf1, 0x2c, - 0x96, 0x61, 0x04, 0x97, 0x08, 0xab, 0x57, 0x11, 0x84, 0x3f, 0x80, 0x88, 0x5d, 0xe4, 0x75, 0x1f, - 0x23, 0xb4, 0xf9, 0x59, 0x72, 0xf6, 0x9a, 0x7d, 0xcc, 0x00, 0x77, 0x50, 0x35, 0x94, 0x10, 0x89, - 0x96, 0x3d, 0x57, 0x9e, 0xaf, 0x7b, 0xf5, 0x7c, 0xdc, 0xa9, 0x6e, 0x29, 0x80, 0x16, 0xf8, 0x7a, - 0xed, 0xeb, 0xb7, 0x8e, 0x75, 0xf0, 0x73, 0xce, 0xea, 0x7e, 0x2f, 0xa1, 0xc6, 0x4e, 0x32, 0x84, - 0x98, 0xc2, 0x28, 0x84, 0x4f, 0xf8, 0x3d, 0xaa, 0xa9, 0x65, 0xfa, 0x4c, 0xb2, 0x96, 0x3d, 0x67, - 0xcf, 0x37, 0x96, 0x1f, 0x91, 0x49, 0x98, 0x67, 0x9e, 0x48, 0x3a, 0x0c, 0x14, 0x20, 0x88, 0xea, - 0x26, 0xa3, 0x25, 0xf2, 0xb2, 0xf7, 0x01, 0x7c, 0xb9, 0x0d, 0x92, 0x79, 0xf8, 0x70, 0xdc, 0xb1, - 0xf2, 0x71, 0x07, 0x4d, 0x30, 0x7a, 0xa6, 0x8a, 0x77, 0x50, 0x45, 0xa4, 0xe0, 0xb7, 0x4a, 0x5a, - 0x7d, 0x99, 0x5c, 0x79, 0x55, 0x64, 0xca, 0xdf, 0xab, 0x14, 0x7c, 0xef, 0x7f, 0xa3, 0x5f, 0x51, - 0x27, 0xaa, 0xd5, 0xf0, 0x3b, 0x34, 0x23, 0x24, 0x93, 0x99, 0x68, 0x95, 0xb5, 0xee, 0xca, 0x0d, - 0x75, 0x35, 0xd7, 0xbb, 0x65, 0x94, 0x67, 0x8a, 0x33, 0x35, 0x9a, 0x5d, 0x1f, 0xdd, 0xbe, 0x60, - 0x02, 0x3f, 0x40, 0x55, 0xa9, 0x20, 0x9d, 0x52, 0xdd, 0x6b, 0x1a, 0x66, 0xb5, 0xe8, 0x2b, 0x6a, - 0x78, 0x01, 0xd5, 0x59, 0xd6, 0x0f, 0x21, 0xf6, 0x41, 0xb4, 0x4a, 0xfa, 0x32, 0x9a, 0xf9, 0xb8, - 0x53, 0xdf, 0x38, 0x05, 0xe9, 0xa4, 0xde, 0xfd, 0x63, 0xa3, 0x3b, 0x97, 0x2c, 0xe1, 0x27, 0xa8, - 0x39, 0x65, 0x1f, 0xfa, 0x7a, 0x5e, 0xcd, 0xbb, 0x6b, 0xe6, 0x35, 0x37, 0xa6, 0x8b, 0xf4, 0x7c, - 0x2f, 0xde, 0x46, 0x95, 0x4c, 0x00, 0x37, 0x59, 0x2f, 0x5c, 0x23, 0x93, 0x5d, 0x01, 0x7c, 0x2b, - 0xde, 0x4b, 0x26, 0x21, 0x2b, 0x84, 0x6a, 0x19, 0xb5, 0x33, 0x70, 0x9e, 0x70, 0x9d, 0xf1, 0xd4, - 0xce, 0x9b, 0x0a, 0xa4, 0x45, 0xed, 0xfc, 0xce, 0x95, 0x2b, 0x76, 0xfe, 0x51, 0x42, 0xb5, 0xd3, - 0x91, 0xf8, 0x21, 0xaa, 0xa9, 0x31, 0x31, 0x8b, 0xc0, 0xa4, 0x3a, 0x6b, 0x26, 0xe8, 0x1e, 0x85, - 0xd3, 0xb3, 0x0e, 0x7c, 0x0f, 0x95, 0xb3, 0xb0, 0xaf, 0x57, 0xab, 0x7b, 0x0d, 0xd3, 0x58, 0xde, - 0xdd, 0x7a, 0x46, 0x15, 0x8e, 0xbb, 0x68, 0x26, 0xe0, 0x49, 0x96, 0xaa, 0x1f, 0x42, 0x79, 0x40, - 0xea, 0x5a, 0x9f, 0x6b, 0x84, 0x9a, 0x0a, 0x7e, 0x8b, 0xaa, 0xa0, 0x5e, 0x8d, 0xb6, 0xd9, 0x58, - 0x5e, 0xbd, 0x41, 0x3e, 0x44, 0x3f, 0xb7, 0xcd, 0x58, 0xf2, 0xfd, 0xa9, 0x1c, 0x14, 0x46, 0x0b, - 0xcd, 0x76, 0x60, 0x9e, 0xa4, 0xee, 0xc1, 0xb3, 0xa8, 0x3c, 0x84, 0xfd, 0x62, 0x2d, 0xaa, 0x3e, - 0xf1, 0x53, 0x54, 0x1d, 0xa9, 0xd7, 0x6a, 0x2e, 0x67, 0xf1, 0x1a, 0xc3, 0x27, 0x4f, 0x9c, 0x16, - 0xdc, 0xf5, 0xd2, 0x9a, 0xed, 0x2d, 0x1e, 0x9e, 0x38, 0xd6, 0xd1, 0x89, 0x63, 0x1d, 0x9f, 0x38, - 0xd6, 0x41, 0xee, 0xd8, 0x87, 0xb9, 0x63, 0x1f, 0xe5, 0x8e, 0x7d, 0x9c, 0x3b, 0xf6, 0xaf, 0xdc, - 0xb1, 0xbf, 0xfc, 0x76, 0xac, 0x37, 0xff, 0x19, 0x91, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf7, - 0xd6, 0x32, 0x28, 0x68, 0x05, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/authorization/v1/generated.pb.go b/vendor/k8s.io/api/authorization/v1/generated.pb.go index fc6a25f62..0dc01bc92 100644 --- a/vendor/k8s.io/api/authorization/v1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1/generated.pb.go @@ -17,40 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto - - It has these top-level messages: - ExtraValue - LocalSubjectAccessReview - NonResourceAttributes - NonResourceRule - ResourceAttributes - ResourceRule - SelfSubjectAccessReview - SelfSubjectAccessReviewSpec - SelfSubjectRulesReview - SelfSubjectRulesReviewSpec - SubjectAccessReview - SubjectAccessReviewSpec - SubjectAccessReviewStatus - SubjectRulesReviewStatus -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -63,73 +44,397 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{0} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } func (*LocalSubjectAccessReview) ProtoMessage() {} func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} + return fileDescriptor_e50da13573e369bd, []int{1} +} +func (m *LocalSubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LocalSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LocalSubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalSubjectAccessReview.Merge(m, src) +} +func (m *LocalSubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *LocalSubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_LocalSubjectAccessReview.DiscardUnknown(m) } -func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } -func (*NonResourceAttributes) ProtoMessage() {} -func (*NonResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_LocalSubjectAccessReview proto.InternalMessageInfo -func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } -func (*NonResourceRule) ProtoMessage() {} -func (*NonResourceRule) Descriptor() ([]byte, []int) 
{ return fileDescriptorGenerated, []int{3} } +func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } +func (*NonResourceAttributes) ProtoMessage() {} +func (*NonResourceAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{2} +} +func (m *NonResourceAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NonResourceAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonResourceAttributes.Merge(m, src) +} +func (m *NonResourceAttributes) XXX_Size() int { + return m.Size() +} +func (m *NonResourceAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourceAttributes.DiscardUnknown(m) +} -func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } -func (*ResourceAttributes) ProtoMessage() {} -func (*ResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_NonResourceAttributes proto.InternalMessageInfo -func (m *ResourceRule) Reset() { *m = ResourceRule{} } -func (*ResourceRule) ProtoMessage() {} -func (*ResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } +func (*NonResourceRule) ProtoMessage() {} +func (*NonResourceRule) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{3} +} +func (m *NonResourceRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourceRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NonResourceRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonResourceRule.Merge(m, src) +} +func (m *NonResourceRule) XXX_Size() int { + return m.Size() +} +func (m *NonResourceRule) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourceRule.DiscardUnknown(m) +} -func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } -func (*SelfSubjectAccessReview) ProtoMessage() {} -func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +var xxx_messageInfo_NonResourceRule proto.InternalMessageInfo + +func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } +func (*ResourceAttributes) ProtoMessage() {} +func (*ResourceAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{4} +} +func (m *ResourceAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceAttributes.Merge(m, src) +} +func (m *ResourceAttributes) XXX_Size() int { + return m.Size() +} +func (m *ResourceAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceAttributes proto.InternalMessageInfo + +func (m *ResourceRule) Reset() { *m = ResourceRule{} } +func (*ResourceRule) ProtoMessage() {} +func (*ResourceRule) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{5} +} 
+func (m *ResourceRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceRule.Merge(m, src) +} +func (m *ResourceRule) XXX_Size() int { + return m.Size() +} +func (m *ResourceRule) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceRule.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceRule proto.InternalMessageInfo + +func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } +func (*SelfSubjectAccessReview) ProtoMessage() {} +func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{6} +} +func (m *SelfSubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectAccessReview.Merge(m, src) +} +func (m *SelfSubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectAccessReview proto.InternalMessageInfo func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptor_e50da13573e369bd, []int{7} +} +func (m *SelfSubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectAccessReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectAccessReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectAccessReviewSpec.Merge(m, src) +} +func (m *SelfSubjectAccessReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectAccessReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectAccessReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectAccessReviewSpec proto.InternalMessageInfo + +func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } +func (*SelfSubjectRulesReview) ProtoMessage() {} +func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{8} +} +func (m *SelfSubjectRulesReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectRulesReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectRulesReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectRulesReview.Merge(m, src) +} +func (m *SelfSubjectRulesReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectRulesReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectRulesReview.DiscardUnknown(m) } -func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } -func 
(*SelfSubjectRulesReview) ProtoMessage() {} -func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +var xxx_messageInfo_SelfSubjectRulesReview proto.InternalMessageInfo func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} } func (*SelfSubjectRulesReviewSpec) ProtoMessage() {} func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{9} + return fileDescriptor_e50da13573e369bd, []int{9} +} +func (m *SelfSubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectRulesReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectRulesReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectRulesReviewSpec.Merge(m, src) +} +func (m *SelfSubjectRulesReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectRulesReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectRulesReviewSpec.DiscardUnknown(m) } -func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } -func (*SubjectAccessReview) ProtoMessage() {} -func (*SubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +var xxx_messageInfo_SelfSubjectRulesReviewSpec proto.InternalMessageInfo + +func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } +func (*SubjectAccessReview) ProtoMessage() {} +func (*SubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{10} +} +func (m *SubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReview.Merge(m, src) +} +func (m *SubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *SubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReview proto.InternalMessageInfo func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } func (*SubjectAccessReviewSpec) ProtoMessage() {} func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{11} + return fileDescriptor_e50da13573e369bd, []int{11} +} +func (m *SubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReviewSpec.Merge(m, src) +} +func (m *SubjectAccessReviewSpec) XXX_Size() int { + return m.Size() } +func (m *SubjectAccessReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReviewSpec proto.InternalMessageInfo func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} } func (*SubjectAccessReviewStatus) ProtoMessage() {} func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { - return 
fileDescriptorGenerated, []int{12} + return fileDescriptor_e50da13573e369bd, []int{12} +} +func (m *SubjectAccessReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReviewStatus.Merge(m, src) +} +func (m *SubjectAccessReviewStatus) XXX_Size() int { + return m.Size() } +func (m *SubjectAccessReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReviewStatus proto.InternalMessageInfo func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} } func (*SubjectRulesReviewStatus) ProtoMessage() {} func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{13} + return fileDescriptor_e50da13573e369bd, []int{13} +} +func (m *SubjectRulesReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } +func (m *SubjectRulesReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectRulesReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectRulesReviewStatus.Merge(m, src) +} +func (m *SubjectRulesReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SubjectRulesReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectRulesReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectRulesReviewStatus proto.InternalMessageInfo func init() { proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authorization.v1.ExtraValue") @@ -144,13 +449,95 @@ func init() { proto.RegisterType((*SelfSubjectRulesReviewSpec)(nil), "k8s.io.api.authorization.v1.SelfSubjectRulesReviewSpec") proto.RegisterType((*SubjectAccessReview)(nil), "k8s.io.api.authorization.v1.SubjectAccessReview") proto.RegisterType((*SubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1.SubjectAccessReviewSpec") + proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authorization.v1.SubjectAccessReviewSpec.ExtraEntry") proto.RegisterType((*SubjectAccessReviewStatus)(nil), "k8s.io.api.authorization.v1.SubjectAccessReviewStatus") proto.RegisterType((*SubjectRulesReviewStatus)(nil), "k8s.io.api.authorization.v1.SubjectRulesReviewStatus") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto", fileDescriptor_e50da13573e369bd) +} + +var fileDescriptor_e50da13573e369bd = []byte{ + // 1140 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0xcf, 0x6f, 0x1b, 0xc5, + 0x17, 0xf7, 0xae, 0xed, 0xc4, 0x1e, 0x37, 0xdf, 0xa4, 0x13, 0xa5, 0xd9, 0xa6, 0xfa, 0xda, 0xd1, + 0x22, 0x41, 0x2a, 0xca, 0x2e, 0xb1, 0xda, 0x26, 0xaa, 0x54, 0xa1, 0x58, 0x89, 0x50, 0xa4, 0xb6, + 0x54, 0x13, 0x25, 0x12, 0x45, 0x20, 0xc6, 0xeb, 0x89, 0xbd, 0xc4, 0xde, 0x5d, 0x66, 0x66, 0x1d, + 0xc2, 0xa9, 0x12, 0xff, 0x00, 0x47, 0x0e, 0x1c, 0xf8, 0x0f, 0xb8, 0x20, 0x71, 0xe3, 0xc0, 0x01, + 0xe5, 0xd8, 0x63, 0x91, 0x90, 0x45, 0x96, 0x33, 0xff, 0x03, 0x9a, 0xd9, 0xb1, 0x77, 0x9d, 0xac, + 0xdd, 0x84, 0x03, 0xbd, 0xf4, 0xb6, 0xfb, 0x3e, 0x9f, 0xf7, 0xe6, 0xcd, 0xfb, 0x35, 0x0f, 0x6c, + 0x1f, 0x6d, 0x32, 0xcb, 
0xf5, 0xed, 0xa3, 0xb0, 0x49, 0xa8, 0x47, 0x38, 0x61, 0x76, 0x9f, 0x78, + 0x2d, 0x9f, 0xda, 0x0a, 0xc0, 0x81, 0x6b, 0xe3, 0x90, 0x77, 0x7c, 0xea, 0x7e, 0x8d, 0xb9, 0xeb, + 0x7b, 0x76, 0x7f, 0xdd, 0x6e, 0x13, 0x8f, 0x50, 0xcc, 0x49, 0xcb, 0x0a, 0xa8, 0xcf, 0x7d, 0x78, + 0x2b, 0x26, 0x5b, 0x38, 0x70, 0xad, 0x31, 0xb2, 0xd5, 0x5f, 0x5f, 0x79, 0xaf, 0xed, 0xf2, 0x4e, + 0xd8, 0xb4, 0x1c, 0xbf, 0x67, 0xb7, 0xfd, 0xb6, 0x6f, 0x4b, 0x9d, 0x66, 0x78, 0x28, 0xff, 0xe4, + 0x8f, 0xfc, 0x8a, 0x6d, 0xad, 0xdc, 0x4d, 0x0e, 0xee, 0x61, 0xa7, 0xe3, 0x7a, 0x84, 0x9e, 0xd8, + 0xc1, 0x51, 0x5b, 0x08, 0x98, 0xdd, 0x23, 0x1c, 0x67, 0x78, 0xb0, 0x62, 0x4f, 0xd2, 0xa2, 0xa1, + 0xc7, 0xdd, 0x1e, 0xb9, 0xa0, 0x70, 0xff, 0x55, 0x0a, 0xcc, 0xe9, 0x90, 0x1e, 0x3e, 0xaf, 0x67, + 0x6e, 0x00, 0xb0, 0xf3, 0x15, 0xa7, 0xf8, 0x00, 0x77, 0x43, 0x02, 0x6b, 0xa0, 0xe8, 0x72, 0xd2, + 0x63, 0x86, 0xb6, 0x9a, 0x5f, 0x2b, 0x37, 0xca, 0xd1, 0xa0, 0x56, 0xdc, 0x15, 0x02, 0x14, 0xcb, + 0x1f, 0x94, 0xbe, 0xfb, 0xa1, 0x96, 0x7b, 0xfe, 0xc7, 0x6a, 0xce, 0xfc, 0x49, 0x07, 0xc6, 0x23, + 0xdf, 0xc1, 0xdd, 0xbd, 0xb0, 0xf9, 0x05, 0x71, 0xf8, 0x96, 0xe3, 0x10, 0xc6, 0x10, 0xe9, 0xbb, + 0xe4, 0x18, 0x7e, 0x0e, 0x4a, 0xe2, 0x66, 0x2d, 0xcc, 0xb1, 0xa1, 0xad, 0x6a, 0x6b, 0x95, 0xfa, + 0xfb, 0x56, 0x12, 0xd3, 0x91, 0x83, 0x56, 0x70, 0xd4, 0x16, 0x02, 0x66, 0x09, 0xb6, 0xd5, 0x5f, + 0xb7, 0x3e, 0x92, 0xb6, 0x1e, 0x13, 0x8e, 0x1b, 0xf0, 0x74, 0x50, 0xcb, 0x45, 0x83, 0x1a, 0x48, + 0x64, 0x68, 0x64, 0x15, 0x1e, 0x80, 0x02, 0x0b, 0x88, 0x63, 0xe8, 0xd2, 0xfa, 0x5d, 0x6b, 0x4a, + 0xc6, 0xac, 0x0c, 0x0f, 0xf7, 0x02, 0xe2, 0x34, 0xae, 0xa9, 0x13, 0x0a, 0xe2, 0x0f, 0x49, 0x7b, + 0xf0, 0x33, 0x30, 0xc3, 0x38, 0xe6, 0x21, 0x33, 0xf2, 0xd2, 0xf2, 0xfd, 0x2b, 0x5b, 0x96, 0xda, + 0x8d, 0xff, 0x29, 0xdb, 0x33, 0xf1, 0x3f, 0x52, 0x56, 0xcd, 0x4f, 0xc0, 0xd2, 0x13, 0xdf, 0x43, + 0x84, 0xf9, 0x21, 0x75, 0xc8, 0x16, 0xe7, 0xd4, 0x6d, 0x86, 0x9c, 0x30, 0xb8, 0x0a, 0x0a, 0x01, + 0xe6, 0x1d, 0x19, 0xae, 0x72, 0xe2, 0xda, 0x53, 0xcc, 0x3b, 0x48, 0x22, 0x82, 0xd1, 0x27, 0xb4, + 0x29, 0xaf, 0x9c, 0x62, 0x1c, 0x10, 0xda, 0x44, 0x12, 0x31, 0xbf, 0x04, 0xf3, 0x29, 0xe3, 0x28, + 0xec, 0xca, 0x8c, 0x0a, 0x68, 0x2c, 0xa3, 0x42, 0x83, 0xa1, 0x58, 0x0e, 0x1f, 0x82, 0x79, 0x2f, + 0xd1, 0xd9, 0x47, 0x8f, 0x98, 0xa1, 0x4b, 0xea, 0x62, 0x34, 0xa8, 0xa5, 0xcd, 0x09, 0x08, 0x9d, + 0xe7, 0x9a, 0xbf, 0xe8, 0x00, 0x66, 0xdc, 0xc6, 0x06, 0x65, 0x0f, 0xf7, 0x08, 0x0b, 0xb0, 0x43, + 0xd4, 0x95, 0xae, 0x2b, 0x87, 0xcb, 0x4f, 0x86, 0x00, 0x4a, 0x38, 0xaf, 0xbe, 0x1c, 0x7c, 0x0b, + 0x14, 0xdb, 0xd4, 0x0f, 0x03, 0x99, 0x98, 0x72, 0x63, 0x4e, 0x51, 0x8a, 0x1f, 0x0a, 0x21, 0x8a, + 0x31, 0x78, 0x1b, 0xcc, 0xf6, 0x09, 0x65, 0xae, 0xef, 0x19, 0x05, 0x49, 0x9b, 0x57, 0xb4, 0xd9, + 0x83, 0x58, 0x8c, 0x86, 0x38, 0xbc, 0x03, 0x4a, 0x54, 0x39, 0x6e, 0x14, 0x25, 0x77, 0x41, 0x71, + 0x4b, 0xa3, 0x08, 0x8e, 0x18, 0xf0, 0x1e, 0xa8, 0xb0, 0xb0, 0x39, 0x52, 0x98, 0x91, 0x0a, 0x8b, + 0x4a, 0xa1, 0xb2, 0x97, 0x40, 0x28, 0xcd, 0x13, 0xd7, 0x12, 0x77, 0x34, 0x66, 0xc7, 0xaf, 0x25, + 0x42, 0x80, 0x24, 0x62, 0xfe, 0xaa, 0x81, 0x6b, 0x57, 0xcb, 0xd8, 0xbb, 0xa0, 0x8c, 0x03, 0x57, + 0x5e, 0x7b, 0x98, 0xab, 0x39, 0x11, 0xd7, 0xad, 0xa7, 0xbb, 0xb1, 0x10, 0x25, 0xb8, 0x20, 0x0f, + 0x9d, 0x11, 0x25, 0x3d, 0x22, 0x0f, 0x8f, 0x64, 0x28, 0xc1, 0xe1, 0x06, 0x98, 0x1b, 0xfe, 0xc8, + 0x24, 0x19, 0x05, 0xa9, 0x70, 0x3d, 0x1a, 0xd4, 0xe6, 0x50, 0x1a, 0x40, 0xe3, 0x3c, 0xf3, 0x67, + 0x1d, 0x2c, 0xef, 0x91, 0xee, 0xe1, 0xeb, 0x99, 0x05, 0xcf, 0xc6, 0x66, 0xc1, 0xe6, 0xf4, 0x8e, + 0xcd, 0xf6, 0xf2, 0xb5, 0xcd, 0x83, 0xef, 0x75, 
0x70, 0x6b, 0x8a, 0x4f, 0xf0, 0x18, 0x40, 0x7a, + 0xa1, 0xbd, 0x54, 0x1c, 0xed, 0xa9, 0xbe, 0x5c, 0xec, 0xca, 0xc6, 0x8d, 0x68, 0x50, 0xcb, 0xe8, + 0x56, 0x94, 0x71, 0x04, 0xfc, 0x46, 0x03, 0x4b, 0x5e, 0xd6, 0xa4, 0x52, 0x61, 0xae, 0x4f, 0x3d, + 0x3c, 0x73, 0xc6, 0x35, 0x6e, 0x46, 0x83, 0x5a, 0xf6, 0xf8, 0x43, 0xd9, 0x67, 0x89, 0x57, 0xe6, + 0x46, 0x2a, 0x3c, 0xa2, 0x41, 0xfe, 0xbb, 0xba, 0xfa, 0x78, 0xac, 0xae, 0x36, 0x2e, 0x5b, 0x57, + 0x29, 0x27, 0x27, 0x96, 0xd5, 0xa7, 0xe7, 0xca, 0xea, 0xde, 0x65, 0xca, 0x2a, 0x6d, 0x78, 0x7a, + 0x55, 0x3d, 0x06, 0x2b, 0x93, 0x1d, 0xba, 0xf2, 0x70, 0x36, 0x7f, 0xd4, 0xc1, 0xe2, 0x9b, 0x67, + 0xfe, 0x2a, 0x6d, 0xfd, 0x5b, 0x01, 0x2c, 0xbf, 0x69, 0xe9, 0x49, 0x8b, 0x4e, 0xc8, 0x08, 0x55, + 0xcf, 0xf8, 0x28, 0x39, 0xfb, 0x8c, 0x50, 0x24, 0x11, 0x68, 0x82, 0x99, 0x76, 0xfc, 0xba, 0xc5, + 0xef, 0x0f, 0x10, 0x01, 0x56, 0x4f, 0x9b, 0x42, 0x60, 0x0b, 0x14, 0x89, 0xd8, 0x5b, 0x8d, 0xe2, + 0x6a, 0x7e, 0xad, 0x52, 0xff, 0xe0, 0xdf, 0x54, 0x86, 0x25, 0x37, 0xdf, 0x1d, 0x8f, 0xd3, 0x93, + 0x64, 0x9d, 0x90, 0x32, 0x14, 0x1b, 0x87, 0xff, 0x07, 0xf9, 0xd0, 0x6d, 0xa9, 0xd7, 0xbe, 0xa2, + 0x28, 0xf9, 0xfd, 0xdd, 0x6d, 0x24, 0xe4, 0x2b, 0x58, 0x2d, 0xcf, 0xd2, 0x04, 0x5c, 0x00, 0xf9, + 0x23, 0x72, 0x12, 0x37, 0x14, 0x12, 0x9f, 0xf0, 0x21, 0x28, 0xf6, 0xc5, 0x5e, 0xad, 0xe2, 0xfb, + 0xce, 0x54, 0x27, 0x93, 0x35, 0x1c, 0xc5, 0x5a, 0x0f, 0xf4, 0x4d, 0xcd, 0xfc, 0x5d, 0x03, 0x37, + 0x27, 0x96, 0x9f, 0x58, 0x77, 0x70, 0xb7, 0xeb, 0x1f, 0x93, 0x96, 0x3c, 0xb6, 0x94, 0xac, 0x3b, + 0x5b, 0xb1, 0x18, 0x0d, 0x71, 0xf8, 0x36, 0x98, 0x69, 0x11, 0xcf, 0x25, 0x2d, 0xb9, 0x18, 0x95, + 0x92, 0xca, 0xdd, 0x96, 0x52, 0xa4, 0x50, 0xc1, 0xa3, 0x04, 0x33, 0xdf, 0x53, 0xab, 0xd8, 0x88, + 0x87, 0xa4, 0x14, 0x29, 0x14, 0x6e, 0x81, 0x79, 0x22, 0xdc, 0x94, 0xfe, 0xef, 0x50, 0xea, 0x0f, + 0x33, 0xba, 0xac, 0x14, 0xe6, 0x77, 0xc6, 0x61, 0x74, 0x9e, 0x6f, 0xfe, 0xad, 0x03, 0x63, 0xd2, + 0x68, 0x83, 0x87, 0xc9, 0x2e, 0x22, 0x41, 0xb9, 0x0e, 0x55, 0xea, 0xb7, 0x2f, 0xd5, 0x20, 0x42, + 0xa3, 0xb1, 0xa4, 0x1c, 0x99, 0x4b, 0x4b, 0x53, 0xab, 0x8b, 0xfc, 0x85, 0x14, 0x2c, 0x78, 0xe3, + 0x3b, 0x73, 0xbc, 0x54, 0x55, 0xea, 0x77, 0x2e, 0xdb, 0x0e, 0xf2, 0x34, 0x43, 0x9d, 0xb6, 0x70, + 0x0e, 0x60, 0xe8, 0x82, 0x7d, 0x58, 0x07, 0xc0, 0xf5, 0x1c, 0xbf, 0x17, 0x74, 0x09, 0x27, 0x32, + 0x6c, 0xa5, 0x64, 0x0e, 0xee, 0x8e, 0x10, 0x94, 0x62, 0x65, 0xc5, 0xbb, 0x70, 0xb5, 0x78, 0x37, + 0xd6, 0x4e, 0xcf, 0xaa, 0xb9, 0x17, 0x67, 0xd5, 0xdc, 0xcb, 0xb3, 0x6a, 0xee, 0x79, 0x54, 0xd5, + 0x4e, 0xa3, 0xaa, 0xf6, 0x22, 0xaa, 0x6a, 0x2f, 0xa3, 0xaa, 0xf6, 0x67, 0x54, 0xd5, 0xbe, 0xfd, + 0xab, 0x9a, 0x7b, 0xa6, 0xf7, 0xd7, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x99, 0x87, 0xb8, 0x24, + 0x47, 0x0f, 0x00, 0x00, +} + func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -158,32 +545,31 @@ func (m ExtraValue) Marshal() (dAtA []byte, err error) { } func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m) > 0 { - for _, s := range m { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 
0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -191,41 +577,52 @@ func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *LocalSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LocalSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NonResourceAttributes) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -233,25 +630,32 @@ func (m *NonResourceAttributes) Marshal() (dAtA []byte, err error) { } func (m *NonResourceAttributes) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x12 - i++ + i -= len(m.Verb) + copy(dAtA[i:], m.Verb) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) - i += copy(dAtA[i:], m.Verb) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NonResourceRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -259,47 +663,40 @@ func (m *NonResourceRule) Marshal() (dAtA []byte, err error) { } func (m *NonResourceRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonResourceRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - 
dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } if len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -307,45 +704,57 @@ func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) { } func (m *ResourceAttributes) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) - i += copy(dAtA[i:], m.Verb) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i += copy(dAtA[i:], m.Resource) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) - i += copy(dAtA[i:], m.Subresource) - dAtA[i] = 0x3a - i++ + i -= len(m.Name) + copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + i-- + dAtA[i] = 0x3a + i -= len(m.Subresource) + copy(dAtA[i:], m.Subresource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) + i-- + dAtA[i] = 0x32 + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x2a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x22 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x1a + i -= len(m.Verb) + copy(dAtA[i:], m.Verb) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -353,77 +762,58 @@ func (m *ResourceRule) Marshal() (dAtA []byte, err error) { } func (m *ResourceRule) MarshalTo(dAtA []byte) (int, error) { - var i int + 
size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x22 } } if len(m.Resources) > 0 { - for _, s := range m.Resources { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *SelfSubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -431,41 +821,52 @@ func (m *SelfSubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n6, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + 
dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SelfSubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -473,37 +874,46 @@ func (m *SelfSubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectAccessReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ResourceAttributes != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) - n7, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } if m.NonResourceAttributes != nil { + { + size, err := m.NonResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) - n8, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.ResourceAttributes != nil { + { + size, err := m.ResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -511,41 +921,52 @@ func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectRulesReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectRulesReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n10, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n11, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SelfSubjectRulesReviewSpec) 
Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -553,21 +974,27 @@ func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectRulesReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectRulesReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -575,41 +1002,52 @@ func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *SubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n12, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n13, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n13 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n14, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n14 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -617,91 +1055,94 @@ func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *SubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ResourceAttributes != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) - n15, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x32 + if len(m.Extra) > 0 { + keysForExtra := make([]string, 0, len(m.Extra)) + for k := range m.Extra { + 
keysForExtra = append(keysForExtra, string(k)) } - i += n15 - } - if m.NonResourceAttributes != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) - n16, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a } - i += n16 } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) if len(m.Groups) > 0 { - for _, s := range m.Groups { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.Extra) > 0 { - keysForExtra := make([]string, 0, len(m.Extra)) - for k := range m.Extra { - keysForExtra = append(keysForExtra, string(k)) + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x1a + if m.NonResourceAttributes != nil { + { + size, err := m.NonResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for _, k := range keysForExtra { - dAtA[i] = 0x2a - i++ - v := m.Extra[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n17, err := (&v).MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x12 + } + if m.ResourceAttributes != nil { + { + size, err := m.ResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n17 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - return i, nil + return len(dAtA) - i, nil } func (m *SubjectAccessReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -709,41 +1150,48 @@ func (m *SubjectAccessReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *SubjectAccessReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 
- i++ - if m.Allowed { + i-- + if m.Denied { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) - i += copy(dAtA[i:], m.EvaluationError) + i-- dAtA[i] = 0x20 - i++ - if m.Denied { + i -= len(m.EvaluationError) + copy(dAtA[i:], m.EvaluationError) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i-- + dAtA[i] = 0x1a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x12 + i-- + if m.Allowed { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -751,59 +1199,74 @@ func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectRulesReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.ResourceRules) > 0 { - for _, msg := range m.ResourceRules { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + i -= len(m.EvaluationError) + copy(dAtA[i:], m.EvaluationError) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i-- + dAtA[i] = 0x22 + i-- + if m.Incomplete { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x18 if len(m.NonResourceRules) > 0 { - for _, msg := range m.NonResourceRules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.NonResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NonResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x18 - i++ - if m.Incomplete { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.ResourceRules) > 0 { + for iNdEx := len(m.ResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) - i += copy(dAtA[i:], m.EvaluationError) - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m ExtraValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m) > 0 { @@ -816,6 +1279,9 @@ func (m ExtraValue) Size() (n int) { } func (m *LocalSubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -828,6 +1294,9 @@ func (m *LocalSubjectAccessReview) Size() (n 
int) { } func (m *NonResourceAttributes) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Path) @@ -838,6 +1307,9 @@ func (m *NonResourceAttributes) Size() (n int) { } func (m *NonResourceRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Verbs) > 0 { @@ -856,6 +1328,9 @@ func (m *NonResourceRule) Size() (n int) { } func (m *ResourceAttributes) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Namespace) @@ -876,6 +1351,9 @@ func (m *ResourceAttributes) Size() (n int) { } func (m *ResourceRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Verbs) > 0 { @@ -906,6 +1384,9 @@ func (m *ResourceRule) Size() (n int) { } func (m *SelfSubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -918,6 +1399,9 @@ func (m *SelfSubjectAccessReview) Size() (n int) { } func (m *SelfSubjectAccessReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ResourceAttributes != nil { @@ -932,6 +1416,9 @@ func (m *SelfSubjectAccessReviewSpec) Size() (n int) { } func (m *SelfSubjectRulesReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -944,6 +1431,9 @@ func (m *SelfSubjectRulesReview) Size() (n int) { } func (m *SelfSubjectRulesReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Namespace) @@ -952,6 +1442,9 @@ func (m *SelfSubjectRulesReviewSpec) Size() (n int) { } func (m *SubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -964,6 +1457,9 @@ func (m *SubjectAccessReview) Size() (n int) { } func (m *SubjectAccessReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ResourceAttributes != nil { @@ -997,6 +1493,9 @@ func (m *SubjectAccessReviewSpec) Size() (n int) { } func (m *SubjectAccessReviewStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -1009,6 +1508,9 @@ func (m *SubjectAccessReviewStatus) Size() (n int) { } func (m *SubjectRulesReviewStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.ResourceRules) > 0 { @@ -1030,14 +1532,7 @@ func (m *SubjectRulesReviewStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1047,7 +1542,7 @@ func (this *LocalSubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&LocalSubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1110,7 +1605,7 @@ func (this *SelfSubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", 
"k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectAccessReviewSpec", "SelfSubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1122,8 +1617,8 @@ func (this *SelfSubjectAccessReviewSpec) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectAccessReviewSpec{`, - `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, - `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `ResourceAttributes:` + strings.Replace(this.ResourceAttributes.String(), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(this.NonResourceAttributes.String(), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, `}`, }, "") return s @@ -1133,7 +1628,7 @@ func (this *SelfSubjectRulesReview) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectRulesReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectRulesReviewSpec", "SelfSubjectRulesReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectRulesReviewStatus", "SubjectRulesReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1155,7 +1650,7 @@ func (this *SubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&SubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1177,8 +1672,8 @@ func (this *SubjectAccessReviewSpec) String() string { } mapStringForExtra += "}" s := strings.Join([]string{`&SubjectAccessReviewSpec{`, - `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, - `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `ResourceAttributes:` + strings.Replace(this.ResourceAttributes.String(), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(this.NonResourceAttributes.String(), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, `User:` + fmt.Sprintf("%v", this.User) + `,`, `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, 
`Extra:` + mapStringForExtra + `,`, @@ -1204,9 +1699,19 @@ func (this *SubjectRulesReviewStatus) String() string { if this == nil { return "nil" } + repeatedStringForResourceRules := "[]ResourceRule{" + for _, f := range this.ResourceRules { + repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "ResourceRule", "ResourceRule", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceRules += "}" + repeatedStringForNonResourceRules := "[]NonResourceRule{" + for _, f := range this.NonResourceRules { + repeatedStringForNonResourceRules += strings.Replace(strings.Replace(f.String(), "NonResourceRule", "NonResourceRule", 1), `&`, ``, 1) + "," + } + repeatedStringForNonResourceRules += "}" s := strings.Join([]string{`&SubjectRulesReviewStatus{`, - `ResourceRules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ResourceRules), "ResourceRule", "ResourceRule", 1), `&`, ``, 1) + `,`, - `NonResourceRules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NonResourceRules), "NonResourceRule", "NonResourceRule", 1), `&`, ``, 1) + `,`, + `ResourceRules:` + repeatedStringForResourceRules + `,`, + `NonResourceRules:` + repeatedStringForNonResourceRules + `,`, `Incomplete:` + fmt.Sprintf("%v", this.Incomplete) + `,`, `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, `}`, @@ -1236,7 +1741,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1264,7 +1769,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1274,6 +1779,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1288,6 +1796,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1315,7 +1826,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1343,7 +1854,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1352,6 +1863,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1373,7 +1887,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1382,6 +1896,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1403,7 +1920,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << 
shift if b < 0x80 { break } @@ -1412,6 +1929,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1428,6 +1948,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1455,7 +1978,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1483,7 +2006,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1493,6 +2016,9 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1512,7 +2038,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1522,6 +2048,9 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1536,6 +2065,9 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1563,7 +2095,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1591,7 +2123,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1601,6 +2133,9 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1620,7 +2155,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1630,6 +2165,9 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1644,6 +2182,9 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1671,7 +2212,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } 
@@ -1699,7 +2240,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1709,6 +2250,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1728,7 +2272,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1738,6 +2282,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1757,7 +2304,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1767,6 +2314,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1786,7 +2336,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1796,6 +2346,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1815,7 +2368,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1825,6 +2378,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1844,7 +2400,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1854,6 +2410,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1873,7 +2432,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1883,6 +2442,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1897,6 +2459,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } @@ -1924,7 +2489,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1952,7 +2517,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1962,6 +2527,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1981,7 +2549,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1991,6 +2559,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2010,7 +2581,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2020,6 +2591,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2039,7 +2613,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2049,6 +2623,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2063,6 +2640,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2090,7 +2670,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2118,7 +2698,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2127,6 +2707,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2148,7 +2731,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2157,6 +2740,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2178,7 +2764,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2187,6 +2773,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2203,6 +2792,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2230,7 +2822,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2258,7 +2850,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2267,6 +2859,9 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2291,7 +2886,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2300,6 +2895,9 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2319,6 +2917,9 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2346,7 +2947,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2374,7 +2975,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2383,6 +2984,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2404,7 +3008,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2413,6 +3017,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2434,7 +3041,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2443,6 +3050,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2459,6 +3069,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2486,7 +3099,7 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2514,7 +3127,7 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2524,6 +3137,9 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2538,6 +3154,9 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2565,7 +3184,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2593,7 +3212,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2602,6 +3221,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2623,7 +3245,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2632,6 +3254,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2653,7 +3278,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2662,6 +3287,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2678,6 +3306,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2705,7 +3336,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2733,7 +3364,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2742,6 
+3373,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2766,7 +3400,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2775,6 +3409,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2799,7 +3436,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2809,6 +3446,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2828,7 +3468,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2838,6 +3478,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2857,7 +3500,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2866,6 +3509,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2886,7 +3532,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2903,7 +3549,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2913,6 +3559,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -2929,7 +3578,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2938,7 +3587,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { @@ -2980,7 +3629,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b 
< 0x80 { break } @@ -2990,6 +3639,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3004,6 +3656,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3031,7 +3686,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3059,7 +3714,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3079,7 +3734,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3089,6 +3744,9 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3108,7 +3766,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3118,6 +3776,9 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3137,7 +3798,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3152,6 +3813,9 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3179,7 +3843,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3207,7 +3871,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3216,6 +3880,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3238,7 +3905,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3247,6 +3914,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3269,7 +3939,7 @@ func (m 
*SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			v |= (int(b) & 0x7F) << shift
+			v |= int(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3289,7 +3959,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			stringLen |= (uint64(b) & 0x7F) << shift
+			stringLen |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3299,6 +3969,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthGenerated
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3313,6 +3986,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthGenerated
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3379,10 +4055,13 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				break
 			}
 		}
-		iNdEx += length
 		if length < 0 {
 			return 0, ErrInvalidLengthGenerated
 		}
+		iNdEx += length
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
 		return iNdEx, nil
 	case 3:
 		for {
@@ -3411,6 +4090,9 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, err
 			}
 			iNdEx = start + next
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthGenerated
+			}
 		}
 		return iNdEx, nil
 	case 4:
@@ -3429,83 +4111,3 @@ var (
 	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto", fileDescriptorGenerated)
-}
-
-var fileDescriptorGenerated = []byte{
-	// 1140 bytes of a gzipped FileDescriptorProto
-	// (gzipped file descriptor bytes omitted)
-}
diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go
index 7cce98eb1..f0def20b9 100644
--- a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go
@@ -17,40 +17,21 @@ limitations under the License.
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto
-/*
-	Package v1beta1 is a generated protocol buffer package.
-
-	It is generated from these files:
-		k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto
-
-	It has these top-level messages:
-		ExtraValue
-		LocalSubjectAccessReview
-		NonResourceAttributes
-		NonResourceRule
-		ResourceAttributes
-		ResourceRule
-		SelfSubjectAccessReview
-		SelfSubjectAccessReviewSpec
-		SelfSubjectRulesReview
-		SelfSubjectRulesReviewSpec
-		SubjectAccessReview
-		SubjectAccessReviewSpec
-		SubjectAccessReviewStatus
-		SubjectRulesReviewStatus
-*/
 package v1beta1
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
+import (
+	fmt "fmt"
 
-import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	io "io"
 
-import strings "strings"
-import reflect "reflect"
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
 
-import io "io"
+	math "math"
+	math_bits "math/bits"
+	reflect "reflect"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -63,74 +44,398 @@ var _ = math.Inf
 // proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{0} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } func (*LocalSubjectAccessReview) ProtoMessage() {} func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} + return fileDescriptor_43130d8376f09103, []int{1} +} +func (m *LocalSubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LocalSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LocalSubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalSubjectAccessReview.Merge(m, src) +} +func (m *LocalSubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *LocalSubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_LocalSubjectAccessReview.DiscardUnknown(m) } -func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } -func (*NonResourceAttributes) ProtoMessage() {} -func (*NonResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_LocalSubjectAccessReview proto.InternalMessageInfo -func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } -func (*NonResourceRule) ProtoMessage() {} -func (*NonResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } +func (*NonResourceAttributes) ProtoMessage() {} +func (*NonResourceAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{2} +} +func (m *NonResourceAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NonResourceAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonResourceAttributes.Merge(m, src) +} +func (m *NonResourceAttributes) XXX_Size() int { + return m.Size() +} +func (m *NonResourceAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourceAttributes.DiscardUnknown(m) +} -func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } -func (*ResourceAttributes) ProtoMessage() {} -func (*ResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var 
xxx_messageInfo_NonResourceAttributes proto.InternalMessageInfo -func (m *ResourceRule) Reset() { *m = ResourceRule{} } -func (*ResourceRule) ProtoMessage() {} -func (*ResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } +func (*NonResourceRule) ProtoMessage() {} +func (*NonResourceRule) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{3} +} +func (m *NonResourceRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourceRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NonResourceRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonResourceRule.Merge(m, src) +} +func (m *NonResourceRule) XXX_Size() int { + return m.Size() +} +func (m *NonResourceRule) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourceRule.DiscardUnknown(m) +} -func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } -func (*SelfSubjectAccessReview) ProtoMessage() {} -func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +var xxx_messageInfo_NonResourceRule proto.InternalMessageInfo + +func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } +func (*ResourceAttributes) ProtoMessage() {} +func (*ResourceAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{4} +} +func (m *ResourceAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceAttributes.Merge(m, src) +} +func (m *ResourceAttributes) XXX_Size() int { + return m.Size() +} +func (m *ResourceAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceAttributes proto.InternalMessageInfo + +func (m *ResourceRule) Reset() { *m = ResourceRule{} } +func (*ResourceRule) ProtoMessage() {} +func (*ResourceRule) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{5} +} +func (m *ResourceRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceRule.Merge(m, src) +} +func (m *ResourceRule) XXX_Size() int { + return m.Size() +} +func (m *ResourceRule) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceRule.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceRule proto.InternalMessageInfo + +func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } +func (*SelfSubjectAccessReview) ProtoMessage() {} +func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{6} +} +func (m *SelfSubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { 
+ return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectAccessReview.Merge(m, src) +} +func (m *SelfSubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectAccessReview proto.InternalMessageInfo func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptor_43130d8376f09103, []int{7} +} +func (m *SelfSubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectAccessReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectAccessReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectAccessReviewSpec.Merge(m, src) +} +func (m *SelfSubjectAccessReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectAccessReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectAccessReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectAccessReviewSpec proto.InternalMessageInfo + +func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } +func (*SelfSubjectRulesReview) ProtoMessage() {} +func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{8} +} +func (m *SelfSubjectRulesReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectRulesReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectRulesReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectRulesReview.Merge(m, src) +} +func (m *SelfSubjectRulesReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectRulesReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectRulesReview.DiscardUnknown(m) } -func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } -func (*SelfSubjectRulesReview) ProtoMessage() {} -func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +var xxx_messageInfo_SelfSubjectRulesReview proto.InternalMessageInfo func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} } func (*SelfSubjectRulesReviewSpec) ProtoMessage() {} func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{9} + return fileDescriptor_43130d8376f09103, []int{9} +} +func (m *SelfSubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectRulesReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectRulesReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectRulesReviewSpec.Merge(m, src) +} +func (m *SelfSubjectRulesReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectRulesReviewSpec) XXX_DiscardUnknown() { + 
xxx_messageInfo_SelfSubjectRulesReviewSpec.DiscardUnknown(m) } -func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } -func (*SubjectAccessReview) ProtoMessage() {} -func (*SubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +var xxx_messageInfo_SelfSubjectRulesReviewSpec proto.InternalMessageInfo + +func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } +func (*SubjectAccessReview) ProtoMessage() {} +func (*SubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{10} +} +func (m *SubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReview.Merge(m, src) +} +func (m *SubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *SubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReview proto.InternalMessageInfo func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } func (*SubjectAccessReviewSpec) ProtoMessage() {} func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{11} + return fileDescriptor_43130d8376f09103, []int{11} +} +func (m *SubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } +func (m *SubjectAccessReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReviewSpec.Merge(m, src) +} +func (m *SubjectAccessReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SubjectAccessReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReviewSpec proto.InternalMessageInfo func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} } func (*SubjectAccessReviewStatus) ProtoMessage() {} func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{12} + return fileDescriptor_43130d8376f09103, []int{12} +} +func (m *SubjectAccessReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReviewStatus.Merge(m, src) } +func (m *SubjectAccessReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SubjectAccessReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReviewStatus proto.InternalMessageInfo func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} } func (*SubjectRulesReviewStatus) ProtoMessage() {} func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{13} + return fileDescriptor_43130d8376f09103, []int{13} +} 
+func (m *SubjectRulesReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectRulesReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectRulesReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectRulesReviewStatus.Merge(m, src) +} +func (m *SubjectRulesReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SubjectRulesReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectRulesReviewStatus.DiscardUnknown(m) } +var xxx_messageInfo_SubjectRulesReviewStatus proto.InternalMessageInfo + func init() { proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authorization.v1beta1.ExtraValue") proto.RegisterType((*LocalSubjectAccessReview)(nil), "k8s.io.api.authorization.v1beta1.LocalSubjectAccessReview") @@ -144,13 +449,95 @@ func init() { proto.RegisterType((*SelfSubjectRulesReviewSpec)(nil), "k8s.io.api.authorization.v1beta1.SelfSubjectRulesReviewSpec") proto.RegisterType((*SubjectAccessReview)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReview") proto.RegisterType((*SubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReviewSpec") + proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReviewSpec.ExtraEntry") proto.RegisterType((*SubjectAccessReviewStatus)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReviewStatus") proto.RegisterType((*SubjectRulesReviewStatus)(nil), "k8s.io.api.authorization.v1beta1.SubjectRulesReviewStatus") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto", fileDescriptor_43130d8376f09103) +} + +var fileDescriptor_43130d8376f09103 = []byte{ + // 1141 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xf7, 0xfa, 0x4f, 0x62, 0x8f, 0x1b, 0x92, 0x4e, 0x94, 0x66, 0x1b, 0x84, 0x6d, 0x19, 0x09, + 0x05, 0xd1, 0xee, 0x92, 0xa8, 0x90, 0x12, 0xe8, 0x21, 0x56, 0x22, 0x14, 0xa9, 0x2d, 0xd5, 0x44, + 0xc9, 0x81, 0x4a, 0xc0, 0xec, 0x7a, 0x62, 0x2f, 0xb6, 0x77, 0x97, 0x99, 0x59, 0x87, 0x20, 0x0e, + 0x3d, 0x72, 0xe4, 0xc8, 0x91, 0x13, 0xdf, 0x81, 0x0b, 0x12, 0x9c, 0x72, 0xec, 0x31, 0x48, 0xc8, + 0x22, 0xcb, 0x87, 0xe0, 0x8a, 0x66, 0x76, 0xec, 0x5d, 0x27, 0x9b, 0x38, 0xce, 0x81, 0x5e, 0x7a, + 0xdb, 0x79, 0xbf, 0xdf, 0x7b, 0xf3, 0xde, 0x9b, 0xf7, 0xde, 0x3e, 0xb0, 0xdb, 0x79, 0xc8, 0x0c, + 0xc7, 0x33, 0x3b, 0x81, 0x45, 0xa8, 0x4b, 0x38, 0x61, 0x66, 0x9f, 0xb8, 0x4d, 0x8f, 0x9a, 0x0a, + 0xc0, 0xbe, 0x63, 0xe2, 0x80, 0xb7, 0x3d, 0xea, 0x7c, 0x87, 0xb9, 0xe3, 0xb9, 0x66, 0x7f, 0xcd, + 0x22, 0x1c, 0xaf, 0x99, 0x2d, 0xe2, 0x12, 0x8a, 0x39, 0x69, 0x1a, 0x3e, 0xf5, 0xb8, 0x07, 0x6b, + 0x91, 0x86, 0x81, 0x7d, 0xc7, 0x18, 0xd3, 0x30, 0x94, 0xc6, 0xca, 0xfd, 0x96, 0xc3, 0xdb, 0x81, + 0x65, 0xd8, 0x5e, 0xcf, 0x6c, 0x79, 0x2d, 0xcf, 0x94, 0x8a, 0x56, 0x70, 0x28, 0x4f, 0xf2, 0x20, + 0xbf, 0x22, 0x83, 0x2b, 0x0f, 0x62, 0x17, 0x7a, 0xd8, 0x6e, 0x3b, 0x2e, 0xa1, 0xc7, 0xa6, 0xdf, + 0x69, 0x09, 0x01, 0x33, 0x7b, 0x84, 0x63, 0xb3, 0x7f, 0xc1, 0x8d, 0x15, 0xf3, 0x32, 0x2d, 0x1a, + 0xb8, 0xdc, 0xe9, 0x91, 0x0b, 0x0a, 0x1f, 0x4e, 0x52, 0x60, 0x76, 0x9b, 0xf4, 0xf0, 0x79, 0xbd, + 0xfa, 0x06, 0x00, 0x3b, 0xdf, 0x72, 0x8a, 0x0f, 0x70, 0x37, 0x20, 0xb0, 0x0a, 0x0a, 0x0e, 0x27, + 0x3d, 0xa6, 0x6b, 0xb5, 0xdc, 0x6a, 0xa9, 0x51, 0x0a, 0x07, 0xd5, 0xc2, 0xae, 0x10, 
0xa0, 0x48, + 0xbe, 0x59, 0xfc, 0xe9, 0xe7, 0x6a, 0xe6, 0xc5, 0x5f, 0xb5, 0x4c, 0xfd, 0xb7, 0x2c, 0xd0, 0x1f, + 0x7b, 0x36, 0xee, 0xee, 0x05, 0xd6, 0xd7, 0xc4, 0xe6, 0x5b, 0xb6, 0x4d, 0x18, 0x43, 0xa4, 0xef, + 0x90, 0x23, 0xf8, 0x15, 0x28, 0x8a, 0xc8, 0x9a, 0x98, 0x63, 0x5d, 0xab, 0x69, 0xab, 0xe5, 0xf5, + 0xf7, 0x8d, 0x38, 0xb1, 0x23, 0x07, 0x0d, 0xbf, 0xd3, 0x12, 0x02, 0x66, 0x08, 0xb6, 0xd1, 0x5f, + 0x33, 0x3e, 0x93, 0xb6, 0x9e, 0x10, 0x8e, 0x1b, 0xf0, 0x64, 0x50, 0xcd, 0x84, 0x83, 0x2a, 0x88, + 0x65, 0x68, 0x64, 0x15, 0x3e, 0x07, 0x79, 0xe6, 0x13, 0x5b, 0xcf, 0x4a, 0xeb, 0x1f, 0x19, 0x93, + 0x9e, 0xcd, 0x48, 0x71, 0x73, 0xcf, 0x27, 0x76, 0xe3, 0x96, 0xba, 0x26, 0x2f, 0x4e, 0x48, 0x1a, + 0x85, 0x36, 0x98, 0x61, 0x1c, 0xf3, 0x80, 0xe9, 0x39, 0x69, 0xfe, 0xe3, 0x9b, 0x99, 0x97, 0x26, + 0x1a, 0x6f, 0xa8, 0x0b, 0x66, 0xa2, 0x33, 0x52, 0xa6, 0xeb, 0xcf, 0xc1, 0xd2, 0x53, 0xcf, 0x45, + 0x84, 0x79, 0x01, 0xb5, 0xc9, 0x16, 0xe7, 0xd4, 0xb1, 0x02, 0x4e, 0x18, 0xac, 0x81, 0xbc, 0x8f, + 0x79, 0x5b, 0x26, 0xae, 0x14, 0xfb, 0xf7, 0x0c, 0xf3, 0x36, 0x92, 0x88, 0x60, 0xf4, 0x09, 0xb5, + 0x64, 0xf0, 0x09, 0xc6, 0x01, 0xa1, 0x16, 0x92, 0x48, 0xfd, 0x1b, 0x30, 0x9f, 0x30, 0x8e, 0x82, + 0xae, 0x7c, 0x5b, 0x01, 0x8d, 0xbd, 0xad, 0xd0, 0x60, 0x28, 0x92, 0xc3, 0x47, 0x60, 0xde, 0x8d, + 0x75, 0xf6, 0xd1, 0x63, 0xa6, 0x67, 0x25, 0x75, 0x31, 0x1c, 0x54, 0x93, 0xe6, 0x04, 0x84, 0xce, + 0x73, 0x45, 0x41, 0xc0, 0x94, 0x68, 0x4c, 0x50, 0x72, 0x71, 0x8f, 0x30, 0x1f, 0xdb, 0x44, 0x85, + 0x74, 0x5b, 0x39, 0x5c, 0x7a, 0x3a, 0x04, 0x50, 0xcc, 0x99, 0x1c, 0x1c, 0x7c, 0x1b, 0x14, 0x5a, + 0xd4, 0x0b, 0x7c, 0xf9, 0x3a, 0xa5, 0xc6, 0x9c, 0xa2, 0x14, 0x3e, 0x15, 0x42, 0x14, 0x61, 0xf0, + 0x5d, 0x30, 0xdb, 0x27, 0x94, 0x39, 0x9e, 0xab, 0xe7, 0x25, 0x6d, 0x5e, 0xd1, 0x66, 0x0f, 0x22, + 0x31, 0x1a, 0xe2, 0xf0, 0x1e, 0x28, 0x52, 0xe5, 0xb8, 0x5e, 0x90, 0xdc, 0x05, 0xc5, 0x2d, 0x8e, + 0x32, 0x38, 0x62, 0xc0, 0x0f, 0x40, 0x99, 0x05, 0xd6, 0x48, 0x61, 0x46, 0x2a, 0x2c, 0x2a, 0x85, + 0xf2, 0x5e, 0x0c, 0xa1, 0x24, 0x4f, 0x84, 0x25, 0x62, 0xd4, 0x67, 0xc7, 0xc3, 0x12, 0x29, 0x40, + 0x12, 0xa9, 0xff, 0xa1, 0x81, 0x5b, 0xd3, 0xbd, 0xd8, 0x7b, 0xa0, 0x84, 0x7d, 0x47, 0x86, 0x3d, + 0x7c, 0xab, 0x39, 0x91, 0xd7, 0xad, 0x67, 0xbb, 0x91, 0x10, 0xc5, 0xb8, 0x20, 0x0f, 0x9d, 0x11, + 0x75, 0x3d, 0x22, 0x0f, 0xaf, 0x64, 0x28, 0xc6, 0xe1, 0x06, 0x98, 0x1b, 0x1e, 0xe4, 0x23, 0xe9, + 0x79, 0xa9, 0x70, 0x3b, 0x1c, 0x54, 0xe7, 0x50, 0x12, 0x40, 0xe3, 0xbc, 0xfa, 0xef, 0x59, 0xb0, + 0xbc, 0x47, 0xba, 0x87, 0xaf, 0x66, 0x2a, 0x7c, 0x39, 0x36, 0x15, 0x1e, 0x5d, 0xa3, 0x6d, 0xd3, + 0x5d, 0x7d, 0xb5, 0x93, 0xe1, 0x97, 0x2c, 0x78, 0xf3, 0x0a, 0xc7, 0xe0, 0xf7, 0x00, 0xd2, 0x0b, + 0x8d, 0xa6, 0x32, 0xfa, 0x60, 0xb2, 0x43, 0x17, 0x9b, 0xb4, 0x71, 0x27, 0x1c, 0x54, 0x53, 0x9a, + 0x17, 0xa5, 0xdc, 0x03, 0x7f, 0xd0, 0xc0, 0x92, 0x9b, 0x36, 0xb8, 0x54, 0xd6, 0x37, 0x26, 0x7b, + 0x90, 0x3a, 0xf7, 0x1a, 0x77, 0xc3, 0x41, 0x35, 0x7d, 0x24, 0xa2, 0xf4, 0x0b, 0xc5, 0xc8, 0xb9, + 0x93, 0x48, 0x94, 0x68, 0x9a, 0xff, 0xaf, 0xd6, 0xbe, 0x18, 0xab, 0xb5, 0x4f, 0xa6, 0xaa, 0xb5, + 0x84, 0xa7, 0x97, 0x96, 0x9a, 0x75, 0xae, 0xd4, 0x36, 0xaf, 0x5d, 0x6a, 0x49, 0xeb, 0x57, 0x57, + 0xda, 0x13, 0xb0, 0x72, 0xb9, 0x57, 0x53, 0x8f, 0xee, 0xfa, 0xaf, 0x59, 0xb0, 0xf8, 0x7a, 0x1d, + 0xb8, 0x59, 0xd3, 0x9f, 0xe6, 0xc1, 0xf2, 0xeb, 0x86, 0xbf, 0xba, 0xe1, 0xc5, 0x4f, 0x34, 0x60, + 0x84, 0xaa, 0x1f, 0xff, 0xe8, 0xad, 0xf6, 0x19, 0xa1, 0x48, 0x22, 0xb0, 0x36, 0xdc, 0x0d, 0xa2, + 0x1f, 0x16, 0x10, 0x99, 0x56, 0xff, 0x42, 0xb5, 0x18, 0x38, 0xa0, 0x40, 0xc4, 0xc6, 0xab, 0x17, + 0x6a, 0xb9, 
0xd5, 0xf2, 0xfa, 0xf6, 0x8d, 0x6b, 0xc5, 0x90, 0x8b, 0xf3, 0x8e, 0xcb, 0xe9, 0x71, + 0xbc, 0x83, 0x48, 0x19, 0x8a, 0x6e, 0x80, 0x6f, 0x81, 0x5c, 0xe0, 0x34, 0xd5, 0x8a, 0x50, 0x56, + 0x94, 0xdc, 0xfe, 0xee, 0x36, 0x12, 0xf2, 0x95, 0x43, 0xb5, 0x7b, 0x4b, 0x13, 0x70, 0x01, 0xe4, + 0x3a, 0xe4, 0x38, 0xea, 0x33, 0x24, 0x3e, 0x61, 0x03, 0x14, 0xfa, 0x62, 0x2d, 0x57, 0x79, 0xbe, + 0x37, 0xd9, 0xd3, 0x78, 0x95, 0x47, 0x91, 0xea, 0x66, 0xf6, 0xa1, 0x56, 0xff, 0x53, 0x03, 0x77, + 0x2f, 0x2d, 0x48, 0xb1, 0x28, 0xe1, 0x6e, 0xd7, 0x3b, 0x22, 0x4d, 0x79, 0x77, 0x31, 0x5e, 0x94, + 0xb6, 0x22, 0x31, 0x1a, 0xe2, 0xf0, 0x1d, 0x30, 0xd3, 0x24, 0xae, 0x43, 0x9a, 0x72, 0xa5, 0x2a, + 0xc6, 0xb5, 0xbc, 0x2d, 0xa5, 0x48, 0xa1, 0x82, 0x47, 0x09, 0x66, 0x9e, 0xab, 0x96, 0xb8, 0x11, + 0x0f, 0x49, 0x29, 0x52, 0x28, 0xdc, 0x02, 0xf3, 0x44, 0xb8, 0x29, 0x83, 0xd8, 0xa1, 0xd4, 0x1b, + 0xbe, 0xec, 0xb2, 0x52, 0x98, 0xdf, 0x19, 0x87, 0xd1, 0x79, 0x7e, 0xfd, 0xdf, 0x2c, 0xd0, 0x2f, + 0x1b, 0x7b, 0xb0, 0x13, 0x6f, 0x31, 0x12, 0x94, 0x8b, 0x54, 0x79, 0xdd, 0xb8, 0x7e, 0xcb, 0x08, + 0xb5, 0xc6, 0x92, 0xf2, 0x66, 0x2e, 0x29, 0x4d, 0x6c, 0x3e, 0xf2, 0x08, 0x8f, 0xc0, 0x82, 0x3b, + 0xbe, 0x72, 0x47, 0x3b, 0x59, 0x79, 0x7d, 0x6d, 0xaa, 0x06, 0x91, 0x57, 0xea, 0xea, 0xca, 0x85, + 0x73, 0x00, 0x43, 0x17, 0x2e, 0x81, 0xeb, 0x00, 0x38, 0xae, 0xed, 0xf5, 0xfc, 0x2e, 0xe1, 0x44, + 0x26, 0xb0, 0x18, 0x4f, 0xcb, 0xdd, 0x11, 0x82, 0x12, 0xac, 0xb4, 0xcc, 0xe7, 0xa7, 0xcb, 0x7c, + 0xe3, 0xfe, 0xc9, 0x59, 0x25, 0xf3, 0xf2, 0xac, 0x92, 0x39, 0x3d, 0xab, 0x64, 0x5e, 0x84, 0x15, + 0xed, 0x24, 0xac, 0x68, 0x2f, 0xc3, 0x8a, 0x76, 0x1a, 0x56, 0xb4, 0xbf, 0xc3, 0x8a, 0xf6, 0xe3, + 0x3f, 0x95, 0xcc, 0xe7, 0xb3, 0x2a, 0xc2, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x7b, 0xc9, 0xa5, + 0x34, 0xa4, 0x0f, 0x00, 0x00, +} + func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -158,32 +545,31 @@ func (m ExtraValue) Marshal() (dAtA []byte, err error) { } func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m) > 0 { - for _, s := range m { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -191,41 +577,52 @@ func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *LocalSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LocalSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i 
= encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NonResourceAttributes) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -233,25 +630,32 @@ func (m *NonResourceAttributes) Marshal() (dAtA []byte, err error) { } func (m *NonResourceAttributes) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x12 - i++ + i -= len(m.Verb) + copy(dAtA[i:], m.Verb) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) - i += copy(dAtA[i:], m.Verb) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NonResourceRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -259,47 +663,40 @@ func (m *NonResourceRule) Marshal() (dAtA []byte, err error) { } func (m *NonResourceRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonResourceRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } if len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) { size 
:= m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -307,45 +704,57 @@ func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) { } func (m *ResourceAttributes) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) - i += copy(dAtA[i:], m.Verb) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i += copy(dAtA[i:], m.Resource) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) - i += copy(dAtA[i:], m.Subresource) - dAtA[i] = 0x3a - i++ + i -= len(m.Name) + copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + i-- + dAtA[i] = 0x3a + i -= len(m.Subresource) + copy(dAtA[i:], m.Subresource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) + i-- + dAtA[i] = 0x32 + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x2a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x22 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x1a + i -= len(m.Verb) + copy(dAtA[i:], m.Verb) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -353,77 +762,58 @@ func (m *ResourceRule) Marshal() (dAtA []byte, err error) { } func (m *ResourceRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x22 } } if len(m.Resources) > 0 { - 
for _, s := range m.Resources { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *SelfSubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -431,41 +821,52 @@ func (m *SelfSubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n6, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SelfSubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -473,37 +874,46 @@ func (m *SelfSubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectAccessReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ResourceAttributes != nil { - dAtA[i] = 0xa - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) - n7, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } if m.NonResourceAttributes != nil { + { + size, err := m.NonResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) - n8, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.ResourceAttributes != nil { + { + size, err := m.ResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -511,41 +921,52 @@ func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectRulesReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectRulesReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n10, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n11, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -553,21 +974,27 @@ func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectRulesReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectRulesReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := 
m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -575,133 +1002,147 @@ func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *SubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n12, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n13, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n14, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - return i, nil -} - -func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *SubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ResourceAttributes != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) - n15, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n15 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.NonResourceAttributes != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) - n16, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n16 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *SubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x32 if len(m.Extra) > 0 { keysForExtra := make([]string, 0, len(m.Extra)) for k := range m.Extra { keysForExtra = append(keysForExtra, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for _, k := range keysForExtra { - dAtA[i] = 0x2a - i++ - v := m.Extra[string(k)] - msgSize 
:= 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n17, err := (&v).MarshalTo(dAtA[i:]) + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x1a + if m.NonResourceAttributes != nil { + { + size, err := m.NonResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n17 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - return i, nil + if m.ResourceAttributes != nil { + { + size, err := m.ResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } func (m *SubjectAccessReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -709,41 +1150,48 @@ func (m *SubjectAccessReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *SubjectAccessReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Allowed { + i-- + if m.Denied { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) - i += copy(dAtA[i:], m.EvaluationError) + i-- dAtA[i] = 0x20 - i++ - if m.Denied { + i -= len(m.EvaluationError) + copy(dAtA[i:], m.EvaluationError) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i-- + dAtA[i] = 0x1a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x12 + i-- + if m.Allowed { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -751,59 +1199,74 @@ func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectRulesReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.ResourceRules) > 0 { - for _, msg := range m.ResourceRules { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + i -= len(m.EvaluationError) + copy(dAtA[i:], m.EvaluationError) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i-- + dAtA[i] = 0x22 + i-- + if m.Incomplete { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x18 if len(m.NonResourceRules) > 0 { - for _, msg := range m.NonResourceRules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.NonResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NonResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x18 - i++ - if m.Incomplete { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.ResourceRules) > 0 { + for iNdEx := len(m.ResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) - i += copy(dAtA[i:], m.EvaluationError) - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m ExtraValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m) > 0 { @@ -816,6 +1279,9 @@ func (m ExtraValue) Size() (n int) { } func (m *LocalSubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -828,6 +1294,9 @@ func (m *LocalSubjectAccessReview) Size() (n int) { } func (m *NonResourceAttributes) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Path) @@ -838,6 +1307,9 @@ func (m *NonResourceAttributes) Size() (n int) { } func (m *NonResourceRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Verbs) > 0 { @@ -856,6 +1328,9 @@ func (m *NonResourceRule) Size() (n int) { } func (m *ResourceAttributes) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Namespace) @@ -876,6 +1351,9 @@ func (m *ResourceAttributes) Size() (n int) { } func (m *ResourceRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Verbs) > 0 { @@ -906,6 +1384,9 @@ func (m *ResourceRule) Size() (n int) { } func (m *SelfSubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -918,6 +1399,9 @@ func (m *SelfSubjectAccessReview) Size() (n int) { } func (m 
*SelfSubjectAccessReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ResourceAttributes != nil { @@ -932,6 +1416,9 @@ func (m *SelfSubjectAccessReviewSpec) Size() (n int) { } func (m *SelfSubjectRulesReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -944,6 +1431,9 @@ func (m *SelfSubjectRulesReview) Size() (n int) { } func (m *SelfSubjectRulesReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Namespace) @@ -952,6 +1442,9 @@ func (m *SelfSubjectRulesReviewSpec) Size() (n int) { } func (m *SubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -964,6 +1457,9 @@ func (m *SubjectAccessReview) Size() (n int) { } func (m *SubjectAccessReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ResourceAttributes != nil { @@ -997,6 +1493,9 @@ func (m *SubjectAccessReviewSpec) Size() (n int) { } func (m *SubjectAccessReviewStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -1009,6 +1508,9 @@ func (m *SubjectAccessReviewStatus) Size() (n int) { } func (m *SubjectRulesReviewStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.ResourceRules) > 0 { @@ -1030,14 +1532,7 @@ func (m *SubjectRulesReviewStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1047,7 +1542,7 @@ func (this *LocalSubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&LocalSubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1110,7 +1605,7 @@ func (this *SelfSubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectAccessReviewSpec", "SelfSubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1122,8 +1617,8 @@ func (this *SelfSubjectAccessReviewSpec) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectAccessReviewSpec{`, - `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, - `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", 
"NonResourceAttributes", 1) + `,`, + `ResourceAttributes:` + strings.Replace(this.ResourceAttributes.String(), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(this.NonResourceAttributes.String(), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, `}`, }, "") return s @@ -1133,7 +1628,7 @@ func (this *SelfSubjectRulesReview) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectRulesReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectRulesReviewSpec", "SelfSubjectRulesReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectRulesReviewStatus", "SubjectRulesReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1155,7 +1650,7 @@ func (this *SubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&SubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1177,8 +1672,8 @@ func (this *SubjectAccessReviewSpec) String() string { } mapStringForExtra += "}" s := strings.Join([]string{`&SubjectAccessReviewSpec{`, - `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, - `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `ResourceAttributes:` + strings.Replace(this.ResourceAttributes.String(), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(this.NonResourceAttributes.String(), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, `User:` + fmt.Sprintf("%v", this.User) + `,`, `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, `Extra:` + mapStringForExtra + `,`, @@ -1204,9 +1699,19 @@ func (this *SubjectRulesReviewStatus) String() string { if this == nil { return "nil" } + repeatedStringForResourceRules := "[]ResourceRule{" + for _, f := range this.ResourceRules { + repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "ResourceRule", "ResourceRule", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceRules += "}" + repeatedStringForNonResourceRules := "[]NonResourceRule{" + for _, f := range this.NonResourceRules { + repeatedStringForNonResourceRules += strings.Replace(strings.Replace(f.String(), "NonResourceRule", "NonResourceRule", 1), `&`, ``, 1) + "," + } + repeatedStringForNonResourceRules += "}" s := strings.Join([]string{`&SubjectRulesReviewStatus{`, - `ResourceRules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ResourceRules), "ResourceRule", "ResourceRule", 1), `&`, ``, 1) + `,`, - 
`NonResourceRules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NonResourceRules), "NonResourceRule", "NonResourceRule", 1), `&`, ``, 1) + `,`, + `ResourceRules:` + repeatedStringForResourceRules + `,`, + `NonResourceRules:` + repeatedStringForNonResourceRules + `,`, `Incomplete:` + fmt.Sprintf("%v", this.Incomplete) + `,`, `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, `}`, @@ -1236,7 +1741,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1264,7 +1769,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1274,6 +1779,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1288,6 +1796,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1315,7 +1826,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1343,7 +1854,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1352,6 +1863,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1373,7 +1887,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1382,6 +1896,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1403,7 +1920,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1412,6 +1929,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1428,6 +1948,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1455,7 +1978,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1483,7 +2006,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= 
uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1493,6 +2016,9 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1512,7 +2038,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1522,6 +2048,9 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1536,6 +2065,9 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1563,7 +2095,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1591,7 +2123,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1601,6 +2133,9 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1620,7 +2155,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1630,6 +2165,9 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1644,6 +2182,9 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1671,7 +2212,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1699,7 +2240,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1709,6 +2250,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1728,7 +2272,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1738,6 +2282,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -1757,7 +2304,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1767,6 +2314,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1786,7 +2336,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1796,6 +2346,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1815,7 +2368,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1825,6 +2378,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1844,7 +2400,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1854,6 +2410,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1873,7 +2432,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1883,6 +2442,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1897,6 +2459,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1924,7 +2489,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1952,7 +2517,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1962,6 +2527,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1981,7 +2549,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1991,6 +2559,9 @@ func (m *ResourceRule) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2010,7 +2581,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2020,6 +2591,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2039,7 +2613,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2049,6 +2623,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2063,6 +2640,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2090,7 +2670,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2118,7 +2698,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2127,6 +2707,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2148,7 +2731,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2157,6 +2740,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2178,7 +2764,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2187,6 +2773,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2203,6 +2792,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2230,7 +2822,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2258,7 +2850,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2267,6 +2859,9 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2291,7 +2886,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2300,6 +2895,9 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2319,6 +2917,9 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2346,7 +2947,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2374,7 +2975,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2383,6 +2984,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2404,7 +3008,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2413,6 +3017,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2434,7 +3041,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2443,6 +3050,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2459,6 +3069,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2486,7 +3099,7 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2514,7 +3127,7 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2524,6 +3137,9 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + 
if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2538,6 +3154,9 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2565,7 +3184,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2593,7 +3212,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2602,6 +3221,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2623,7 +3245,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2632,6 +3254,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2653,7 +3278,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2662,6 +3287,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2678,6 +3306,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2705,7 +3336,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2733,7 +3364,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2742,6 +3373,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2766,7 +3400,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2775,6 +3409,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2799,7 +3436,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-2809,6 +3446,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2828,7 +3468,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2838,6 +3478,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2857,7 +3500,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2866,6 +3509,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2886,7 +3532,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2903,7 +3549,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2913,6 +3559,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -2929,7 +3578,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2938,7 +3587,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { @@ -2980,7 +3629,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2990,6 +3639,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3004,6 +3656,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3031,7 +3686,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3059,7 +3714,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b 
< 0x80 { break } @@ -3079,7 +3734,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3089,6 +3744,9 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3108,7 +3766,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3118,6 +3776,9 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3137,7 +3798,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3152,6 +3813,9 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3179,7 +3843,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3207,7 +3871,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3216,6 +3880,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3238,7 +3905,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3247,6 +3914,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3269,7 +3939,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3289,7 +3959,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3299,6 +3969,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3313,6 +3986,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3379,10 +4055,13 
@@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -3411,6 +4090,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -3429,83 +4111,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1137 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0xcb, 0x6f, 0x1b, 0x45, - 0x18, 0xf7, 0xfa, 0x91, 0xd8, 0xe3, 0x86, 0xa4, 0x13, 0xa5, 0xd9, 0x06, 0x61, 0x5b, 0x46, 0x42, - 0x41, 0xb4, 0xbb, 0x24, 0x2a, 0xa4, 0x04, 0x7a, 0x88, 0x95, 0x08, 0x45, 0x6a, 0x4b, 0x35, 0x51, - 0x72, 0xa0, 0x12, 0x30, 0xbb, 0x9e, 0xd8, 0x8b, 0xed, 0xdd, 0x65, 0x66, 0xd6, 0x21, 0x88, 0x43, - 0x8f, 0x1c, 0x39, 0x72, 0xe4, 0xc4, 0xff, 0xc0, 0x05, 0x09, 0x4e, 0x39, 0xf6, 0x18, 0x24, 0x64, - 0x91, 0xe5, 0x8f, 0xe0, 0x8a, 0x66, 0x76, 0xec, 0x5d, 0x27, 0x9b, 0x38, 0xce, 0x81, 0x5e, 0x7a, - 0xdb, 0xf9, 0x7e, 0xdf, 0xfb, 0xb5, 0x1f, 0xd8, 0xed, 0x3c, 0x64, 0x86, 0xe3, 0x99, 0x9d, 0xc0, - 0x22, 0xd4, 0x25, 0x9c, 0x30, 0xb3, 0x4f, 0xdc, 0xa6, 0x47, 0x4d, 0x05, 0x60, 0xdf, 0x31, 0x71, - 0xc0, 0xdb, 0x1e, 0x75, 0xbe, 0xc3, 0xdc, 0xf1, 0x5c, 0xb3, 0xbf, 0x66, 0x11, 0x8e, 0xd7, 0xcc, - 0x16, 0x71, 0x09, 0xc5, 0x9c, 0x34, 0x0d, 0x9f, 0x7a, 0xdc, 0x83, 0xb5, 0x48, 0xc2, 0xc0, 0xbe, - 0x63, 0x8c, 0x49, 0x18, 0x4a, 0x62, 0xe5, 0x7e, 0xcb, 0xe1, 0xed, 0xc0, 0x32, 0x6c, 0xaf, 0x67, - 0xb6, 0xbc, 0x96, 0x67, 0x4a, 0x41, 0x2b, 0x38, 0x94, 0x2f, 0xf9, 0x90, 0x5f, 0x91, 0xc2, 0x95, - 0x07, 0xb1, 0x0b, 0x3d, 0x6c, 0xb7, 0x1d, 0x97, 0xd0, 0x63, 0xd3, 0xef, 0xb4, 0x04, 0x81, 0x99, - 0x3d, 0xc2, 0xb1, 0xd9, 0xbf, 0xe0, 0xc6, 0x8a, 0x79, 0x99, 0x14, 0x0d, 0x5c, 0xee, 0xf4, 0xc8, - 0x05, 0x81, 0x0f, 0x27, 0x09, 0x30, 0xbb, 0x4d, 0x7a, 0xf8, 0xbc, 0x5c, 0x7d, 0x03, 0x80, 0x9d, - 0x6f, 0x39, 0xc5, 0x07, 0xb8, 0x1b, 0x10, 0x58, 0x05, 0x05, 0x87, 0x93, 0x1e, 0xd3, 0xb5, 0x5a, - 0x6e, 0xb5, 0xd4, 0x28, 0x85, 0x83, 0x6a, 0x61, 0x57, 0x10, 0x50, 0x44, 0xdf, 0x2c, 0xfe, 0xf4, - 0x73, 0x35, 0xf3, 0xe2, 0xaf, 0x5a, 0xa6, 0xfe, 0x5b, 0x16, 0xe8, 0x8f, 0x3d, 0x1b, 0x77, 0xf7, - 0x02, 0xeb, 0x6b, 0x62, 0xf3, 0x2d, 0xdb, 0x26, 0x8c, 0x21, 0xd2, 0x77, 0xc8, 0x11, 0xfc, 0x0a, - 0x14, 0x45, 0x64, 0x4d, 0xcc, 0xb1, 0xae, 0xd5, 0xb4, 0xd5, 0xf2, 0xfa, 0xfb, 0x46, 0x9c, 0xd8, - 0x91, 0x83, 0x86, 0xdf, 0x69, 0x09, 0x02, 0x33, 0x04, 0xb7, 0xd1, 0x5f, 0x33, 0x3e, 0x93, 0xba, - 0x9e, 0x10, 0x8e, 0x1b, 0xf0, 0x64, 0x50, 0xcd, 0x84, 0x83, 0x2a, 0x88, 0x69, 0x68, 0xa4, 0x15, - 0x3e, 0x07, 0x79, 0xe6, 0x13, 0x5b, 0xcf, 0x4a, 0xed, 0x1f, 0x19, 0x93, 0xca, 0x66, 0xa4, 0xb8, - 0xb9, 0xe7, 0x13, 0xbb, 0x71, 0x4b, 0x99, 0xc9, 0x8b, 0x17, 0x92, 0x4a, 0xa1, 0x0d, 0x66, 0x18, - 0xc7, 0x3c, 0x60, 0x7a, 0x4e, 0xaa, 0xff, 0xf8, 0x66, 0xea, 0xa5, 0x8a, 0xc6, 0x1b, 0xca, 0xc0, - 0x4c, 0xf4, 0x46, 0x4a, 0x75, 0xfd, 0x39, 0x58, 0x7a, 0xea, 0xb9, 0x88, 0x30, 0x2f, 0xa0, 0x36, - 0xd9, 0xe2, 0x9c, 0x3a, 0x56, 0xc0, 0x09, 0x83, 0x35, 0x90, 0xf7, 0x31, 0x6f, 0xcb, 0xc4, 0x95, - 0x62, 0xff, 0x9e, 0x61, 0xde, 0x46, 0x12, 
0x11, 0x1c, 0x7d, 0x42, 0x2d, 0x19, 0x7c, 0x82, 0xe3, - 0x80, 0x50, 0x0b, 0x49, 0xa4, 0xfe, 0x0d, 0x98, 0x4f, 0x28, 0x47, 0x41, 0x57, 0xd6, 0x56, 0x40, - 0x63, 0xb5, 0x15, 0x12, 0x0c, 0x45, 0x74, 0xf8, 0x08, 0xcc, 0xbb, 0xb1, 0xcc, 0x3e, 0x7a, 0xcc, - 0xf4, 0xac, 0x64, 0x5d, 0x0c, 0x07, 0xd5, 0xa4, 0x3a, 0x01, 0xa1, 0xf3, 0xbc, 0xa2, 0x21, 0x60, - 0x4a, 0x34, 0x26, 0x28, 0xb9, 0xb8, 0x47, 0x98, 0x8f, 0x6d, 0xa2, 0x42, 0xba, 0xad, 0x1c, 0x2e, - 0x3d, 0x1d, 0x02, 0x28, 0xe6, 0x99, 0x1c, 0x1c, 0x7c, 0x1b, 0x14, 0x5a, 0xd4, 0x0b, 0x7c, 0x59, - 0x9d, 0x52, 0x63, 0x4e, 0xb1, 0x14, 0x3e, 0x15, 0x44, 0x14, 0x61, 0xf0, 0x5d, 0x30, 0xdb, 0x27, - 0x94, 0x39, 0x9e, 0xab, 0xe7, 0x25, 0xdb, 0xbc, 0x62, 0x9b, 0x3d, 0x88, 0xc8, 0x68, 0x88, 0xc3, - 0x7b, 0xa0, 0x48, 0x95, 0xe3, 0x7a, 0x41, 0xf2, 0x2e, 0x28, 0xde, 0xe2, 0x28, 0x83, 0x23, 0x0e, - 0xf8, 0x01, 0x28, 0xb3, 0xc0, 0x1a, 0x09, 0xcc, 0x48, 0x81, 0x45, 0x25, 0x50, 0xde, 0x8b, 0x21, - 0x94, 0xe4, 0x13, 0x61, 0x89, 0x18, 0xf5, 0xd9, 0xf1, 0xb0, 0x44, 0x0a, 0x90, 0x44, 0xea, 0x7f, - 0x68, 0xe0, 0xd6, 0x74, 0x15, 0x7b, 0x0f, 0x94, 0xb0, 0xef, 0xc8, 0xb0, 0x87, 0xb5, 0x9a, 0x13, - 0x79, 0xdd, 0x7a, 0xb6, 0x1b, 0x11, 0x51, 0x8c, 0x0b, 0xe6, 0xa1, 0x33, 0xa2, 0xaf, 0x47, 0xcc, - 0x43, 0x93, 0x0c, 0xc5, 0x38, 0xdc, 0x00, 0x73, 0xc3, 0x87, 0x2c, 0x92, 0x9e, 0x97, 0x02, 0xb7, - 0xc3, 0x41, 0x75, 0x0e, 0x25, 0x01, 0x34, 0xce, 0x57, 0xff, 0x3d, 0x0b, 0x96, 0xf7, 0x48, 0xf7, - 0xf0, 0xd5, 0x6c, 0x85, 0x2f, 0xc7, 0xb6, 0xc2, 0xa3, 0x6b, 0x8c, 0x6d, 0xba, 0xab, 0xaf, 0x76, - 0x33, 0xfc, 0x92, 0x05, 0x6f, 0x5e, 0xe1, 0x18, 0xfc, 0x1e, 0x40, 0x7a, 0x61, 0xd0, 0x54, 0x46, - 0x1f, 0x4c, 0x76, 0xe8, 0xe2, 0x90, 0x36, 0xee, 0x84, 0x83, 0x6a, 0xca, 0xf0, 0xa2, 0x14, 0x3b, - 0xf0, 0x07, 0x0d, 0x2c, 0xb9, 0x69, 0x8b, 0x4b, 0x65, 0x7d, 0x63, 0xb2, 0x07, 0xa9, 0x7b, 0xaf, - 0x71, 0x37, 0x1c, 0x54, 0xd3, 0x57, 0x22, 0x4a, 0x37, 0x28, 0x56, 0xce, 0x9d, 0x44, 0xa2, 0xc4, - 0xd0, 0xfc, 0x7f, 0xbd, 0xf6, 0xc5, 0x58, 0xaf, 0x7d, 0x32, 0x55, 0xaf, 0x25, 0x3c, 0xbd, 0xb4, - 0xd5, 0xac, 0x73, 0xad, 0xb6, 0x79, 0xed, 0x56, 0x4b, 0x6a, 0xbf, 0xba, 0xd3, 0x9e, 0x80, 0x95, - 0xcb, 0xbd, 0x9a, 0x7a, 0x75, 0xd7, 0x7f, 0xcd, 0x82, 0xc5, 0xd7, 0xe7, 0xc0, 0xcd, 0x86, 0xfe, - 0x34, 0x0f, 0x96, 0x5f, 0x0f, 0xfc, 0xd5, 0x03, 0x2f, 0x7e, 0xa2, 0x01, 0x23, 0x54, 0xfd, 0xf8, - 0x47, 0xb5, 0xda, 0x67, 0x84, 0x22, 0x89, 0xc0, 0xda, 0xf0, 0x36, 0x88, 0x7e, 0x58, 0x40, 0x64, - 0x5a, 0xfd, 0x0b, 0xd5, 0x61, 0xe0, 0x80, 0x02, 0x11, 0x17, 0xaf, 0x5e, 0xa8, 0xe5, 0x56, 0xcb, - 0xeb, 0xdb, 0x37, 0xee, 0x15, 0x43, 0x1e, 0xce, 0x3b, 0x2e, 0xa7, 0xc7, 0xf1, 0x0d, 0x22, 0x69, - 0x28, 0xb2, 0x00, 0xdf, 0x02, 0xb9, 0xc0, 0x69, 0xaa, 0x13, 0xa1, 0xac, 0x58, 0x72, 0xfb, 0xbb, - 0xdb, 0x48, 0xd0, 0x57, 0x0e, 0xd5, 0xed, 0x2d, 0x55, 0xc0, 0x05, 0x90, 0xeb, 0x90, 0xe3, 0x68, - 0xce, 0x90, 0xf8, 0x84, 0x0d, 0x50, 0xe8, 0x8b, 0xb3, 0x5c, 0xe5, 0xf9, 0xde, 0x64, 0x4f, 0xe3, - 0x53, 0x1e, 0x45, 0xa2, 0x9b, 0xd9, 0x87, 0x5a, 0xfd, 0x4f, 0x0d, 0xdc, 0xbd, 0xb4, 0x21, 0xc5, - 0xa1, 0x84, 0xbb, 0x5d, 0xef, 0x88, 0x34, 0xa5, 0xed, 0x62, 0x7c, 0x28, 0x6d, 0x45, 0x64, 0x34, - 0xc4, 0xe1, 0x3b, 0x60, 0x86, 0x12, 0xcc, 0x3c, 0x57, 0x1d, 0x67, 0xa3, 0x5e, 0x46, 0x92, 0x8a, - 0x14, 0x0a, 0xb7, 0xc0, 0x3c, 0x11, 0xe6, 0xa5, 0x73, 0x3b, 0x94, 0x7a, 0xc3, 0x8a, 0x2d, 0x2b, - 0x81, 0xf9, 0x9d, 0x71, 0x18, 0x9d, 0xe7, 0x17, 0xa6, 0x9a, 0xc4, 0x75, 0x48, 0x53, 0x5e, 0x6f, - 0xc5, 0xd8, 0xd4, 0xb6, 0xa4, 0x22, 0x85, 0xd6, 0xff, 0xcd, 0x02, 0xfd, 0xb2, 0xb5, 0x07, 0x3b, - 0xf1, 0x15, 0x23, 0x41, 0x79, 0x48, 0x95, 0xd7, 0x8d, 0xeb, 0x8f, 
0x8c, 0x10, 0x6b, 0x2c, 0x29, - 0xdb, 0x73, 0x49, 0x6a, 0xe2, 0xf2, 0x91, 0x4f, 0x78, 0x04, 0x16, 0xdc, 0xf1, 0x93, 0x3b, 0xba, - 0xc9, 0xca, 0xeb, 0x6b, 0x53, 0x0d, 0x88, 0x34, 0xa9, 0x2b, 0x93, 0x0b, 0xe7, 0x00, 0x86, 0x2e, - 0x18, 0x81, 0xeb, 0x00, 0x38, 0xae, 0xed, 0xf5, 0xfc, 0x2e, 0xe1, 0x44, 0x26, 0xba, 0x18, 0x6f, - 0xcb, 0xdd, 0x11, 0x82, 0x12, 0x5c, 0x69, 0x15, 0xca, 0x4f, 0x57, 0xa1, 0xc6, 0xfd, 0x93, 0xb3, - 0x4a, 0xe6, 0xe5, 0x59, 0x25, 0x73, 0x7a, 0x56, 0xc9, 0xbc, 0x08, 0x2b, 0xda, 0x49, 0x58, 0xd1, - 0x5e, 0x86, 0x15, 0xed, 0x34, 0xac, 0x68, 0x7f, 0x87, 0x15, 0xed, 0xc7, 0x7f, 0x2a, 0x99, 0xcf, - 0x67, 0x55, 0x84, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xc5, 0xba, 0xf8, 0x96, 0xa4, 0x0f, 0x00, - 0x00, -} diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go index 950e93340..174e6f5f8 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go @@ -17,48 +17,24 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto - - It has these top-level messages: - CrossVersionObjectReference - ExternalMetricSource - ExternalMetricStatus - HorizontalPodAutoscaler - HorizontalPodAutoscalerCondition - HorizontalPodAutoscalerList - HorizontalPodAutoscalerSpec - HorizontalPodAutoscalerStatus - MetricSpec - MetricStatus - ObjectMetricSource - ObjectMetricStatus - PodsMetricSource - PodsMetricStatus - ResourceMetricSource - ResourceMetricStatus - Scale - ScaleSpec - ScaleStatus -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" + proto "github.com/gogo/protobuf/proto" -import strings "strings" -import reflect "reflect" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal @@ -74,114 +50,664 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} + return fileDescriptor_2bb1f2101a7f10e2, []int{0} +} +func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CrossVersionObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CrossVersionObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_CrossVersionObjectReference.Merge(m, src) +} +func (m *CrossVersionObjectReference) XXX_Size() int { + return m.Size() +} +func (m *CrossVersionObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_CrossVersionObjectReference.DiscardUnknown(m) } -func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } -func (*ExternalMetricSource) ProtoMessage() {} -func (*ExternalMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo + +func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } +func (*ExternalMetricSource) ProtoMessage() {} +func (*ExternalMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{1} +} +func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricSource.Merge(m, src) +} +func (m *ExternalMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricSource.DiscardUnknown(m) +} -func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } -func (*ExternalMetricStatus) ProtoMessage() {} -func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo -func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } -func (*HorizontalPodAutoscaler) ProtoMessage() {} -func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } +func (*ExternalMetricStatus) ProtoMessage() {} +func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{2} +} +func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricStatus.Merge(m, src) +} +func (m *ExternalMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricStatus) XXX_DiscardUnknown() { + 
xxx_messageInfo_ExternalMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo + +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (*HorizontalPodAutoscaler) ProtoMessage() {} +func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{3} +} +func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscaler) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscaler.Merge(m, src) +} +func (m *HorizontalPodAutoscaler) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscaler) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscaler.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} + return fileDescriptor_2bb1f2101a7f10e2, []int{4} +} +func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerCondition.Merge(m, src) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Size() int { + return m.Size() } +func (m *HorizontalPodAutoscalerCondition) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} + return fileDescriptor_2bb1f2101a7f10e2, []int{5} +} +func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerList.Merge(m, src) +} +func (m *HorizontalPodAutoscalerList) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerList) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerList.DiscardUnknown(m) } +var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo + func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} + return fileDescriptor_2bb1f2101a7f10e2, []int{6} +} +func (m *HorizontalPodAutoscalerSpec) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } +func (m *HorizontalPodAutoscalerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerSpec.Merge(m, src) +} +func (m *HorizontalPodAutoscalerSpec) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerSpec) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptor_2bb1f2101a7f10e2, []int{7} +} +func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerStatus.Merge(m, src) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo + +func (m *MetricSpec) Reset() { *m = MetricSpec{} } +func (*MetricSpec) ProtoMessage() {} +func (*MetricSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{8} +} +func (m *MetricSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricSpec.Merge(m, src) +} +func (m *MetricSpec) XXX_Size() int { + return m.Size() +} +func (m *MetricSpec) XXX_DiscardUnknown() { + xxx_messageInfo_MetricSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricSpec proto.InternalMessageInfo + +func (m *MetricStatus) Reset() { *m = MetricStatus{} } +func (*MetricStatus) ProtoMessage() {} +func (*MetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{9} +} +func (m *MetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricStatus.Merge(m, src) +} +func (m *MetricStatus) XXX_Size() int { + return m.Size() +} +func (m *MetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_MetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricStatus proto.InternalMessageInfo + +func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } +func (*ObjectMetricSource) ProtoMessage() {} +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { + return 
fileDescriptor_2bb1f2101a7f10e2, []int{10} +} +func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricSource.Merge(m, src) +} +func (m *ObjectMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo + +func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } +func (*ObjectMetricStatus) ProtoMessage() {} +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{11} +} +func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricStatus.Merge(m, src) +} +func (m *ObjectMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo + +func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } +func (*PodsMetricSource) ProtoMessage() {} +func (*PodsMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{12} +} +func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricSource.Merge(m, src) +} +func (m *PodsMetricSource) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo + +func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } +func (*PodsMetricStatus) ProtoMessage() {} +func (*PodsMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{13} +} +func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricStatus.Merge(m, src) +} +func (m *PodsMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo + +func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } +func (*ResourceMetricSource) ProtoMessage() {} +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, 
[]int{14} +} +func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricSource.Merge(m, src) +} +func (m *ResourceMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo + +func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } +func (*ResourceMetricStatus) ProtoMessage() {} +func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{15} +} +func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricStatus.Merge(m, src) +} +func (m *ResourceMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo + +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{16} +} +func (m *Scale) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scale) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scale.Merge(m, src) +} +func (m *Scale) XXX_Size() int { + return m.Size() +} +func (m *Scale) XXX_DiscardUnknown() { + xxx_messageInfo_Scale.DiscardUnknown(m) +} + +var xxx_messageInfo_Scale proto.InternalMessageInfo + +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{17} +} +func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleSpec.Merge(m, src) +} +func (m *ScaleSpec) XXX_Size() int { + return m.Size() +} +func (m *ScaleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo + +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{18} +} +func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { 
+ return nil, err + } + return b[:n], nil +} +func (m *ScaleStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleStatus.Merge(m, src) +} +func (m *ScaleStatus) XXX_Size() int { + return m.Size() +} +func (m *ScaleStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo + +func init() { + proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v1.CrossVersionObjectReference") + proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricSource") + proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricStatus") + proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscaler") + proto.RegisterType((*HorizontalPodAutoscalerCondition)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerCondition") + proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerList") + proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerSpec") + proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerStatus") + proto.RegisterType((*MetricSpec)(nil), "k8s.io.api.autoscaling.v1.MetricSpec") + proto.RegisterType((*MetricStatus)(nil), "k8s.io.api.autoscaling.v1.MetricStatus") + proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricSource") + proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricStatus") + proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.api.autoscaling.v1.PodsMetricSource") + proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.api.autoscaling.v1.PodsMetricStatus") + proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricSource") + proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricStatus") + proto.RegisterType((*Scale)(nil), "k8s.io.api.autoscaling.v1.Scale") + proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.autoscaling.v1.ScaleSpec") + proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.autoscaling.v1.ScaleStatus") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto", fileDescriptor_2bb1f2101a7f10e2) +} + +var fileDescriptor_2bb1f2101a7f10e2 = []byte{ + // 1516 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcf, 0x6f, 0x13, 0xc7, + 0x17, 0x8f, 0x7f, 0x24, 0x24, 0xe3, 0x90, 0xe4, 0x3b, 0x20, 0x08, 0xe1, 0x8b, 0x37, 0xda, 0x22, + 0x44, 0x7f, 0xb0, 0x6e, 0x52, 0x8a, 0xe8, 0x31, 0x76, 0x4b, 0x41, 0x8d, 0x21, 0x4c, 0x02, 0xa5, + 0x3f, 0xc5, 0x64, 0x3d, 0x38, 0x43, 0xbc, 0xbb, 0xd6, 0xec, 0xd8, 0x22, 0x48, 0x95, 0xda, 0x43, + 0xef, 0xbd, 0xb4, 0xea, 0xb1, 0x95, 0x7a, 0xed, 0x99, 0x73, 0x6f, 0x1c, 0x39, 0x20, 0x95, 0xd3, + 0xaa, 0x6c, 0x8f, 0xfd, 0x0f, 0x38, 0x55, 0xf3, 0xc3, 0xeb, 0x5d, 0xdb, 0xeb, 0x24, 0x26, 0x44, + 0x6d, 0x6f, 0x3b, 0x33, 0xef, 0x7d, 0xde, 0xec, 0x7b, 0x6f, 0xde, 0x2f, 0x50, 0xde, 0xbe, 0xec, + 0x5b, 0xd4, 0x2b, 0x6d, 0xb7, 0x36, 0x09, 0x73, 0x09, 0x27, 0x7e, 0xa9, 0x4d, 0xdc, 0x9a, 0xc7, + 0x4a, 0xfa, 0x00, 0x37, 0x69, 0x09, 0xb7, 0xb8, 0xe7, 0xdb, 0xb8, 0x41, 0xdd, 0x7a, 0xa9, 0xbd, + 0x54, 0xaa, 0x13, 0x97, 0x30, 0xcc, 0x49, 0xcd, 0x6a, 0x32, 0x8f, 0x7b, 0xf0, 0x94, 0x22, 0xb5, + 0x70, 0x93, 0x5a, 0x31, 0x52, 0xab, 0xbd, 0xb4, 0x70, 0xa1, 0x4e, 
0xf9, 0x56, 0x6b, 0xd3, 0xb2, + 0x3d, 0xa7, 0x54, 0xf7, 0xea, 0x5e, 0x49, 0x72, 0x6c, 0xb6, 0xee, 0xc9, 0x95, 0x5c, 0xc8, 0x2f, + 0x85, 0xb4, 0x60, 0xc6, 0x84, 0xda, 0x1e, 0x23, 0x03, 0xa4, 0x2d, 0x5c, 0xec, 0xd2, 0x38, 0xd8, + 0xde, 0xa2, 0x2e, 0x61, 0x3b, 0xa5, 0xe6, 0x76, 0x5d, 0x32, 0x31, 0xe2, 0x7b, 0x2d, 0x66, 0x93, + 0x7d, 0x71, 0xf9, 0x25, 0x87, 0x70, 0x3c, 0x48, 0x56, 0x29, 0x8d, 0x8b, 0xb5, 0x5c, 0x4e, 0x9d, + 0x7e, 0x31, 0x97, 0x76, 0x63, 0xf0, 0xed, 0x2d, 0xe2, 0xe0, 0x5e, 0x3e, 0xf3, 0xfb, 0x0c, 0x38, + 0x5d, 0x61, 0x9e, 0xef, 0xdf, 0x26, 0xcc, 0xa7, 0x9e, 0x7b, 0x63, 0xf3, 0x3e, 0xb1, 0x39, 0x22, + 0xf7, 0x08, 0x23, 0xae, 0x4d, 0xe0, 0x22, 0xc8, 0x6f, 0x53, 0xb7, 0x36, 0x9f, 0x59, 0xcc, 0x9c, + 0x9f, 0x2a, 0x4f, 0x3f, 0x0e, 0x8c, 0xb1, 0x30, 0x30, 0xf2, 0x1f, 0x51, 0xb7, 0x86, 0xe4, 0x89, + 0xa0, 0x70, 0xb1, 0x43, 0xe6, 0xb3, 0x49, 0x8a, 0xeb, 0xd8, 0x21, 0x48, 0x9e, 0xc0, 0x65, 0x00, + 0x70, 0x93, 0x6a, 0x01, 0xf3, 0x39, 0x49, 0x07, 0x35, 0x1d, 0x58, 0x59, 0xbb, 0xa6, 0x4f, 0x50, + 0x8c, 0xca, 0xfc, 0x21, 0x07, 0x8e, 0x7f, 0xf0, 0x80, 0x13, 0xe6, 0xe2, 0x46, 0x95, 0x70, 0x46, + 0xed, 0x75, 0xa9, 0x5f, 0x01, 0xe6, 0xc8, 0xb5, 0x10, 0xa0, 0xaf, 0x15, 0x81, 0x55, 0xa3, 0x13, + 0x14, 0xa3, 0x82, 0x1e, 0x98, 0x51, 0xab, 0x75, 0xd2, 0x20, 0x36, 0xf7, 0x98, 0xbc, 0x6c, 0x61, + 0xf9, 0x1d, 0xab, 0xeb, 0x40, 0x91, 0xd6, 0xac, 0xe6, 0x76, 0x5d, 0x6c, 0xf8, 0x96, 0x30, 0x8e, + 0xd5, 0x5e, 0xb2, 0x56, 0xf1, 0x26, 0x69, 0x74, 0x58, 0xcb, 0x30, 0x0c, 0x8c, 0x99, 0x6a, 0x02, + 0x0e, 0xf5, 0xc0, 0x43, 0x0c, 0x0a, 0x1c, 0xb3, 0x3a, 0xe1, 0xb7, 0x71, 0xa3, 0x45, 0xe4, 0x2f, + 0x17, 0x96, 0xad, 0x61, 0xd2, 0xac, 0x8e, 0x03, 0x59, 0x37, 0x5b, 0xd8, 0xe5, 0x94, 0xef, 0x94, + 0x67, 0xc3, 0xc0, 0x28, 0x6c, 0x74, 0x61, 0x50, 0x1c, 0x13, 0xb6, 0x01, 0x54, 0xcb, 0x95, 0x36, + 0x61, 0xb8, 0x4e, 0x94, 0xa4, 0xfc, 0x48, 0x92, 0x4e, 0x84, 0x81, 0x01, 0x37, 0xfa, 0xd0, 0xd0, + 0x00, 0x09, 0xe6, 0x4f, 0xfd, 0x86, 0xe1, 0x98, 0xb7, 0xfc, 0x7f, 0x87, 0x61, 0xb6, 0xc0, 0xb4, + 0xdd, 0x62, 0x8c, 0xb8, 0x2f, 0x65, 0x99, 0xe3, 0xfa, 0xb7, 0xa6, 0x2b, 0x31, 0x2c, 0x94, 0x40, + 0x86, 0x3b, 0xe0, 0x98, 0x5e, 0x1f, 0x80, 0x81, 0x4e, 0x86, 0x81, 0x71, 0xac, 0xd2, 0x0f, 0x87, + 0x06, 0xc9, 0x30, 0x1f, 0x65, 0xc1, 0xc9, 0xab, 0x1e, 0xa3, 0x0f, 0x3d, 0x97, 0xe3, 0xc6, 0x9a, + 0x57, 0x5b, 0xd1, 0xb1, 0x91, 0x30, 0x78, 0x17, 0x4c, 0x0a, 0xed, 0xd5, 0x30, 0xc7, 0xd2, 0x46, + 0x85, 0xe5, 0xb7, 0xf7, 0xa6, 0x6b, 0x15, 0x18, 0xaa, 0x84, 0xe3, 0xae, 0x55, 0xbb, 0x7b, 0x28, + 0x42, 0x85, 0x77, 0x40, 0xde, 0x6f, 0x12, 0x5b, 0x5b, 0xf2, 0x92, 0x95, 0x1a, 0xa3, 0xad, 0x94, + 0x3b, 0xae, 0x37, 0x89, 0xdd, 0x8d, 0x23, 0x62, 0x85, 0x24, 0x22, 0xbc, 0x0b, 0x26, 0x7c, 0xe9, + 0x6b, 0xda, 0x6c, 0x97, 0x47, 0xc0, 0x96, 0xfc, 0xe5, 0x19, 0x8d, 0x3e, 0xa1, 0xd6, 0x48, 0xe3, + 0x9a, 0xdf, 0xe6, 0xc0, 0x62, 0x0a, 0x67, 0xc5, 0x73, 0x6b, 0x94, 0x53, 0xcf, 0x85, 0x57, 0x41, + 0x9e, 0xef, 0x34, 0x3b, 0x2e, 0x7e, 0xb1, 0x73, 0xd1, 0x8d, 0x9d, 0x26, 0x79, 0x11, 0x18, 0x67, + 0x77, 0xe3, 0x17, 0x74, 0x48, 0x22, 0xc0, 0xd5, 0xe8, 0x87, 0xb2, 0x09, 0x2c, 0x7d, 0xad, 0x17, + 0x81, 0x31, 0x20, 0x2f, 0x59, 0x11, 0x52, 0xf2, 0xf2, 0x22, 0x22, 0x34, 0xb0, 0xcf, 0x37, 0x18, + 0x76, 0x7d, 0x25, 0x89, 0x3a, 0x1d, 0x0f, 0x7f, 0x63, 0x6f, 0x46, 0x16, 0x1c, 0xe5, 0x05, 0x7d, + 0x0b, 0xb8, 0xda, 0x87, 0x86, 0x06, 0x48, 0x80, 0xe7, 0xc0, 0x04, 0x23, 0xd8, 0xf7, 0x5c, 0xe9, + 0xdc, 0x53, 0x5d, 0xe5, 0x22, 0xb9, 0x8b, 0xf4, 0x29, 0x7c, 0x1d, 0x1c, 0x71, 0x88, 0xef, 0xe3, + 0x3a, 0x99, 0x1f, 0x97, 0x84, 0xb3, 0x9a, 0xf0, 0x48, 0x55, 0x6d, 0xa3, 0xce, 0xb9, 0xf9, 
0x34, + 0x03, 0x4e, 0xa7, 0xe8, 0x71, 0x95, 0xfa, 0x1c, 0x7e, 0xde, 0xe7, 0xc5, 0xd6, 0x1e, 0x23, 0x06, + 0xf5, 0x95, 0x0f, 0xcf, 0x69, 0xd9, 0x93, 0x9d, 0x9d, 0x98, 0x07, 0x7f, 0x0c, 0xc6, 0x29, 0x27, + 0x8e, 0xb0, 0x4a, 0xee, 0x7c, 0x61, 0x79, 0x79, 0xff, 0x6e, 0x56, 0x3e, 0xaa, 0xe1, 0xc7, 0xaf, + 0x09, 0x20, 0xa4, 0xf0, 0xcc, 0xbf, 0xb2, 0xa9, 0xbf, 0x25, 0xdc, 0x1c, 0xb6, 0xc1, 0x8c, 0x5c, + 0xa9, 0x50, 0x8c, 0xc8, 0x3d, 0xfd, 0x73, 0xc3, 0x1e, 0xd1, 0x90, 0xe4, 0x5d, 0x3e, 0xa1, 0x6f, + 0x31, 0xb3, 0x9e, 0x40, 0x45, 0x3d, 0x52, 0xe0, 0x12, 0x28, 0x38, 0xd4, 0x45, 0xa4, 0xd9, 0xa0, + 0x36, 0x56, 0xce, 0x38, 0xae, 0xd2, 0x4f, 0xb5, 0xbb, 0x8d, 0xe2, 0x34, 0xf0, 0x5d, 0x50, 0x70, + 0xf0, 0x83, 0x88, 0x25, 0x27, 0x59, 0x8e, 0x69, 0x79, 0x85, 0x6a, 0xf7, 0x08, 0xc5, 0xe9, 0xe0, + 0x7d, 0x50, 0x54, 0x39, 0xa5, 0xb2, 0x76, 0xeb, 0x16, 0xa7, 0x0d, 0xfa, 0x10, 0x0b, 0x3f, 0x5a, + 0x23, 0xcc, 0x26, 0x2e, 0x17, 0xae, 0x91, 0x97, 0x48, 0x66, 0x18, 0x18, 0xc5, 0x8d, 0xa1, 0x94, + 0x68, 0x17, 0x24, 0xf3, 0xb7, 0x1c, 0x38, 0x33, 0x34, 0x0c, 0xc0, 0x2b, 0x00, 0x7a, 0x9b, 0x3e, + 0x61, 0x6d, 0x52, 0xfb, 0x50, 0xd5, 0x45, 0xa2, 0x40, 0x11, 0x3a, 0xcf, 0xa9, 0x9c, 0x78, 0xa3, + 0xef, 0x14, 0x0d, 0xe0, 0x80, 0x36, 0x38, 0x2a, 0xde, 0x85, 0xd2, 0x32, 0xd5, 0xb5, 0xd0, 0xfe, + 0x1e, 0xdd, 0xff, 0xc2, 0xc0, 0x38, 0xba, 0x1a, 0x07, 0x41, 0x49, 0x4c, 0xb8, 0x02, 0x66, 0x75, + 0xb0, 0xef, 0xd1, 0xfa, 0x49, 0xad, 0xf5, 0xd9, 0x4a, 0xf2, 0x18, 0xf5, 0xd2, 0x0b, 0x88, 0x1a, + 0xf1, 0x29, 0x23, 0xb5, 0x08, 0x22, 0x9f, 0x84, 0x78, 0x3f, 0x79, 0x8c, 0x7a, 0xe9, 0xa1, 0x03, + 0x0c, 0x8d, 0x9a, 0x6a, 0xc1, 0x71, 0x09, 0xf9, 0x5a, 0x18, 0x18, 0x46, 0x65, 0x38, 0x29, 0xda, + 0x0d, 0x4b, 0x94, 0x81, 0xba, 0x76, 0x90, 0x0f, 0xe4, 0x62, 0x22, 0xf4, 0x2e, 0xf6, 0x84, 0xde, + 0xb9, 0x78, 0xa1, 0x18, 0x0b, 0xb3, 0x37, 0xc1, 0x84, 0x27, 0x5f, 0x86, 0xb6, 0xcb, 0x85, 0x21, + 0xcf, 0x29, 0x4a, 0x69, 0x11, 0x50, 0x19, 0x88, 0x58, 0xa6, 0x9f, 0x96, 0x06, 0x82, 0xd7, 0x40, + 0xbe, 0xe9, 0xd5, 0x3a, 0x89, 0xe8, 0xcd, 0x21, 0x80, 0x6b, 0x5e, 0xcd, 0x4f, 0xc0, 0x4d, 0x8a, + 0x1b, 0x8b, 0x5d, 0x24, 0x21, 0xe0, 0x27, 0x60, 0xb2, 0x93, 0xf0, 0x75, 0x75, 0x50, 0x1a, 0x02, + 0x87, 0x34, 0x69, 0x02, 0x72, 0x5a, 0x04, 0xb2, 0xce, 0x09, 0x8a, 0xe0, 0x04, 0x34, 0xd1, 0xa5, + 0x9a, 0xb4, 0xca, 0x70, 0xe8, 0x41, 0xe5, 0xb6, 0x82, 0xee, 0x9c, 0xa0, 0x08, 0xce, 0xfc, 0x31, + 0x07, 0xa6, 0x13, 0xe5, 0xdf, 0x21, 0x9b, 0x46, 0xe5, 0xf1, 0x03, 0x33, 0x8d, 0x82, 0x3b, 0x50, + 0xd3, 0x28, 0xc8, 0x57, 0x62, 0x9a, 0x18, 0xf4, 0x00, 0xd3, 0x3c, 0xcd, 0x01, 0xd8, 0xef, 0xc6, + 0xf0, 0x4b, 0x30, 0xa1, 0x02, 0xe6, 0x4b, 0x26, 0x95, 0x28, 0xbd, 0xeb, 0xfc, 0xa1, 0x51, 0x7b, + 0xea, 0xff, 0xec, 0x9e, 0xea, 0x7f, 0x72, 0x10, 0x7d, 0x52, 0x94, 0x75, 0x52, 0x7b, 0xa5, 0x2f, + 0xc0, 0xa4, 0xdf, 0x69, 0x30, 0xf2, 0xa3, 0x37, 0x18, 0x52, 0xe1, 0x51, 0x6b, 0x11, 0x41, 0xc2, + 0x1a, 0x98, 0xc6, 0xf1, 0x1a, 0x7f, 0x7c, 0xa4, 0xdf, 0x98, 0x13, 0x0d, 0x45, 0xa2, 0xb8, 0x4f, + 0xa0, 0x9a, 0xbf, 0xf7, 0x9a, 0x55, 0xbd, 0xbb, 0x7f, 0xa2, 0x59, 0x0f, 0xaf, 0xcb, 0xfa, 0x4f, + 0x58, 0xf6, 0xe7, 0x2c, 0x98, 0xeb, 0x4d, 0x13, 0x23, 0xb5, 0xd3, 0x0f, 0x07, 0xce, 0x04, 0xb2, + 0x23, 0x5d, 0x3a, 0xea, 0x02, 0xf6, 0x36, 0x17, 0x48, 0x58, 0x22, 0x77, 0xe0, 0x96, 0x30, 0x7f, + 0x49, 0xea, 0x68, 0xf4, 0x91, 0xc3, 0x57, 0x83, 0xfb, 0xf2, 0xd1, 0x94, 0x74, 0x5a, 0x0b, 0xdb, + 0x73, 0x6f, 0xfe, 0xaa, 0xd5, 0xf4, 0x6b, 0x16, 0x1c, 0x1f, 0x54, 0x22, 0xc0, 0x8a, 0x9e, 0xd2, + 0x29, 0x25, 0x95, 0xe2, 0x53, 0xba, 0x17, 0x81, 0x61, 0x0c, 0x68, 0x33, 0x3b, 0x30, 0xb1, 0x41, + 0xde, 0x1d, 0x30, 
0x9f, 0xb0, 0x7c, 0xac, 0x66, 0xd3, 0x4d, 0xc3, 0xff, 0xc3, 0xc0, 0x98, 0xdf, + 0x48, 0xa1, 0x41, 0xa9, 0xdc, 0x29, 0xd3, 0xac, 0xdc, 0x2b, 0x9f, 0x66, 0x3d, 0xea, 0xd7, 0x97, + 0x72, 0xad, 0x03, 0xd1, 0xd7, 0x67, 0xe0, 0x54, 0xd2, 0x07, 0xfa, 0x15, 0x76, 0x26, 0x0c, 0x8c, + 0x53, 0x95, 0x34, 0x22, 0x94, 0xce, 0x9f, 0xe6, 0xc8, 0xb9, 0xc3, 0x71, 0x64, 0xf3, 0x9b, 0x2c, + 0x18, 0x97, 0xcd, 0xc9, 0x21, 0x8c, 0x94, 0xae, 0x24, 0x46, 0x4a, 0x67, 0x87, 0x64, 0x38, 0x79, + 0xa3, 0xd4, 0x01, 0xd2, 0xf5, 0x9e, 0x01, 0xd2, 0xb9, 0x5d, 0x91, 0x86, 0x8f, 0x8b, 0xde, 0x03, + 0x53, 0x91, 0x40, 0xf8, 0x96, 0x28, 0x16, 0x75, 0x57, 0x95, 0x91, 0xb6, 0x8d, 0x66, 0x0c, 0x51, + 0x3b, 0x15, 0x51, 0x98, 0x14, 0x14, 0x62, 0x12, 0xf6, 0xc7, 0x2c, 0xa8, 0xfd, 0xf8, 0xc0, 0x74, + 0xaa, 0x4b, 0xdd, 0x1f, 0x13, 0xca, 0xe7, 0x1f, 0x3f, 0x2f, 0x8e, 0x3d, 0x79, 0x5e, 0x1c, 0x7b, + 0xf6, 0xbc, 0x38, 0xf6, 0x75, 0x58, 0xcc, 0x3c, 0x0e, 0x8b, 0x99, 0x27, 0x61, 0x31, 0xf3, 0x2c, + 0x2c, 0x66, 0xfe, 0x08, 0x8b, 0x99, 0xef, 0xfe, 0x2c, 0x8e, 0x7d, 0x9a, 0x6d, 0x2f, 0xfd, 0x1d, + 0x00, 0x00, 0xff, 0xff, 0x3c, 0x26, 0x41, 0xcb, 0x94, 0x19, 0x00, 0x00, } -func (m *MetricSpec) Reset() { *m = MetricSpec{} } -func (*MetricSpec) ProtoMessage() {} -func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func (m *MetricStatus) Reset() { *m = MetricStatus{} } -func (*MetricStatus) ProtoMessage() {} -func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } - -func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } -func (*ObjectMetricSource) ProtoMessage() {} -func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } - -func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } -func (*ObjectMetricStatus) ProtoMessage() {} -func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } - -func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } -func (*PodsMetricSource) ProtoMessage() {} -func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } - -func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } -func (*PodsMetricStatus) ProtoMessage() {} -func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } - -func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } -func (*ResourceMetricSource) ProtoMessage() {} -func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } - -func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } -func (*ResourceMetricStatus) ProtoMessage() {} -func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } - -func (m *Scale) Reset() { *m = Scale{} } -func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } - -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } - -func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } -func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } - -func init() { - proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v1.CrossVersionObjectReference") - proto.RegisterType((*ExternalMetricSource)(nil), 
"k8s.io.api.autoscaling.v1.ExternalMetricSource") - proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricStatus") - proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscaler") - proto.RegisterType((*HorizontalPodAutoscalerCondition)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerCondition") - proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerList") - proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerSpec") - proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerStatus") - proto.RegisterType((*MetricSpec)(nil), "k8s.io.api.autoscaling.v1.MetricSpec") - proto.RegisterType((*MetricStatus)(nil), "k8s.io.api.autoscaling.v1.MetricStatus") - proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricSource") - proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricStatus") - proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.api.autoscaling.v1.PodsMetricSource") - proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.api.autoscaling.v1.PodsMetricStatus") - proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricSource") - proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricStatus") - proto.RegisterType((*Scale)(nil), "k8s.io.api.autoscaling.v1.Scale") - proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.autoscaling.v1.ScaleSpec") - proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.autoscaling.v1.ScaleStatus") -} func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -189,29 +715,37 @@ func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { } func (m *CrossVersionObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CrossVersionObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x1a - i++ + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -219,51 +753,63 @@ func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricSource) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - if m.MetricSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MetricSelector.Size())) - n1, err := m.MetricSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.TargetAverageValue != nil { + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- + dAtA[i] = 0x22 } if m.TargetValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) - n2, err := m.TargetValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.TargetValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- + dAtA[i] = 0x1a } - if m.TargetAverageValue != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n3, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.MetricSelector != nil { + { + size, err := m.MetricSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -271,49 +817,61 @@ func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - if m.MetricSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MetricSelector.Size())) - n4, err := m.MetricSelector.MarshalTo(dAtA[i:]) + if m.CurrentAverageValue != nil { + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + { + size, err := m.CurrentValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n4 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) - n5, err := m.CurrentValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - if m.CurrentAverageValue != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n6, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.MetricSelector != nil { + { + size, err := m.MetricSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -321,41 +879,52 @@ func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n8, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n9, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -363,41 +932,52 @@ func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n10, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := 
m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -405,37 +985,46 @@ func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -443,38 +1032,45 @@ func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleTargetRef.Size())) - n12, err := m.ScaleTargetRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.TargetCPUUtilizationPercentage != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetCPUUtilizationPercentage)) + i-- + dAtA[i] = 0x20 } - i += n12 + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) + i-- + dAtA[i] = 0x18 if m.MinReplicas != nil { - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) + i-- + dAtA[i] = 0x10 } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) - if m.TargetCPUUtilizationPercentage != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetCPUUtilizationPercentage)) + { + size, err := m.ScaleTargetRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + 
dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -482,43 +1078,50 @@ func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ObservedGeneration != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + if m.CurrentCPUUtilizationPercentage != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentCPUUtilizationPercentage)) + i-- + dAtA[i] = 0x28 } + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x18 if m.LastScaleTime != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastScaleTime.Size())) - n13, err := m.LastScaleTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastScaleTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n13 + i-- + dAtA[i] = 0x12 } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) - if m.CurrentCPUUtilizationPercentage != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentCPUUtilizationPercentage)) + if m.ObservedGeneration != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *MetricSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -526,61 +1129,75 @@ func (m *MetricSpec) Marshal() (dAtA []byte, err error) { } func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n14, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n15, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n15 + i-- + dAtA[i] = 0x2a } if m.Resource != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n16, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n16 + i-- + dAtA[i] = 0x22 } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n17, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n17 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -588,61 +1205,75 @@ func (m *MetricStatus) Marshal() (dAtA []byte, err error) { } func (m *MetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n18, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n19, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n19 + i-- + dAtA[i] = 0x2a } if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n20, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n20 + i-- + dAtA[i] = 0x1a } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n21, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n21 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil 
{ return nil, err } @@ -650,57 +1281,71 @@ func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n22, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n22 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) - n23, err := m.TargetValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.AverageValue != nil { + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - i += n23 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n24, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.TargetValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n24 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.AverageValue != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n25, err := m.AverageValue.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1a + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n25 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -708,57 +1353,71 @@ func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n26, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) - n27, err := m.CurrentValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.AverageValue != nil { + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - i += n27 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, 
err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n28, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.CurrentValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n28 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.AverageValue != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n29, err := m.AverageValue.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1a + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n29 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -766,39 +1425,49 @@ func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n30, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n30 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n31, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n31 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -806,39 +1475,49 @@ func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n32, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n32 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if 
err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n33, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n33 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -846,36 +1525,44 @@ func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.TargetAverageUtilization != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) - } if m.TargetAverageValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n34, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n34 + i-- + dAtA[i] = 0x1a + } + if m.TargetAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) + i-- + dAtA[i] = 0x10 } - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -883,34 +1570,42 @@ func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.CurrentAverageUtilization != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n35, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.CurrentAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) + i-- + dAtA[i] = 0x10 } 
- i += n35 - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Scale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -918,41 +1613,52 @@ func (m *Scale) Marshal() (dAtA []byte, err error) { } func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n36, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n36 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n37, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n37 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n38, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n38 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -960,20 +1666,25 @@ func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { } func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -981,30 +1692,41 @@ func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { } func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x12 - i++ + i -= len(m.Selector) + copy(dAtA[i:], m.Selector) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Selector))) - i += copy(dAtA[i:], m.Selector) - return i, nil + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base 
:= offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *CrossVersionObjectReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -1017,6 +1739,9 @@ func (m *CrossVersionObjectReference) Size() (n int) { } func (m *ExternalMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -1037,6 +1762,9 @@ func (m *ExternalMetricSource) Size() (n int) { } func (m *ExternalMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -1055,6 +1783,9 @@ func (m *ExternalMetricStatus) Size() (n int) { } func (m *HorizontalPodAutoscaler) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1067,6 +1798,9 @@ func (m *HorizontalPodAutoscaler) Size() (n int) { } func (m *HorizontalPodAutoscalerCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1083,6 +1817,9 @@ func (m *HorizontalPodAutoscalerCondition) Size() (n int) { } func (m *HorizontalPodAutoscalerList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1097,6 +1834,9 @@ func (m *HorizontalPodAutoscalerList) Size() (n int) { } func (m *HorizontalPodAutoscalerSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ScaleTargetRef.Size() @@ -1112,6 +1852,9 @@ func (m *HorizontalPodAutoscalerSpec) Size() (n int) { } func (m *HorizontalPodAutoscalerStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ObservedGeneration != nil { @@ -1130,6 +1873,9 @@ func (m *HorizontalPodAutoscalerStatus) Size() (n int) { } func (m *MetricSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1154,6 +1900,9 @@ func (m *MetricSpec) Size() (n int) { } func (m *MetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1178,6 +1927,9 @@ func (m *MetricStatus) Size() (n int) { } func (m *ObjectMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Target.Size() @@ -1198,6 +1950,9 @@ func (m *ObjectMetricSource) Size() (n int) { } func (m *ObjectMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Target.Size() @@ -1218,6 +1973,9 @@ func (m *ObjectMetricStatus) Size() (n int) { } func (m *PodsMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -1232,6 +1990,9 @@ func (m *PodsMetricSource) Size() (n int) { } func (m *PodsMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -1246,6 +2007,9 @@ func (m *PodsMetricStatus) Size() (n int) { } func (m *ResourceMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1261,6 +2025,9 @@ func (m *ResourceMetricSource) Size() (n int) { } func (m *ResourceMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1274,6 +2041,9 @@ func (m *ResourceMetricStatus) Size() (n int) { } func (m *Scale) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1286,6 +2056,9 @@ func (m *Scale) Size() (n int) { } func (m *ScaleSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1293,6 +2066,9 @@ func (m *ScaleSpec) Size() (n int) { } func (m *ScaleStatus) Size() (n 
int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1302,14 +2078,7 @@ func (m *ScaleStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1332,9 +2101,9 @@ func (this *ExternalMetricSource) String() string { } s := strings.Join([]string{`&ExternalMetricSource{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `TargetValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `TargetValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "resource.Quantity", 1) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1345,9 +2114,9 @@ func (this *ExternalMetricStatus) String() string { } s := strings.Join([]string{`&ExternalMetricStatus{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `CurrentAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `CurrentAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1357,7 +2126,7 @@ func (this *HorizontalPodAutoscaler) String() string { return "nil" } s := strings.Join([]string{`&HorizontalPodAutoscaler{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1371,7 +2140,7 @@ func (this *HorizontalPodAutoscalerCondition) String() string { s := strings.Join([]string{`&HorizontalPodAutoscalerCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - 
`LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -1382,9 +2151,14 @@ func (this *HorizontalPodAutoscalerList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]HorizontalPodAutoscaler{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1408,7 +2182,7 @@ func (this *HorizontalPodAutoscalerStatus) String() string { } s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, - `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "v1.Time", 1) + `,`, `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, `CurrentCPUUtilizationPercentage:` + valueToStringGenerated(this.CurrentCPUUtilizationPercentage) + `,`, @@ -1422,10 +2196,10 @@ func (this *MetricSpec) String() string { } s := strings.Join([]string{`&MetricSpec{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricSource", "PodsMetricSource", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, `}`, }, "") return s @@ -1436,10 +2210,10 @@ func (this *MetricStatus) String() string { } s := strings.Join([]string{`&MetricStatus{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, - 
`Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, `}`, }, "") return s @@ -1451,9 +2225,9 @@ func (this *ObjectMetricSource) String() string { s := strings.Join([]string{`&ObjectMetricSource{`, `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `TargetValue:` + strings.Replace(strings.Replace(this.TargetValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `TargetValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1465,9 +2239,9 @@ func (this *ObjectMetricStatus) String() string { s := strings.Join([]string{`&ObjectMetricStatus{`, `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1478,8 +2252,8 @@ func (this *PodsMetricSource) String() string { } s := strings.Join([]string{`&PodsMetricSource{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `TargetAverageValue:` + strings.Replace(strings.Replace(this.TargetAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", 
"k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `TargetAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -1490,8 +2264,8 @@ func (this *PodsMetricStatus) String() string { } s := strings.Join([]string{`&PodsMetricStatus{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -1503,7 +2277,7 @@ func (this *ResourceMetricSource) String() string { s := strings.Join([]string{`&ResourceMetricSource{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `TargetAverageUtilization:` + valueToStringGenerated(this.TargetAverageUtilization) + `,`, - `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1515,7 +2289,7 @@ func (this *ResourceMetricStatus) String() string { s := strings.Join([]string{`&ResourceMetricStatus{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, - `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1525,7 +2299,7 @@ func (this *Scale) String() string { return "nil" } s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1576,7 +2350,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1604,7 +2378,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1614,6 +2388,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1633,7 +2410,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1643,6 +2420,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1662,7 +2442,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1672,6 +2452,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1686,6 +2469,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1713,7 +2499,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1741,7 +2527,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1751,6 +2537,9 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1770,7 +2559,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1779,11 +2568,14 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MetricSelector == nil { - m.MetricSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.MetricSelector = &v1.LabelSelector{} } if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1803,7 +2595,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1812,11 +2604,14 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetValue == nil { - m.TargetValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.TargetValue = &resource.Quantity{} } if err := m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1836,7 +2631,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1845,11 +2640,14 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetAverageValue == nil { - m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.TargetAverageValue = &resource.Quantity{} } if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1864,6 +2662,9 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1891,7 +2692,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1919,7 +2720,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1929,6 +2730,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1948,7 +2752,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1957,11 +2761,14 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MetricSelector == nil { - m.MetricSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.MetricSelector = &v1.LabelSelector{} } if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1981,7 +2788,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1990,6 +2797,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2011,7 +2821,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2020,11 +2830,14 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.CurrentAverageValue == nil { - m.CurrentAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.CurrentAverageValue = &resource.Quantity{} } if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2039,6 +2852,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if 
(iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2066,7 +2882,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2094,7 +2910,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2103,6 +2919,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2124,7 +2943,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2133,6 +2952,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2154,7 +2976,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2163,6 +2985,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2179,6 +3004,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2206,7 +3034,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2234,7 +3062,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2244,6 +3072,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2263,7 +3094,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2273,6 +3104,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2292,7 +3126,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2301,6 +3135,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2322,7 +3159,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2332,6 +3169,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2351,7 +3191,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2361,6 +3201,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2375,6 +3218,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2402,7 +3248,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2430,7 +3276,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2439,6 +3285,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2460,7 +3309,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2469,6 +3318,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2486,6 +3338,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2513,7 +3368,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2541,7 +3396,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2550,6 +3405,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2571,7 +3429,7 @@ func (m 
*HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2591,7 +3449,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxReplicas |= (int32(b) & 0x7F) << shift + m.MaxReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2610,7 +3468,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2625,6 +3483,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2652,7 +3513,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2680,7 +3541,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2700,7 +3561,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2709,11 +3570,14 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.LastScaleTime == nil { - m.LastScaleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.LastScaleTime = &v1.Time{} } if err := m.LastScaleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2733,7 +3597,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift + m.CurrentReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2752,7 +3616,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredReplicas |= (int32(b) & 0x7F) << shift + m.DesiredReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2771,7 +3635,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2786,6 +3650,9 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2813,7 +3680,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2841,7 +3708,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2851,6 +3718,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -2870,7 +3740,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2879,6 +3749,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2903,7 +3776,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2912,6 +3785,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2936,7 +3812,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2945,6 +3821,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2969,7 +3848,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2978,6 +3857,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2997,6 +3879,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3024,7 +3909,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3052,7 +3937,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3062,6 +3947,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3081,7 +3969,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3090,6 +3978,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3114,7 +4005,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3123,6 +4014,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -3147,7 +4041,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3156,6 +4050,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3180,7 +4077,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3189,6 +4086,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3208,6 +4108,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3235,7 +4138,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3263,7 +4166,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3272,6 +4175,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3293,7 +4199,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3303,6 +4209,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3322,7 +4231,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3331,6 +4240,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3352,7 +4264,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3361,11 +4273,14 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3385,7 +4300,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + 
msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3394,11 +4309,14 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.AverageValue = &resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3413,6 +4331,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3440,7 +4361,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3468,7 +4389,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3477,6 +4398,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3498,7 +4422,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3508,6 +4432,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3527,7 +4454,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3536,6 +4463,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3557,7 +4487,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3566,11 +4496,14 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3590,7 +4523,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3599,11 +4532,14 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = 
&k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.AverageValue = &resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3618,6 +4554,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3645,7 +4584,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3673,7 +4612,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3683,6 +4622,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3702,7 +4644,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3711,6 +4653,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3732,7 +4677,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3741,11 +4686,14 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3760,6 +4708,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3787,7 +4738,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3815,7 +4766,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3825,6 +4776,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3844,7 +4798,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3853,6 +4807,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { 
return io.ErrUnexpectedEOF } @@ -3874,7 +4831,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3883,11 +4840,14 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3902,6 +4862,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3929,7 +4892,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3957,7 +4920,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3967,6 +4930,9 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3986,7 +4952,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4006,7 +4972,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4015,11 +4981,14 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetAverageValue == nil { - m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.TargetAverageValue = &resource.Quantity{} } if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4034,6 +5003,9 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4061,7 +5033,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4089,7 +5061,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4099,6 +5071,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4118,7 +5093,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4138,7 +5113,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4147,6 +5122,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4163,6 +5141,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4190,7 +5171,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4218,7 +5199,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4227,6 +5208,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4248,7 +5232,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4257,6 +5241,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4278,7 +5265,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4287,6 +5274,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4303,6 +5293,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4330,7 +5323,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4358,7 +5351,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4372,6 +5365,9 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4399,7 +5395,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4427,7 +5423,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << 
shift if b < 0x80 { break } @@ -4446,7 +5442,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4456,6 +5452,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4470,6 +5469,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4536,10 +5538,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -4568,6 +5573,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -4586,106 +5594,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1516 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcf, 0x6f, 0x13, 0xc7, - 0x17, 0x8f, 0x7f, 0x24, 0x24, 0xe3, 0x90, 0xe4, 0x3b, 0x20, 0x08, 0xe1, 0x8b, 0x37, 0xda, 0x22, - 0x44, 0x7f, 0xb0, 0x6e, 0x52, 0x8a, 0xe8, 0x31, 0x76, 0x4b, 0x41, 0x8d, 0x21, 0x4c, 0x02, 0xa5, - 0x3f, 0xc5, 0x64, 0x3d, 0x38, 0x43, 0xbc, 0xbb, 0xd6, 0xec, 0xd8, 0x22, 0x48, 0x95, 0xda, 0x43, - 0xef, 0xbd, 0xb4, 0xea, 0xb1, 0x95, 0x7a, 0xed, 0x99, 0x73, 0x6f, 0x1c, 0x39, 0x20, 0x95, 0xd3, - 0xaa, 0x6c, 0x8f, 0xfd, 0x0f, 0x38, 0x55, 0xf3, 0xc3, 0xeb, 0x5d, 0xdb, 0xeb, 0x24, 0x26, 0x44, - 0x6d, 0x6f, 0x3b, 0x33, 0xef, 0x7d, 0xde, 0xec, 0x7b, 0x6f, 0xde, 0x2f, 0x50, 0xde, 0xbe, 0xec, - 0x5b, 0xd4, 0x2b, 0x6d, 0xb7, 0x36, 0x09, 0x73, 0x09, 0x27, 0x7e, 0xa9, 0x4d, 0xdc, 0x9a, 0xc7, - 0x4a, 0xfa, 0x00, 0x37, 0x69, 0x09, 0xb7, 0xb8, 0xe7, 0xdb, 0xb8, 0x41, 0xdd, 0x7a, 0xa9, 0xbd, - 0x54, 0xaa, 0x13, 0x97, 0x30, 0xcc, 0x49, 0xcd, 0x6a, 0x32, 0x8f, 0x7b, 0xf0, 0x94, 0x22, 0xb5, - 0x70, 0x93, 0x5a, 0x31, 0x52, 0xab, 0xbd, 0xb4, 0x70, 0xa1, 0x4e, 0xf9, 0x56, 0x6b, 0xd3, 0xb2, - 0x3d, 0xa7, 0x54, 0xf7, 0xea, 0x5e, 0x49, 0x72, 0x6c, 0xb6, 0xee, 0xc9, 0x95, 0x5c, 0xc8, 0x2f, - 0x85, 0xb4, 0x60, 0xc6, 0x84, 0xda, 0x1e, 0x23, 0x03, 0xa4, 0x2d, 0x5c, 0xec, 0xd2, 0x38, 0xd8, - 0xde, 0xa2, 0x2e, 0x61, 0x3b, 0xa5, 0xe6, 0x76, 0x5d, 0x32, 0x31, 0xe2, 0x7b, 0x2d, 0x66, 0x93, - 0x7d, 0x71, 0xf9, 0x25, 0x87, 0x70, 0x3c, 0x48, 0x56, 0x29, 0x8d, 0x8b, 0xb5, 0x5c, 0x4e, 0x9d, - 0x7e, 0x31, 0x97, 0x76, 0x63, 0xf0, 0xed, 0x2d, 0xe2, 0xe0, 0x5e, 0x3e, 0xf3, 0xfb, 0x0c, 0x38, - 0x5d, 0x61, 0x9e, 0xef, 0xdf, 0x26, 0xcc, 0xa7, 0x9e, 0x7b, 0x63, 0xf3, 0x3e, 0xb1, 0x39, 0x22, - 0xf7, 0x08, 0x23, 0xae, 0x4d, 0xe0, 0x22, 0xc8, 0x6f, 0x53, 0xb7, 0x36, 0x9f, 0x59, 0xcc, 0x9c, - 0x9f, 0x2a, 0x4f, 0x3f, 0x0e, 0x8c, 0xb1, 0x30, 0x30, 0xf2, 0x1f, 0x51, 0xb7, 0x86, 0xe4, 0x89, - 0xa0, 0x70, 0xb1, 0x43, 0xe6, 0xb3, 0x49, 0x8a, 0xeb, 0xd8, 0x21, 0x48, 
0x9e, 0xc0, 0x65, 0x00, - 0x70, 0x93, 0x6a, 0x01, 0xf3, 0x39, 0x49, 0x07, 0x35, 0x1d, 0x58, 0x59, 0xbb, 0xa6, 0x4f, 0x50, - 0x8c, 0xca, 0xfc, 0x21, 0x07, 0x8e, 0x7f, 0xf0, 0x80, 0x13, 0xe6, 0xe2, 0x46, 0x95, 0x70, 0x46, - 0xed, 0x75, 0xa9, 0x5f, 0x01, 0xe6, 0xc8, 0xb5, 0x10, 0xa0, 0xaf, 0x15, 0x81, 0x55, 0xa3, 0x13, - 0x14, 0xa3, 0x82, 0x1e, 0x98, 0x51, 0xab, 0x75, 0xd2, 0x20, 0x36, 0xf7, 0x98, 0xbc, 0x6c, 0x61, - 0xf9, 0x1d, 0xab, 0xeb, 0x40, 0x91, 0xd6, 0xac, 0xe6, 0x76, 0x5d, 0x6c, 0xf8, 0x96, 0x30, 0x8e, - 0xd5, 0x5e, 0xb2, 0x56, 0xf1, 0x26, 0x69, 0x74, 0x58, 0xcb, 0x30, 0x0c, 0x8c, 0x99, 0x6a, 0x02, - 0x0e, 0xf5, 0xc0, 0x43, 0x0c, 0x0a, 0x1c, 0xb3, 0x3a, 0xe1, 0xb7, 0x71, 0xa3, 0x45, 0xe4, 0x2f, - 0x17, 0x96, 0xad, 0x61, 0xd2, 0xac, 0x8e, 0x03, 0x59, 0x37, 0x5b, 0xd8, 0xe5, 0x94, 0xef, 0x94, - 0x67, 0xc3, 0xc0, 0x28, 0x6c, 0x74, 0x61, 0x50, 0x1c, 0x13, 0xb6, 0x01, 0x54, 0xcb, 0x95, 0x36, - 0x61, 0xb8, 0x4e, 0x94, 0xa4, 0xfc, 0x48, 0x92, 0x4e, 0x84, 0x81, 0x01, 0x37, 0xfa, 0xd0, 0xd0, - 0x00, 0x09, 0xe6, 0x4f, 0xfd, 0x86, 0xe1, 0x98, 0xb7, 0xfc, 0x7f, 0x87, 0x61, 0xb6, 0xc0, 0xb4, - 0xdd, 0x62, 0x8c, 0xb8, 0x2f, 0x65, 0x99, 0xe3, 0xfa, 0xb7, 0xa6, 0x2b, 0x31, 0x2c, 0x94, 0x40, - 0x86, 0x3b, 0xe0, 0x98, 0x5e, 0x1f, 0x80, 0x81, 0x4e, 0x86, 0x81, 0x71, 0xac, 0xd2, 0x0f, 0x87, - 0x06, 0xc9, 0x30, 0x1f, 0x65, 0xc1, 0xc9, 0xab, 0x1e, 0xa3, 0x0f, 0x3d, 0x97, 0xe3, 0xc6, 0x9a, - 0x57, 0x5b, 0xd1, 0xb1, 0x91, 0x30, 0x78, 0x17, 0x4c, 0x0a, 0xed, 0xd5, 0x30, 0xc7, 0xd2, 0x46, - 0x85, 0xe5, 0xb7, 0xf7, 0xa6, 0x6b, 0x15, 0x18, 0xaa, 0x84, 0xe3, 0xae, 0x55, 0xbb, 0x7b, 0x28, - 0x42, 0x85, 0x77, 0x40, 0xde, 0x6f, 0x12, 0x5b, 0x5b, 0xf2, 0x92, 0x95, 0x1a, 0xa3, 0xad, 0x94, - 0x3b, 0xae, 0x37, 0x89, 0xdd, 0x8d, 0x23, 0x62, 0x85, 0x24, 0x22, 0xbc, 0x0b, 0x26, 0x7c, 0xe9, - 0x6b, 0xda, 0x6c, 0x97, 0x47, 0xc0, 0x96, 0xfc, 0xe5, 0x19, 0x8d, 0x3e, 0xa1, 0xd6, 0x48, 0xe3, - 0x9a, 0xdf, 0xe6, 0xc0, 0x62, 0x0a, 0x67, 0xc5, 0x73, 0x6b, 0x94, 0x53, 0xcf, 0x85, 0x57, 0x41, - 0x9e, 0xef, 0x34, 0x3b, 0x2e, 0x7e, 0xb1, 0x73, 0xd1, 0x8d, 0x9d, 0x26, 0x79, 0x11, 0x18, 0x67, - 0x77, 0xe3, 0x17, 0x74, 0x48, 0x22, 0xc0, 0xd5, 0xe8, 0x87, 0xb2, 0x09, 0x2c, 0x7d, 0xad, 0x17, - 0x81, 0x31, 0x20, 0x2f, 0x59, 0x11, 0x52, 0xf2, 0xf2, 0x22, 0x22, 0x34, 0xb0, 0xcf, 0x37, 0x18, - 0x76, 0x7d, 0x25, 0x89, 0x3a, 0x1d, 0x0f, 0x7f, 0x63, 0x6f, 0x46, 0x16, 0x1c, 0xe5, 0x05, 0x7d, - 0x0b, 0xb8, 0xda, 0x87, 0x86, 0x06, 0x48, 0x80, 0xe7, 0xc0, 0x04, 0x23, 0xd8, 0xf7, 0x5c, 0xe9, - 0xdc, 0x53, 0x5d, 0xe5, 0x22, 0xb9, 0x8b, 0xf4, 0x29, 0x7c, 0x1d, 0x1c, 0x71, 0x88, 0xef, 0xe3, - 0x3a, 0x99, 0x1f, 0x97, 0x84, 0xb3, 0x9a, 0xf0, 0x48, 0x55, 0x6d, 0xa3, 0xce, 0xb9, 0xf9, 0x34, - 0x03, 0x4e, 0xa7, 0xe8, 0x71, 0x95, 0xfa, 0x1c, 0x7e, 0xde, 0xe7, 0xc5, 0xd6, 0x1e, 0x23, 0x06, - 0xf5, 0x95, 0x0f, 0xcf, 0x69, 0xd9, 0x93, 0x9d, 0x9d, 0x98, 0x07, 0x7f, 0x0c, 0xc6, 0x29, 0x27, - 0x8e, 0xb0, 0x4a, 0xee, 0x7c, 0x61, 0x79, 0x79, 0xff, 0x6e, 0x56, 0x3e, 0xaa, 0xe1, 0xc7, 0xaf, - 0x09, 0x20, 0xa4, 0xf0, 0xcc, 0xbf, 0xb2, 0xa9, 0xbf, 0x25, 0xdc, 0x1c, 0xb6, 0xc1, 0x8c, 0x5c, - 0xa9, 0x50, 0x8c, 0xc8, 0x3d, 0xfd, 0x73, 0xc3, 0x1e, 0xd1, 0x90, 0xe4, 0x5d, 0x3e, 0xa1, 0x6f, - 0x31, 0xb3, 0x9e, 0x40, 0x45, 0x3d, 0x52, 0xe0, 0x12, 0x28, 0x38, 0xd4, 0x45, 0xa4, 0xd9, 0xa0, - 0x36, 0x56, 0xce, 0x38, 0xae, 0xd2, 0x4f, 0xb5, 0xbb, 0x8d, 0xe2, 0x34, 0xf0, 0x5d, 0x50, 0x70, - 0xf0, 0x83, 0x88, 0x25, 0x27, 0x59, 0x8e, 0x69, 0x79, 0x85, 0x6a, 0xf7, 0x08, 0xc5, 0xe9, 0xe0, - 0x7d, 0x50, 0x54, 0x39, 0xa5, 0xb2, 0x76, 0xeb, 0x16, 0xa7, 0x0d, 0xfa, 0x10, 0x0b, 0x3f, 0x5a, - 
0x23, 0xcc, 0x26, 0x2e, 0x17, 0xae, 0x91, 0x97, 0x48, 0x66, 0x18, 0x18, 0xc5, 0x8d, 0xa1, 0x94, - 0x68, 0x17, 0x24, 0xf3, 0xb7, 0x1c, 0x38, 0x33, 0x34, 0x0c, 0xc0, 0x2b, 0x00, 0x7a, 0x9b, 0x3e, - 0x61, 0x6d, 0x52, 0xfb, 0x50, 0xd5, 0x45, 0xa2, 0x40, 0x11, 0x3a, 0xcf, 0xa9, 0x9c, 0x78, 0xa3, - 0xef, 0x14, 0x0d, 0xe0, 0x80, 0x36, 0x38, 0x2a, 0xde, 0x85, 0xd2, 0x32, 0xd5, 0xb5, 0xd0, 0xfe, - 0x1e, 0xdd, 0xff, 0xc2, 0xc0, 0x38, 0xba, 0x1a, 0x07, 0x41, 0x49, 0x4c, 0xb8, 0x02, 0x66, 0x75, - 0xb0, 0xef, 0xd1, 0xfa, 0x49, 0xad, 0xf5, 0xd9, 0x4a, 0xf2, 0x18, 0xf5, 0xd2, 0x0b, 0x88, 0x1a, - 0xf1, 0x29, 0x23, 0xb5, 0x08, 0x22, 0x9f, 0x84, 0x78, 0x3f, 0x79, 0x8c, 0x7a, 0xe9, 0xa1, 0x03, - 0x0c, 0x8d, 0x9a, 0x6a, 0xc1, 0x71, 0x09, 0xf9, 0x5a, 0x18, 0x18, 0x46, 0x65, 0x38, 0x29, 0xda, - 0x0d, 0x4b, 0x94, 0x81, 0xba, 0x76, 0x90, 0x0f, 0xe4, 0x62, 0x22, 0xf4, 0x2e, 0xf6, 0x84, 0xde, - 0xb9, 0x78, 0xa1, 0x18, 0x0b, 0xb3, 0x37, 0xc1, 0x84, 0x27, 0x5f, 0x86, 0xb6, 0xcb, 0x85, 0x21, - 0xcf, 0x29, 0x4a, 0x69, 0x11, 0x50, 0x19, 0x88, 0x58, 0xa6, 0x9f, 0x96, 0x06, 0x82, 0xd7, 0x40, - 0xbe, 0xe9, 0xd5, 0x3a, 0x89, 0xe8, 0xcd, 0x21, 0x80, 0x6b, 0x5e, 0xcd, 0x4f, 0xc0, 0x4d, 0x8a, - 0x1b, 0x8b, 0x5d, 0x24, 0x21, 0xe0, 0x27, 0x60, 0xb2, 0x93, 0xf0, 0x75, 0x75, 0x50, 0x1a, 0x02, - 0x87, 0x34, 0x69, 0x02, 0x72, 0x5a, 0x04, 0xb2, 0xce, 0x09, 0x8a, 0xe0, 0x04, 0x34, 0xd1, 0xa5, - 0x9a, 0xb4, 0xca, 0x70, 0xe8, 0x41, 0xe5, 0xb6, 0x82, 0xee, 0x9c, 0xa0, 0x08, 0xce, 0xfc, 0x31, - 0x07, 0xa6, 0x13, 0xe5, 0xdf, 0x21, 0x9b, 0x46, 0xe5, 0xf1, 0x03, 0x33, 0x8d, 0x82, 0x3b, 0x50, - 0xd3, 0x28, 0xc8, 0x57, 0x62, 0x9a, 0x18, 0xf4, 0x00, 0xd3, 0x3c, 0xcd, 0x01, 0xd8, 0xef, 0xc6, - 0xf0, 0x4b, 0x30, 0xa1, 0x02, 0xe6, 0x4b, 0x26, 0x95, 0x28, 0xbd, 0xeb, 0xfc, 0xa1, 0x51, 0x7b, - 0xea, 0xff, 0xec, 0x9e, 0xea, 0x7f, 0x72, 0x10, 0x7d, 0x52, 0x94, 0x75, 0x52, 0x7b, 0xa5, 0x2f, - 0xc0, 0xa4, 0xdf, 0x69, 0x30, 0xf2, 0xa3, 0x37, 0x18, 0x52, 0xe1, 0x51, 0x6b, 0x11, 0x41, 0xc2, - 0x1a, 0x98, 0xc6, 0xf1, 0x1a, 0x7f, 0x7c, 0xa4, 0xdf, 0x98, 0x13, 0x0d, 0x45, 0xa2, 0xb8, 0x4f, - 0xa0, 0x9a, 0xbf, 0xf7, 0x9a, 0x55, 0xbd, 0xbb, 0x7f, 0xa2, 0x59, 0x0f, 0xaf, 0xcb, 0xfa, 0x4f, - 0x58, 0xf6, 0xe7, 0x2c, 0x98, 0xeb, 0x4d, 0x13, 0x23, 0xb5, 0xd3, 0x0f, 0x07, 0xce, 0x04, 0xb2, - 0x23, 0x5d, 0x3a, 0xea, 0x02, 0xf6, 0x36, 0x17, 0x48, 0x58, 0x22, 0x77, 0xe0, 0x96, 0x30, 0x7f, - 0x49, 0xea, 0x68, 0xf4, 0x91, 0xc3, 0x57, 0x83, 0xfb, 0xf2, 0xd1, 0x94, 0x74, 0x5a, 0x0b, 0xdb, - 0x73, 0x6f, 0xfe, 0xaa, 0xd5, 0xf4, 0x6b, 0x16, 0x1c, 0x1f, 0x54, 0x22, 0xc0, 0x8a, 0x9e, 0xd2, - 0x29, 0x25, 0x95, 0xe2, 0x53, 0xba, 0x17, 0x81, 0x61, 0x0c, 0x68, 0x33, 0x3b, 0x30, 0xb1, 0x41, - 0xde, 0x1d, 0x30, 0x9f, 0xb0, 0x7c, 0xac, 0x66, 0xd3, 0x4d, 0xc3, 0xff, 0xc3, 0xc0, 0x98, 0xdf, - 0x48, 0xa1, 0x41, 0xa9, 0xdc, 0x29, 0xd3, 0xac, 0xdc, 0x2b, 0x9f, 0x66, 0x3d, 0xea, 0xd7, 0x97, - 0x72, 0xad, 0x03, 0xd1, 0xd7, 0x67, 0xe0, 0x54, 0xd2, 0x07, 0xfa, 0x15, 0x76, 0x26, 0x0c, 0x8c, - 0x53, 0x95, 0x34, 0x22, 0x94, 0xce, 0x9f, 0xe6, 0xc8, 0xb9, 0xc3, 0x71, 0x64, 0xf3, 0x9b, 0x2c, - 0x18, 0x97, 0xcd, 0xc9, 0x21, 0x8c, 0x94, 0xae, 0x24, 0x46, 0x4a, 0x67, 0x87, 0x64, 0x38, 0x79, - 0xa3, 0xd4, 0x01, 0xd2, 0xf5, 0x9e, 0x01, 0xd2, 0xb9, 0x5d, 0x91, 0x86, 0x8f, 0x8b, 0xde, 0x03, - 0x53, 0x91, 0x40, 0xf8, 0x96, 0x28, 0x16, 0x75, 0x57, 0x95, 0x91, 0xb6, 0x8d, 0x66, 0x0c, 0x51, - 0x3b, 0x15, 0x51, 0x98, 0x14, 0x14, 0x62, 0x12, 0xf6, 0xc7, 0x2c, 0xa8, 0xfd, 0xf8, 0xc0, 0x74, - 0xaa, 0x4b, 0xdd, 0x1f, 0x13, 0xca, 0xe7, 0x1f, 0x3f, 0x2f, 0x8e, 0x3d, 0x79, 0x5e, 0x1c, 0x7b, - 0xf6, 0xbc, 0x38, 0xf6, 
0x75, 0x58, 0xcc, 0x3c, 0x0e, 0x8b, 0x99, 0x27, 0x61, 0x31, 0xf3, 0x2c, - 0x2c, 0x66, 0xfe, 0x08, 0x8b, 0x99, 0xef, 0xfe, 0x2c, 0x8e, 0x7d, 0x9a, 0x6d, 0x2f, 0xfd, 0x1d, - 0x00, 0x00, 0xff, 0xff, 0x3c, 0x26, 0x41, 0xcb, 0x94, 0x19, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.proto b/vendor/k8s.io/api/autoscaling/v1/generated.proto index 5b56b2ac8..f50ed9d1f 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v1/generated.proto @@ -32,7 +32,7 @@ option go_package = "v1"; // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" optional string kind = 1; // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names @@ -88,11 +88,11 @@ message ExternalMetricStatus { // configuration of a horizontal pod autoscaler. message HorizontalPodAutoscaler { - // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional HorizontalPodAutoscalerSpec spec = 2; @@ -141,7 +141,11 @@ message HorizontalPodAutoscalerSpec { // and will set the desired number of pods by using its Scale subresource. optional CrossVersionObjectReference scaleTargetRef = 1; - // lower limit for the number of pods that can be set by the autoscaler, default 1. + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. // +optional optional int32 minReplicas = 2; @@ -380,15 +384,15 @@ message ResourceMetricStatus { // Scale represents a scaling request for a resource. message Scale { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional optional ScaleStatus status = 3; } diff --git a/vendor/k8s.io/api/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go index c03af13ae..519bd7869 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types.go +++ b/vendor/k8s.io/api/autoscaling/v1/types.go @@ -17,14 +17,14 @@ limitations under the License. package v1 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // CrossVersionObjectReference contains enough information to let you identify the referred resource. type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` @@ -38,7 +38,11 @@ type HorizontalPodAutoscalerSpec struct { // reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption // and will set the desired number of pods by using its Scale subresource. ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` - // lower limit for the number of pods that can be set by the autoscaler, default 1. + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. // +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. @@ -78,11 +82,11 @@ type HorizontalPodAutoscalerStatus struct { // configuration of a horizontal pod autoscaler. type HorizontalPodAutoscaler struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -109,15 +113,15 @@ type HorizontalPodAutoscalerList struct { // Scale represents a scaling request for a resource. type Scale struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. 
// +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } diff --git a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go index 72ac97271..129ce2b48 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"", + "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", "apiVersion": "API version of the referent", } @@ -64,8 +64,8 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscaler = map[string]string{ "": "configuration of a horizontal pod autoscaler.", - "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", "status": "current information about the autoscaler.", } @@ -99,7 +99,7 @@ func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscalerSpec = map[string]string{ "": "specification of a horizontal pod autoscaler.", "scaleTargetRef": "reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption and will set the desired number of pods by using its Scale subresource.", - "minReplicas": "lower limit for the number of pods that can be set by the autoscaler, default 1.", + "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. 
Scaling is active as long as at least one metric value is available.", "maxReplicas": "upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.", "targetCPUUtilizationPercentage": "target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.", } @@ -219,9 +219,9 @@ func (ResourceMetricStatus) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", } func (Scale) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go index 3fda47d54..ddb601128 100644 --- a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go @@ -148,7 +148,7 @@ func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerC func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]HorizontalPodAutoscaler, len(*in)) diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go index b6a5f3562..0b6ed3815 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go @@ -17,45 +17,24 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto -/* - Package v2beta1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto - - It has these top-level messages: - CrossVersionObjectReference - ExternalMetricSource - ExternalMetricStatus - HorizontalPodAutoscaler - HorizontalPodAutoscalerCondition - HorizontalPodAutoscalerList - HorizontalPodAutoscalerSpec - HorizontalPodAutoscalerStatus - MetricSpec - MetricStatus - ObjectMetricSource - ObjectMetricStatus - PodsMetricSource - PodsMetricStatus - ResourceMetricSource - ResourceMetricStatus -*/ package v2beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" + proto "github.com/gogo/protobuf/proto" -import strings "strings" -import reflect "reflect" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -71,76 +50,450 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} + return fileDescriptor_26c1bfc7a52d0478, []int{0} +} +func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CrossVersionObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CrossVersionObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_CrossVersionObjectReference.Merge(m, src) +} +func (m *CrossVersionObjectReference) XXX_Size() int { + return m.Size() +} +func (m *CrossVersionObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_CrossVersionObjectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo + +func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } +func (*ExternalMetricSource) ProtoMessage() {} +func (*ExternalMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{1} +} +func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricSource.Merge(m, src) +} +func (m *ExternalMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo + +func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } +func (*ExternalMetricStatus) ProtoMessage() {} +func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { + return 
fileDescriptor_26c1bfc7a52d0478, []int{2} +} +func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricStatus.Merge(m, src) +} +func (m *ExternalMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricStatus.DiscardUnknown(m) } -func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } -func (*ExternalMetricSource) ProtoMessage() {} -func (*ExternalMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo -func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } -func (*ExternalMetricStatus) ProtoMessage() {} -func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (*HorizontalPodAutoscaler) ProtoMessage() {} +func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{3} +} +func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscaler) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscaler.Merge(m, src) +} +func (m *HorizontalPodAutoscaler) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscaler) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscaler.DiscardUnknown(m) +} -func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } -func (*HorizontalPodAutoscaler) ProtoMessage() {} -func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} + return fileDescriptor_26c1bfc7a52d0478, []int{4} } +func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerCondition.Merge(m, src) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerCondition) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func 
(*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} + return fileDescriptor_26c1bfc7a52d0478, []int{5} +} +func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } +func (m *HorizontalPodAutoscalerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerList.Merge(m, src) +} +func (m *HorizontalPodAutoscalerList) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerList) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerList.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} + return fileDescriptor_26c1bfc7a52d0478, []int{6} +} +func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerSpec.Merge(m, src) +} +func (m *HorizontalPodAutoscalerSpec) XXX_Size() int { + return m.Size() } +func (m *HorizontalPodAutoscalerSpec) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptor_26c1bfc7a52d0478, []int{7} +} +func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerStatus.Merge(m, src) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo + +func (m *MetricSpec) Reset() { *m = MetricSpec{} } +func (*MetricSpec) ProtoMessage() {} +func (*MetricSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{8} +} +func (m *MetricSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricSpec) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_MetricSpec.Merge(m, src) +} +func (m *MetricSpec) XXX_Size() int { + return m.Size() +} +func (m *MetricSpec) XXX_DiscardUnknown() { + xxx_messageInfo_MetricSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricSpec proto.InternalMessageInfo + +func (m *MetricStatus) Reset() { *m = MetricStatus{} } +func (*MetricStatus) ProtoMessage() {} +func (*MetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{9} +} +func (m *MetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricStatus.Merge(m, src) +} +func (m *MetricStatus) XXX_Size() int { + return m.Size() +} +func (m *MetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_MetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricStatus proto.InternalMessageInfo + +func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } +func (*ObjectMetricSource) ProtoMessage() {} +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{10} +} +func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricSource.Merge(m, src) +} +func (m *ObjectMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo + +func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } +func (*ObjectMetricStatus) ProtoMessage() {} +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{11} +} +func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricStatus.Merge(m, src) +} +func (m *ObjectMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricStatus.DiscardUnknown(m) } -func (m *MetricSpec) Reset() { *m = MetricSpec{} } -func (*MetricSpec) ProtoMessage() {} -func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo + +func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } +func (*PodsMetricSource) ProtoMessage() {} +func (*PodsMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{12} +} +func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return 
b[:n], nil +} +func (m *PodsMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricSource.Merge(m, src) +} +func (m *PodsMetricSource) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricSource.DiscardUnknown(m) +} -func (m *MetricStatus) Reset() { *m = MetricStatus{} } -func (*MetricStatus) ProtoMessage() {} -func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo -func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } -func (*ObjectMetricSource) ProtoMessage() {} -func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } +func (*PodsMetricStatus) ProtoMessage() {} +func (*PodsMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{13} +} +func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricStatus.Merge(m, src) +} +func (m *PodsMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricStatus.DiscardUnknown(m) +} -func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } -func (*ObjectMetricStatus) ProtoMessage() {} -func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo -func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } -func (*PodsMetricSource) ProtoMessage() {} -func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } +func (*ResourceMetricSource) ProtoMessage() {} +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{14} +} +func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricSource.Merge(m, src) +} +func (m *ResourceMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricSource.DiscardUnknown(m) +} -func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } -func (*PodsMetricStatus) ProtoMessage() {} -func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo -func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } -func (*ResourceMetricSource) ProtoMessage() {} -func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } +func (*ResourceMetricStatus) ProtoMessage() {} +func (*ResourceMetricStatus) 
Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{15} +} +func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricStatus.Merge(m, src) +} +func (m *ResourceMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricStatus.DiscardUnknown(m) +} -func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } -func (*ResourceMetricStatus) ProtoMessage() {} -func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo func init() { proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v2beta1.CrossVersionObjectReference") @@ -160,10 +513,112 @@ func init() { proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2beta1.ResourceMetricSource") proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.ResourceMetricStatus") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto", fileDescriptor_26c1bfc7a52d0478) +} + +var fileDescriptor_26c1bfc7a52d0478 = []byte{ + // 1475 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcb, 0x8f, 0x1b, 0x45, + 0x13, 0x5f, 0x3f, 0x76, 0xb3, 0x69, 0x6f, 0x76, 0xf7, 0xeb, 0x44, 0x89, 0xb3, 0xf9, 0x62, 0xaf, + 0x2c, 0x84, 0x42, 0x44, 0x66, 0x12, 0xb3, 0x3c, 0x24, 0x84, 0xc4, 0xda, 0x40, 0x12, 0xb1, 0x4e, + 0x42, 0xef, 0x26, 0x42, 0x90, 0x20, 0xda, 0x33, 0x1d, 0x6f, 0xb3, 0x9e, 0x19, 0x6b, 0xba, 0x6d, + 0x65, 0x83, 0x90, 0xb8, 0x70, 0xe7, 0x02, 0x67, 0x90, 0x38, 0x21, 0xb8, 0xc2, 0x99, 0x5b, 0x8e, + 0x39, 0x26, 0x02, 0x59, 0x64, 0xf8, 0x2f, 0x72, 0x42, 0xfd, 0x98, 0xf1, 0x8c, 0x1f, 0x6b, 0xc7, + 0x38, 0xe1, 0x71, 0x9b, 0xee, 0xaa, 0xfa, 0x55, 0x4f, 0xfd, 0xaa, 0xab, 0xbb, 0x1a, 0x5c, 0xdc, + 0x7b, 0x8d, 0x19, 0xd4, 0x33, 0xf7, 0xda, 0x75, 0xe2, 0xbb, 0x84, 0x13, 0x66, 0x76, 0x88, 0x6b, + 0x7b, 0xbe, 0xa9, 0x05, 0xb8, 0x45, 0x4d, 0xdc, 0xe6, 0x1e, 0xb3, 0x70, 0x93, 0xba, 0x0d, 0xb3, + 0x53, 0xae, 0x13, 0x8e, 0x2f, 0x98, 0x0d, 0xe2, 0x12, 0x1f, 0x73, 0x62, 0x1b, 0x2d, 0xdf, 0xe3, + 0x1e, 0x2c, 0x28, 0x7d, 0x03, 0xb7, 0xa8, 0x11, 0xd3, 0x37, 0xb4, 0xfe, 0xda, 0xb9, 0x06, 0xe5, + 0xbb, 0xed, 0xba, 0x61, 0x79, 0x8e, 0xd9, 0xf0, 0x1a, 0x9e, 0x29, 0xcd, 0xea, 0xed, 0xdb, 0x72, + 0x24, 0x07, 0xf2, 0x4b, 0xc1, 0xad, 0x95, 0x62, 0xee, 0x2d, 0xcf, 0x27, 0x66, 0x67, 0xc0, 0xe5, + 0xda, 0x46, 0x4f, 0xc7, 0xc1, 0xd6, 0x2e, 0x75, 0x89, 0xbf, 0x6f, 0xb6, 0xf6, 0x1a, 0xd2, 0xc8, + 0x27, 0xcc, 0x6b, 0xfb, 0x16, 0x79, 0x22, 0x2b, 0x66, 0x3a, 0x84, 0xe3, 0x61, 0xbe, 0xcc, 0x51, + 0x56, 0x7e, 0xdb, 0xe5, 0xd4, 0x19, 0x74, 0xf3, 0xca, 0x38, 0x03, 0x66, 0xed, 0x12, 0x07, 0xf7, + 0xdb, 0x95, 0xbe, 0x4a, 0x81, 0x53, 0x55, 0xdf, 0x63, 0xec, 0x06, 0xf1, 0x19, 0xf5, 0xdc, 0xab, + 0xf5, 0x4f, 0x88, 0xc5, 0x11, 0xb9, 0x4d, 0x7c, 0xe2, 0x5a, 0x04, 0xae, 0x83, 0xec, 0x1e, 0x75, + 0xed, 0x7c, 0x6a, 0x3d, 0x75, 0xe6, 0x70, 0x65, 0xe9, 0x5e, 0xb7, 0x38, 0x17, 0x74, 0x8b, 0xd9, + 0x77, 0xa9, 0x6b, 0x23, 0x29, 0x11, 0x1a, 0x2e, 0x76, 0x48, 0x3e, 0x9d, 0xd4, 
0xb8, 0x82, 0x1d, + 0x82, 0xa4, 0x04, 0x96, 0x01, 0xc0, 0x2d, 0xaa, 0x1d, 0xe4, 0x33, 0x52, 0x0f, 0x6a, 0x3d, 0xb0, + 0x79, 0xed, 0xb2, 0x96, 0xa0, 0x98, 0x56, 0xe9, 0xeb, 0x0c, 0x38, 0xf6, 0xf6, 0x1d, 0x4e, 0x7c, + 0x17, 0x37, 0x6b, 0x84, 0xfb, 0xd4, 0xda, 0x96, 0xf1, 0x15, 0x60, 0x8e, 0x1c, 0x0b, 0x07, 0x7a, + 0x59, 0x11, 0x58, 0x2d, 0x92, 0xa0, 0x98, 0x16, 0xf4, 0xc0, 0xb2, 0x1a, 0x6d, 0x93, 0x26, 0xb1, + 0xb8, 0xe7, 0xcb, 0xc5, 0xe6, 0xca, 0x2f, 0x19, 0xbd, 0x2c, 0x8a, 0xa2, 0x66, 0xb4, 0xf6, 0x1a, + 0x62, 0x82, 0x19, 0x82, 0x1c, 0xa3, 0x73, 0xc1, 0xd8, 0xc2, 0x75, 0xd2, 0x0c, 0x4d, 0x2b, 0x30, + 0xe8, 0x16, 0x97, 0x6b, 0x09, 0x38, 0xd4, 0x07, 0x0f, 0x31, 0xc8, 0x71, 0xec, 0x37, 0x08, 0xbf, + 0x81, 0x9b, 0x6d, 0x22, 0x7f, 0x39, 0x57, 0x36, 0x0e, 0xf2, 0x66, 0x84, 0x09, 0x64, 0xbc, 0xd7, + 0xc6, 0x2e, 0xa7, 0x7c, 0xbf, 0xb2, 0x12, 0x74, 0x8b, 0xb9, 0x9d, 0x1e, 0x0c, 0x8a, 0x63, 0xc2, + 0x0e, 0x80, 0x6a, 0xb8, 0xd9, 0x21, 0x3e, 0x6e, 0x10, 0xe5, 0x29, 0x3b, 0x95, 0xa7, 0xe3, 0x41, + 0xb7, 0x08, 0x77, 0x06, 0xd0, 0xd0, 0x10, 0x0f, 0xa5, 0x6f, 0x06, 0x89, 0xe1, 0x98, 0xb7, 0xd9, + 0xbf, 0x83, 0x98, 0x5d, 0xb0, 0x64, 0xb5, 0x7d, 0x9f, 0xb8, 0x7f, 0x89, 0x99, 0x63, 0xfa, 0xb7, + 0x96, 0xaa, 0x31, 0x2c, 0x94, 0x40, 0x86, 0xfb, 0xe0, 0xa8, 0x1e, 0xcf, 0x80, 0xa0, 0x13, 0x41, + 0xb7, 0x78, 0xb4, 0x3a, 0x08, 0x87, 0x86, 0xf9, 0x28, 0xfd, 0x92, 0x06, 0x27, 0x2e, 0x79, 0x3e, + 0xbd, 0xeb, 0xb9, 0x1c, 0x37, 0xaf, 0x79, 0xf6, 0xa6, 0x2e, 0x90, 0xc4, 0x87, 0x1f, 0x83, 0x45, + 0x11, 0x3d, 0x1b, 0x73, 0x2c, 0x39, 0xca, 0x95, 0xcf, 0x4f, 0x16, 0x6b, 0x55, 0x18, 0x6a, 0x84, + 0xe3, 0x1e, 0xab, 0xbd, 0x39, 0x14, 0xa1, 0xc2, 0x5b, 0x20, 0xcb, 0x5a, 0xc4, 0xd2, 0x4c, 0xbe, + 0x6e, 0x1c, 0x5c, 0xa8, 0x8d, 0x11, 0x0b, 0xdd, 0x6e, 0x11, 0xab, 0x57, 0x4c, 0xc4, 0x08, 0x49, + 0x58, 0x48, 0xc0, 0x02, 0x93, 0x09, 0xa7, 0xb9, 0x7b, 0x63, 0x5a, 0x07, 0x12, 0xa4, 0xb2, 0xac, + 0x5d, 0x2c, 0xa8, 0x31, 0xd2, 0xe0, 0xa5, 0x2f, 0x32, 0x60, 0x7d, 0x84, 0x65, 0xd5, 0x73, 0x6d, + 0xca, 0xa9, 0xe7, 0xc2, 0x4b, 0x20, 0xcb, 0xf7, 0x5b, 0x61, 0xb2, 0x6f, 0x84, 0xab, 0xdd, 0xd9, + 0x6f, 0x91, 0xc7, 0xdd, 0xe2, 0x73, 0xe3, 0xec, 0x85, 0x1e, 0x92, 0x08, 0x70, 0x2b, 0xfa, 0xab, + 0x74, 0x02, 0x4b, 0x2f, 0xeb, 0x71, 0xb7, 0x38, 0xe4, 0x84, 0x32, 0x22, 0xa4, 0xe4, 0xe2, 0x45, + 0x6d, 0x68, 0x62, 0xc6, 0x77, 0x7c, 0xec, 0x32, 0xe5, 0x89, 0x3a, 0x61, 0xae, 0x9f, 0x9d, 0x8c, + 0x6e, 0x61, 0x51, 0x59, 0xd3, 0xab, 0x80, 0x5b, 0x03, 0x68, 0x68, 0x88, 0x07, 0xf8, 0x3c, 0x58, + 0xf0, 0x09, 0x66, 0x9e, 0x2b, 0xd3, 0xfc, 0x70, 0x2f, 0xb8, 0x48, 0xce, 0x22, 0x2d, 0x85, 0x2f, + 0x80, 0x43, 0x0e, 0x61, 0x0c, 0x37, 0x48, 0x7e, 0x5e, 0x2a, 0xae, 0x68, 0xc5, 0x43, 0x35, 0x35, + 0x8d, 0x42, 0x79, 0xe9, 0x61, 0x0a, 0x9c, 0x1a, 0x11, 0xc7, 0x2d, 0xca, 0x38, 0xbc, 0x39, 0x90, + 0xcf, 0xc6, 0x84, 0xb5, 0x83, 0x32, 0x95, 0xcd, 0xab, 0xda, 0xf7, 0x62, 0x38, 0x13, 0xcb, 0xe5, + 0x9b, 0x60, 0x9e, 0x72, 0xe2, 0x08, 0x56, 0x32, 0x67, 0x72, 0xe5, 0x57, 0xa7, 0xcc, 0xb5, 0xca, + 0x11, 0xed, 0x63, 0xfe, 0xb2, 0x40, 0x43, 0x0a, 0xb4, 0xf4, 0x6b, 0x7a, 0xe4, 0xbf, 0x89, 0x84, + 0x87, 0x9f, 0x82, 0x65, 0x39, 0x52, 0x95, 0x19, 0x91, 0xdb, 0xfa, 0x0f, 0xc7, 0xee, 0xa9, 0x03, + 0x0e, 0xf4, 0xca, 0x71, 0xbd, 0x94, 0xe5, 0xed, 0x04, 0x34, 0xea, 0x73, 0x05, 0x2f, 0x80, 0x9c, + 0x43, 0x5d, 0x44, 0x5a, 0x4d, 0x6a, 0x61, 0x95, 0x96, 0xf3, 0xea, 0x48, 0xaa, 0xf5, 0xa6, 0x51, + 0x5c, 0x07, 0xbe, 0x0c, 0x72, 0x0e, 0xbe, 0x13, 0x99, 0x64, 0xa4, 0xc9, 0x51, 0xed, 0x2f, 0x57, + 0xeb, 0x89, 0x50, 0x5c, 0x0f, 0x5e, 0x17, 0xd9, 0x20, 0xaa, 0x34, 0xcb, 0x67, 0x65, 0x98, 0xcf, + 0x8e, 
0xfb, 0x3f, 0x5d, 0xe4, 0x45, 0x89, 0x88, 0x65, 0x8e, 0x84, 0x40, 0x21, 0x56, 0xe9, 0xa7, + 0x2c, 0x38, 0x7d, 0xe0, 0xde, 0x87, 0xef, 0x00, 0xe8, 0xd5, 0x19, 0xf1, 0x3b, 0xc4, 0xbe, 0xa8, + 0xae, 0x45, 0xe2, 0x7e, 0x22, 0x62, 0x9c, 0x51, 0x47, 0xe2, 0xd5, 0x01, 0x29, 0x1a, 0x62, 0x01, + 0x2d, 0x70, 0x44, 0x6c, 0x06, 0x15, 0x50, 0xaa, 0xaf, 0x42, 0x4f, 0xb6, 0xd3, 0xfe, 0x17, 0x74, + 0x8b, 0x47, 0xb6, 0xe2, 0x20, 0x28, 0x89, 0x09, 0x37, 0xc1, 0x8a, 0xae, 0xf5, 0x7d, 0x01, 0x3e, + 0xa1, 0x23, 0xb0, 0x52, 0x4d, 0x8a, 0x51, 0xbf, 0xbe, 0x80, 0xb0, 0x09, 0xa3, 0x3e, 0xb1, 0x23, + 0x88, 0x6c, 0x12, 0xe2, 0xad, 0xa4, 0x18, 0xf5, 0xeb, 0xc3, 0x26, 0x58, 0xd6, 0xa8, 0x3a, 0xde, + 0xf9, 0x79, 0x49, 0xd9, 0x8b, 0x13, 0x52, 0xa6, 0x8a, 0x6e, 0x94, 0x83, 0xd5, 0x04, 0x16, 0xea, + 0xc3, 0x86, 0x1c, 0x00, 0x2b, 0x2c, 0x71, 0x2c, 0xbf, 0x20, 0x3d, 0xbd, 0x39, 0xe5, 0x1e, 0x8c, + 0x6a, 0x65, 0xef, 0xf8, 0x8a, 0xa6, 0x18, 0x8a, 0xf9, 0x29, 0x7d, 0x9f, 0x01, 0xa0, 0x97, 0x61, + 0x70, 0x23, 0x51, 0xe4, 0xd7, 0xfb, 0x8a, 0xfc, 0x6a, 0xfc, 0x72, 0x1a, 0x2b, 0xe8, 0x37, 0xc0, + 0x82, 0x27, 0x77, 0x9e, 0x4e, 0x86, 0xf2, 0xb8, 0x65, 0x47, 0x67, 0x69, 0x84, 0x56, 0x01, 0xa2, + 0x74, 0xea, 0xfd, 0xab, 0xd1, 0xe0, 0x15, 0x90, 0x6d, 0x79, 0x76, 0x78, 0xf8, 0x9d, 0x1f, 0x87, + 0x7a, 0xcd, 0xb3, 0x59, 0x02, 0x73, 0x51, 0xac, 0x5d, 0xcc, 0x22, 0x89, 0x03, 0x3f, 0x02, 0x8b, + 0xe1, 0x75, 0x43, 0xdf, 0x4d, 0x36, 0xc6, 0x61, 0x22, 0xad, 0x9f, 0xc0, 0x5d, 0x12, 0x15, 0x34, + 0x94, 0xa0, 0x08, 0x53, 0xe0, 0x13, 0x7d, 0x5b, 0x94, 0xb5, 0x7e, 0x02, 0xfc, 0x61, 0xd7, 0x7e, + 0x85, 0x1f, 0x4a, 0x50, 0x84, 0x59, 0xfa, 0x21, 0x03, 0x96, 0x12, 0xd7, 0xd0, 0xbf, 0x83, 0x2e, + 0x95, 0xd5, 0xb3, 0xa5, 0x4b, 0x61, 0xce, 0x9e, 0x2e, 0x85, 0xfb, 0xf4, 0xe8, 0x8a, 0xe1, 0x0f, + 0xa1, 0xeb, 0x61, 0x06, 0xc0, 0xc1, 0x4c, 0x87, 0x16, 0x58, 0x50, 0xad, 0xc6, 0x2c, 0x4e, 0xb8, + 0xe8, 0xd6, 0xa1, 0x0f, 0x33, 0x0d, 0xdd, 0xd7, 0xa0, 0xa4, 0x27, 0x6a, 0x50, 0xc8, 0x2c, 0x1a, + 0xb9, 0xe8, 0x08, 0x1c, 0xd9, 0xcc, 0xdd, 0x02, 0x8b, 0x2c, 0xec, 0x80, 0xb2, 0xd3, 0x77, 0x40, + 0x32, 0xea, 0x51, 0xef, 0x13, 0x41, 0x42, 0x1b, 0x2c, 0xe1, 0x78, 0x13, 0x32, 0x3f, 0xd5, 0x6f, + 0xac, 0x8a, 0x8e, 0x27, 0xd1, 0x7d, 0x24, 0x50, 0x4b, 0xbf, 0xf5, 0x73, 0xab, 0x36, 0xe4, 0x3f, + 0x96, 0xdb, 0x67, 0xd7, 0x0b, 0xfe, 0x27, 0xe8, 0xfd, 0x36, 0x0d, 0x56, 0xfb, 0x8f, 0x93, 0xa9, + 0x9a, 0xfe, 0xbb, 0x43, 0x5f, 0x2e, 0xd2, 0x53, 0x2d, 0x3a, 0xea, 0x50, 0x26, 0x7b, 0xbd, 0x48, + 0x30, 0x91, 0x99, 0x39, 0x13, 0xa5, 0xef, 0x92, 0x31, 0x9a, 0xfe, 0x61, 0xe4, 0xb3, 0xe1, 0xaf, + 0x07, 0xd3, 0x05, 0xe9, 0x94, 0x76, 0x36, 0xf1, 0x0b, 0xc2, 0xd3, 0x0e, 0xd3, 0x8f, 0x69, 0x70, + 0x6c, 0xd8, 0x2d, 0x02, 0x56, 0xf5, 0x5b, 0xa2, 0x0a, 0x92, 0x19, 0x7f, 0x4b, 0x7c, 0xdc, 0x2d, + 0x16, 0x87, 0xb4, 0xc0, 0x21, 0x4c, 0xec, 0xb9, 0xf1, 0x7d, 0x90, 0x4f, 0x30, 0x7f, 0x9d, 0xd3, + 0x26, 0xbd, 0xab, 0x2e, 0xf7, 0xaa, 0x8d, 0xf9, 0x7f, 0xd0, 0x2d, 0xe6, 0x77, 0x46, 0xe8, 0xa0, + 0x91, 0xd6, 0x23, 0xde, 0xdc, 0x32, 0x4f, 0xfd, 0xcd, 0xed, 0xe7, 0xc1, 0x78, 0xa9, 0xd4, 0x9a, + 0x49, 0xbc, 0x3e, 0x04, 0x27, 0x93, 0x39, 0x30, 0x18, 0xb0, 0xd3, 0x41, 0xb7, 0x78, 0xb2, 0x3a, + 0x4a, 0x09, 0x8d, 0xb6, 0x1f, 0x95, 0xc8, 0x99, 0x67, 0x93, 0xc8, 0x95, 0x73, 0xf7, 0x1e, 0x15, + 0xe6, 0xee, 0x3f, 0x2a, 0xcc, 0x3d, 0x78, 0x54, 0x98, 0xfb, 0x3c, 0x28, 0xa4, 0xee, 0x05, 0x85, + 0xd4, 0xfd, 0xa0, 0x90, 0x7a, 0x10, 0x14, 0x52, 0xbf, 0x07, 0x85, 0xd4, 0x97, 0x7f, 0x14, 0xe6, + 0x3e, 0x38, 0xa4, 0x8f, 0x9e, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x05, 0x26, 0x31, 0x5d, 0x9f, + 0x18, 0x00, 0x00, +} + func (m 
*CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -171,29 +626,37 @@ func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { } func (m *CrossVersionObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CrossVersionObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x1a - i++ + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -201,51 +664,63 @@ func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - if m.MetricSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MetricSelector.Size())) - n1, err := m.MetricSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.TargetAverageValue != nil { + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- + dAtA[i] = 0x22 } if m.TargetValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) - n2, err := m.TargetValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.TargetValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- + dAtA[i] = 0x1a } - if m.TargetAverageValue != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n3, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.MetricSelector != nil { + { + size, err := m.MetricSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ExternalMetricStatus) 
Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -253,49 +728,61 @@ func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - if m.MetricSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MetricSelector.Size())) - n4, err := m.MetricSelector.MarshalTo(dAtA[i:]) + if m.CurrentAverageValue != nil { + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + { + size, err := m.CurrentValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n4 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) - n5, err := m.CurrentValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - if m.CurrentAverageValue != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n6, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.MetricSelector != nil { + { + size, err := m.MetricSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -303,41 +790,52 @@ func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n8, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n9, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 
0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -345,41 +843,52 @@ func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n10, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -387,37 +896,46 @@ func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := 
m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -425,45 +943,54 @@ func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleTargetRef.Size())) - n12, err := m.ScaleTargetRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Metrics) > 0 { + for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } } - i += n12 + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) + i-- + dAtA[i] = 0x18 if m.MinReplicas != nil { - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) + i-- + dAtA[i] = 0x10 } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) - if len(m.Metrics) > 0 { - for _, msg := range m.Metrics { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + { + size, err := m.ScaleTargetRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -471,62 +998,73 @@ func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ObservedGeneration != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) - } - if m.LastScaleTime != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastScaleTime.Size())) - n13, err := m.LastScaleTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } - i += n13 } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) if len(m.CurrentMetrics) > 0 { - for _, msg := range 
m.CurrentMetrics { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.CurrentMetrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CurrentMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x2a } } - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x18 + if m.LastScaleTime != nil { + { + size, err := m.LastScaleTime.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil + if m.ObservedGeneration != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *MetricSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -534,61 +1072,75 @@ func (m *MetricSpec) Marshal() (dAtA []byte, err error) { } func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n14, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n15, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n15 + i-- + dAtA[i] = 0x2a } if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n16, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n16 + i-- + dAtA[i] = 0x1a } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n17, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n17 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, 
i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -596,61 +1148,75 @@ func (m *MetricStatus) Marshal() (dAtA []byte, err error) { } func (m *MetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n18, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n19, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n19 + i-- + dAtA[i] = 0x2a } if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n20, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n20 + i-- + dAtA[i] = 0x1a } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n21, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n21 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -658,57 +1224,71 @@ func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n22, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n22 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) - n23, err := m.TargetValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, 
err + if m.AverageValue != nil { + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - i += n23 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n24, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.TargetValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n24 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.AverageValue != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n25, err := m.AverageValue.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1a + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n25 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -716,57 +1296,71 @@ func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n26, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) - n27, err := m.CurrentValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.AverageValue != nil { + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - i += n27 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n28, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.CurrentValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n28 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.AverageValue != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n29, err := m.AverageValue.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1a + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n29 + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -774,39 +1368,49 @@ func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n30, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n30 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n31, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n31 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -814,39 +1418,49 @@ func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n32, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n32 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n33, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n33 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return 
nil, err } @@ -854,36 +1468,44 @@ func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.TargetAverageUtilization != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) - } if m.TargetAverageValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n34, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n34 + i-- + dAtA[i] = 0x1a } - return i, nil + if m.TargetAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -891,40 +1513,53 @@ func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.CurrentAverageUtilization != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n35, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.CurrentAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) + i-- + dAtA[i] = 0x10 } - i += n35 - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *CrossVersionObjectReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -937,6 +1572,9 @@ func (m *CrossVersionObjectReference) Size() (n int) { } func (m *ExternalMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -957,6 +1595,9 @@ func (m *ExternalMetricSource) Size() (n int) { } func (m *ExternalMetricStatus) 
Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -975,6 +1616,9 @@ func (m *ExternalMetricStatus) Size() (n int) { } func (m *HorizontalPodAutoscaler) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -987,6 +1631,9 @@ func (m *HorizontalPodAutoscaler) Size() (n int) { } func (m *HorizontalPodAutoscalerCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1003,6 +1650,9 @@ func (m *HorizontalPodAutoscalerCondition) Size() (n int) { } func (m *HorizontalPodAutoscalerList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1017,6 +1667,9 @@ func (m *HorizontalPodAutoscalerList) Size() (n int) { } func (m *HorizontalPodAutoscalerSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ScaleTargetRef.Size() @@ -1035,6 +1688,9 @@ func (m *HorizontalPodAutoscalerSpec) Size() (n int) { } func (m *HorizontalPodAutoscalerStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ObservedGeneration != nil { @@ -1062,6 +1718,9 @@ func (m *HorizontalPodAutoscalerStatus) Size() (n int) { } func (m *MetricSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1086,6 +1745,9 @@ func (m *MetricSpec) Size() (n int) { } func (m *MetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1110,6 +1772,9 @@ func (m *MetricStatus) Size() (n int) { } func (m *ObjectMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Target.Size() @@ -1130,6 +1795,9 @@ func (m *ObjectMetricSource) Size() (n int) { } func (m *ObjectMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Target.Size() @@ -1150,6 +1818,9 @@ func (m *ObjectMetricStatus) Size() (n int) { } func (m *PodsMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -1164,6 +1835,9 @@ func (m *PodsMetricSource) Size() (n int) { } func (m *PodsMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -1178,6 +1852,9 @@ func (m *PodsMetricStatus) Size() (n int) { } func (m *ResourceMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1193,6 +1870,9 @@ func (m *ResourceMetricSource) Size() (n int) { } func (m *ResourceMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1206,14 +1886,7 @@ func (m *ResourceMetricStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1236,9 +1909,9 @@ func (this *ExternalMetricSource) String() string { } s := strings.Join([]string{`&ExternalMetricSource{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `TargetValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `MetricSelector:` + 
strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `TargetValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "resource.Quantity", 1) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1249,9 +1922,9 @@ func (this *ExternalMetricStatus) String() string { } s := strings.Join([]string{`&ExternalMetricStatus{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `CurrentAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `CurrentAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1261,7 +1934,7 @@ func (this *HorizontalPodAutoscaler) String() string { return "nil" } s := strings.Join([]string{`&HorizontalPodAutoscaler{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1275,7 +1948,7 @@ func (this *HorizontalPodAutoscalerCondition) String() string { s := strings.Join([]string{`&HorizontalPodAutoscalerCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -1286,9 +1959,14 @@ func (this *HorizontalPodAutoscalerList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]HorizontalPodAutoscaler{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", 
this.Items), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1297,11 +1975,16 @@ func (this *HorizontalPodAutoscalerSpec) String() string { if this == nil { return "nil" } + repeatedStringForMetrics := "[]MetricSpec{" + for _, f := range this.Metrics { + repeatedStringForMetrics += strings.Replace(strings.Replace(f.String(), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + "," + } + repeatedStringForMetrics += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerSpec{`, `ScaleTargetRef:` + strings.Replace(strings.Replace(this.ScaleTargetRef.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MinReplicas:` + valueToStringGenerated(this.MinReplicas) + `,`, `MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`, - `Metrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Metrics), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + `,`, + `Metrics:` + repeatedStringForMetrics + `,`, `}`, }, "") return s @@ -1310,13 +1993,23 @@ func (this *HorizontalPodAutoscalerStatus) String() string { if this == nil { return "nil" } + repeatedStringForCurrentMetrics := "[]MetricStatus{" + for _, f := range this.CurrentMetrics { + repeatedStringForCurrentMetrics += strings.Replace(strings.Replace(f.String(), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForCurrentMetrics += "}" + repeatedStringForConditions := "[]HorizontalPodAutoscalerCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscalerCondition", "HorizontalPodAutoscalerCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, - `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "v1.Time", 1) + `,`, `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, - `CurrentMetrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentMetrics), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "HorizontalPodAutoscalerCondition", "HorizontalPodAutoscalerCondition", 1), `&`, ``, 1) + `,`, + `CurrentMetrics:` + repeatedStringForCurrentMetrics + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -1327,10 +2020,10 @@ func (this *MetricSpec) String() string { } s := strings.Join([]string{`&MetricSpec{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricSource", "PodsMetricSource", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `Object:` + 
strings.Replace(this.Object.String(), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, `}`, }, "") return s @@ -1341,10 +2034,10 @@ func (this *MetricStatus) String() string { } s := strings.Join([]string{`&MetricStatus{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, `}`, }, "") return s @@ -1356,9 +2049,9 @@ func (this *ObjectMetricSource) String() string { s := strings.Join([]string{`&ObjectMetricSource{`, `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `TargetValue:` + strings.Replace(strings.Replace(this.TargetValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `TargetValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1370,9 +2063,9 @@ func (this *ObjectMetricStatus) String() string { s := strings.Join([]string{`&ObjectMetricStatus{`, `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `CurrentValue:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1383,8 +2076,8 @@ func (this *PodsMetricSource) String() string { } s := strings.Join([]string{`&PodsMetricSource{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `TargetAverageValue:` + strings.Replace(strings.Replace(this.TargetAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `TargetAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -1395,8 +2088,8 @@ func (this *PodsMetricStatus) String() string { } s := strings.Join([]string{`&PodsMetricStatus{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -1408,7 +2101,7 @@ func (this *ResourceMetricSource) String() string { s := strings.Join([]string{`&ResourceMetricSource{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `TargetAverageUtilization:` + valueToStringGenerated(this.TargetAverageUtilization) + `,`, - `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1420,7 +2113,7 @@ func (this *ResourceMetricStatus) String() string { s := strings.Join([]string{`&ResourceMetricStatus{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, - `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1448,7 +2141,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1476,7 +2169,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= 
uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1486,6 +2179,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1505,7 +2201,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1515,6 +2211,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1534,7 +2233,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1544,6 +2243,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1555,7 +2257,10 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1585,7 +2290,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1613,7 +2318,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1623,6 +2328,9 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1642,7 +2350,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1651,11 +2359,14 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MetricSelector == nil { - m.MetricSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.MetricSelector = &v1.LabelSelector{} } if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1675,7 +2386,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1684,11 +2395,14 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetValue == nil { - m.TargetValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.TargetValue = &resource.Quantity{} } if err 
:= m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1708,7 +2422,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1717,11 +2431,14 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetAverageValue == nil { - m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.TargetAverageValue = &resource.Quantity{} } if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1736,6 +2453,9 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1763,7 +2483,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1791,7 +2511,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1801,6 +2521,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1820,7 +2543,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1829,11 +2552,14 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MetricSelector == nil { - m.MetricSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.MetricSelector = &v1.LabelSelector{} } if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1853,7 +2579,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1862,6 +2588,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1883,7 +2612,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1892,11 +2621,14 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.CurrentAverageValue == nil { - m.CurrentAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.CurrentAverageValue = &resource.Quantity{} } if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err 
!= nil { return err @@ -1911,6 +2643,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1938,7 +2673,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1966,7 +2701,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1975,6 +2710,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1996,7 +2734,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2005,6 +2743,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2026,7 +2767,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2035,6 +2776,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2051,6 +2795,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2078,7 +2825,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2106,7 +2853,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2116,6 +2863,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2135,7 +2885,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2145,6 +2895,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2164,7 +2917,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 
0x80 { break } @@ -2173,6 +2926,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2194,7 +2950,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2204,6 +2960,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2223,7 +2982,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2233,6 +2992,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2247,6 +3009,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2274,7 +3039,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2302,7 +3067,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2311,6 +3076,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2332,7 +3100,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2341,6 +3109,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2358,6 +3129,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2385,7 +3159,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2413,7 +3187,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2422,6 +3196,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2443,7 +3220,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2463,7 +3240,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxReplicas |= (int32(b) & 0x7F) << shift + m.MaxReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2482,7 +3259,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2491,6 +3268,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2508,6 +3288,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2535,7 +3318,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2563,7 +3346,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2583,7 +3366,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2592,11 +3375,14 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.LastScaleTime == nil { - m.LastScaleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.LastScaleTime = &v1.Time{} } if err := m.LastScaleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2616,7 +3402,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift + m.CurrentReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2635,7 +3421,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredReplicas |= (int32(b) & 0x7F) << shift + m.DesiredReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2654,7 +3440,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2663,6 +3449,9 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2685,7 +3474,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2694,6 
+3483,9 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2711,6 +3503,9 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2738,7 +3533,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2766,7 +3561,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2776,6 +3571,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2795,7 +3593,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2804,6 +3602,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2828,7 +3629,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2837,6 +3638,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2861,7 +3665,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2870,6 +3674,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2894,7 +3701,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2903,6 +3710,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2922,6 +3732,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2949,7 +3762,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2977,7 +3790,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if 
b < 0x80 { break } @@ -2987,6 +3800,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3006,7 +3822,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3015,6 +3831,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3039,7 +3858,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3048,6 +3867,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3072,7 +3894,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3081,6 +3903,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3105,7 +3930,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3114,6 +3939,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3133,6 +3961,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3160,7 +3991,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3188,7 +4019,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3197,6 +4028,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3218,7 +4052,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3228,6 +4062,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3247,7 +4084,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) 
<< shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3256,6 +4093,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3277,7 +4117,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3286,11 +4126,14 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3310,7 +4153,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3319,11 +4162,14 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.AverageValue = &resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3338,6 +4184,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3365,7 +4214,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3393,7 +4242,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3402,6 +4251,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3423,7 +4275,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3433,6 +4285,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3452,7 +4307,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3461,6 +4316,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3482,7 +4340,7 @@ func (m *ObjectMetricStatus) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3491,11 +4349,14 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3515,7 +4376,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3524,11 +4385,14 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.AverageValue = &resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3543,6 +4407,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3570,7 +4437,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3598,7 +4465,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3608,6 +4475,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3627,7 +4497,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3636,6 +4506,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3657,7 +4530,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3666,11 +4539,14 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3685,6 +4561,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } @@ -3712,7 +4591,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3740,7 +4619,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3750,6 +4629,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3769,7 +4651,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3778,6 +4660,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3799,7 +4684,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3808,11 +4693,14 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3827,6 +4715,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3854,7 +4745,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3882,7 +4773,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3892,6 +4783,9 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3911,7 +4805,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3931,7 +4825,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3940,11 +4834,14 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetAverageValue == nil { - m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.TargetAverageValue = &resource.Quantity{} } if err 
:= m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3959,6 +4856,9 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3986,7 +4886,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4014,7 +4914,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4024,6 +4924,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4043,7 +4946,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4063,7 +4966,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4072,6 +4975,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4088,6 +4994,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4154,10 +5063,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -4186,6 +5098,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -4204,104 +5119,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1475 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcb, 0x8f, 0x1b, 0x45, - 0x13, 0x5f, 0x3f, 0x76, 0xb3, 0x69, 0x6f, 0x76, 0xf7, 0xeb, 0x44, 0x89, 0xb3, 0xf9, 0x62, 0xaf, - 0x2c, 0x84, 0x42, 0x44, 0x66, 0x12, 0xb3, 0x3c, 0x24, 0x84, 0xc4, 0xda, 0x40, 0x12, 0xb1, 0x4e, - 0x42, 0xef, 0x26, 0x42, 0x90, 0x20, 0xda, 0x33, 0x1d, 0x6f, 0xb3, 0x9e, 0x19, 0x6b, 0xba, 0x6d, - 0x65, 0x83, 0x90, 0xb8, 0x70, 0xe7, 0x02, 0x67, 0x90, 0x38, 0x21, 0xb8, 0xc2, 0x99, 0x5b, 0x8e, - 0x39, 0x26, 0x02, 0x59, 0x64, 0xf8, 0x2f, 0x72, 0x42, 0xfd, 0x98, 0xf1, 0x8c, 0x1f, 0x6b, 0xc7, - 0x38, 0xe1, 0x71, 0x9b, 0xee, 0xaa, 0xfa, 0x55, 0x4f, 0xfd, 0xaa, 0xab, 0xbb, 0x1a, 0x5c, 0xdc, - 0x7b, 0x8d, 0x19, 0xd4, 0x33, 0xf7, 0xda, 0x75, 
0x9a, 0xfe, 0xbb, 0x43, 0x5f, 0x2e, 0xd2, 0x53, 0x2d, 0x3a, 0xea, 0x50, 0x26, 0x7b, 0xbd, 0x48, - 0x30, 0x91, 0x99, 0x39, 0x13, 0xa5, 0xef, 0x92, 0x31, 0x9a, 0xfe, 0x61, 0xe4, 0xb3, 0xe1, 0xaf, - 0x07, 0xd3, 0x05, 0xe9, 0x94, 0x76, 0x36, 0xf1, 0x0b, 0xc2, 0xd3, 0x0e, 0xd3, 0x8f, 0x69, 0x70, - 0x6c, 0xd8, 0x2d, 0x02, 0x56, 0xf5, 0x5b, 0xa2, 0x0a, 0x92, 0x19, 0x7f, 0x4b, 0x7c, 0xdc, 0x2d, - 0x16, 0x87, 0xb4, 0xc0, 0x21, 0x4c, 0xec, 0xb9, 0xf1, 0x7d, 0x90, 0x4f, 0x30, 0x7f, 0x9d, 0xd3, - 0x26, 0xbd, 0xab, 0x2e, 0xf7, 0xaa, 0x8d, 0xf9, 0x7f, 0xd0, 0x2d, 0xe6, 0x77, 0x46, 0xe8, 0xa0, - 0x91, 0xd6, 0x23, 0xde, 0xdc, 0x32, 0x4f, 0xfd, 0xcd, 0xed, 0xe7, 0xc1, 0x78, 0xa9, 0xd4, 0x9a, - 0x49, 0xbc, 0x3e, 0x04, 0x27, 0x93, 0x39, 0x30, 0x18, 0xb0, 0xd3, 0x41, 0xb7, 0x78, 0xb2, 0x3a, - 0x4a, 0x09, 0x8d, 0xb6, 0x1f, 0x95, 0xc8, 0x99, 0x67, 0x93, 0xc8, 0x95, 0x73, 0xf7, 0x1e, 0x15, - 0xe6, 0xee, 0x3f, 0x2a, 0xcc, 0x3d, 0x78, 0x54, 0x98, 0xfb, 0x3c, 0x28, 0xa4, 0xee, 0x05, 0x85, - 0xd4, 0xfd, 0xa0, 0x90, 0x7a, 0x10, 0x14, 0x52, 0xbf, 0x07, 0x85, 0xd4, 0x97, 0x7f, 0x14, 0xe6, - 0x3e, 0x38, 0xa4, 0x8f, 0x9e, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x05, 0x26, 0x31, 0x5d, 0x9f, - 0x18, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto index 04bc0ed60..f90f93f9f 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto @@ -32,7 +32,7 @@ option go_package = "v2beta1"; // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" optional string kind = 1; // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names @@ -92,12 +92,12 @@ message ExternalMetricStatus { // implementing the scale subresource based on the metrics specified. message HorizontalPodAutoscaler { // metadata is the standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification for the behaviour of the autoscaler. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional HorizontalPodAutoscalerSpec spec = 2; @@ -146,8 +146,11 @@ message HorizontalPodAutoscalerSpec { // should be collected, as well as to actually change the replica count. optional CrossVersionObjectReference scaleTargetRef = 1; - // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. - // It defaults to 1 pod. + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. 
// +optional optional int32 minReplicas = 2; diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types.go b/vendor/k8s.io/api/autoscaling/v2beta1/types.go index 6a30e6774..92fe5a2b1 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types.go @@ -17,14 +17,14 @@ limitations under the License. package v2beta1 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // CrossVersionObjectReference contains enough information to let you identify the referred resource. type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` @@ -38,8 +38,11 @@ type HorizontalPodAutoscalerSpec struct { // scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics // should be collected, as well as to actually change the replica count. ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` - // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. - // It defaults to 1 pod. + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. // +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. @@ -377,12 +380,12 @@ type ExternalMetricStatus struct { type HorizontalPodAutoscaler struct { metav1.TypeMeta `json:",inline"` // metadata is the standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // spec is the specification for the behaviour of the autoscaler. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go index 589408ace..a0d5f5337 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v2beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"", + "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", "apiVersion": "API version of the referent", } @@ -64,8 +64,8 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscaler = map[string]string{ "": "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.", - "metadata": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", + "metadata": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", "status": "status is the current information about the autoscaler.", } @@ -99,7 +99,7 @@ func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscalerSpec = map[string]string{ "": "HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.", "scaleTargetRef": "scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics should be collected, as well as to actually change the replica count.", - "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod.", + "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.", "maxReplicas": "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas.", "metrics": "metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. 
See the individual metric source types for more information about how each type of metric must respond.", } diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go index 2ec7e6156..c51e05b8f 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go @@ -148,7 +148,7 @@ func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerC func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]HorizontalPodAutoscaler, len(*in)) diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go index 816fea9d5..23bc5b983 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go @@ -17,48 +17,24 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto -/* - Package v2beta2 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto - - It has these top-level messages: - CrossVersionObjectReference - ExternalMetricSource - ExternalMetricStatus - HorizontalPodAutoscaler - HorizontalPodAutoscalerCondition - HorizontalPodAutoscalerList - HorizontalPodAutoscalerSpec - HorizontalPodAutoscalerStatus - MetricIdentifier - MetricSpec - MetricStatus - MetricTarget - MetricValueStatus - ObjectMetricSource - ObjectMetricStatus - PodsMetricSource - PodsMetricStatus - ResourceMetricSource - ResourceMetricStatus -*/ package v2beta2 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" + proto "github.com/gogo/protobuf/proto" -import strings "strings" -import reflect "reflect" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal @@ -74,88 +50,534 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} + return fileDescriptor_592ad94d7d6be24f, []int{0} +} +func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CrossVersionObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CrossVersionObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_CrossVersionObjectReference.Merge(m, src) +} +func (m *CrossVersionObjectReference) XXX_Size() int { + return m.Size() +} +func (m *CrossVersionObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_CrossVersionObjectReference.DiscardUnknown(m) } -func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } -func (*ExternalMetricSource) ProtoMessage() {} -func (*ExternalMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo -func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } -func (*ExternalMetricStatus) ProtoMessage() {} -func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } +func (*ExternalMetricSource) ProtoMessage() {} +func (*ExternalMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{1} +} +func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricSource.Merge(m, src) +} +func (m *ExternalMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo + +func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } +func (*ExternalMetricStatus) ProtoMessage() {} +func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{2} +} +func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricStatus.Merge(m, src) +} +func (m *ExternalMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricStatus.DiscardUnknown(m) +} -func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } -func (*HorizontalPodAutoscaler) ProtoMessage() {} -func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo + +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (*HorizontalPodAutoscaler) ProtoMessage() {} +func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{3} +} +func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscaler) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscaler.Merge(m, src) +} +func (m *HorizontalPodAutoscaler) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscaler) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscaler.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} + return fileDescriptor_592ad94d7d6be24f, []int{4} +} +func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerCondition.Merge(m, src) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerCondition) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerCondition.DiscardUnknown(m) } +var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo + func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} + return fileDescriptor_592ad94d7d6be24f, []int{5} +} +func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } +func (m *HorizontalPodAutoscalerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerList.Merge(m, src) +} +func (m *HorizontalPodAutoscalerList) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerList) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerList.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} + return fileDescriptor_592ad94d7d6be24f, []int{6} +} +func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { + 
return m.Unmarshal(b) } +func (m *HorizontalPodAutoscalerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerSpec.Merge(m, src) +} +func (m *HorizontalPodAutoscalerSpec) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerSpec) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptor_592ad94d7d6be24f, []int{7} +} +func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerStatus.Merge(m, src) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerStatus.DiscardUnknown(m) } -func (m *MetricIdentifier) Reset() { *m = MetricIdentifier{} } -func (*MetricIdentifier) ProtoMessage() {} -func (*MetricIdentifier) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo -func (m *MetricSpec) Reset() { *m = MetricSpec{} } -func (*MetricSpec) ProtoMessage() {} -func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (m *MetricIdentifier) Reset() { *m = MetricIdentifier{} } +func (*MetricIdentifier) ProtoMessage() {} +func (*MetricIdentifier) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{8} +} +func (m *MetricIdentifier) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricIdentifier) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricIdentifier.Merge(m, src) +} +func (m *MetricIdentifier) XXX_Size() int { + return m.Size() +} +func (m *MetricIdentifier) XXX_DiscardUnknown() { + xxx_messageInfo_MetricIdentifier.DiscardUnknown(m) +} -func (m *MetricStatus) Reset() { *m = MetricStatus{} } -func (*MetricStatus) ProtoMessage() {} -func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +var xxx_messageInfo_MetricIdentifier proto.InternalMessageInfo -func (m *MetricTarget) Reset() { *m = MetricTarget{} } -func (*MetricTarget) ProtoMessage() {} -func (*MetricTarget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (m *MetricSpec) Reset() { *m = MetricSpec{} } +func (*MetricSpec) ProtoMessage() {} +func (*MetricSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{9} +} +func (m 
*MetricSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricSpec.Merge(m, src) +} +func (m *MetricSpec) XXX_Size() int { + return m.Size() +} +func (m *MetricSpec) XXX_DiscardUnknown() { + xxx_messageInfo_MetricSpec.DiscardUnknown(m) +} -func (m *MetricValueStatus) Reset() { *m = MetricValueStatus{} } -func (*MetricValueStatus) ProtoMessage() {} -func (*MetricValueStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +var xxx_messageInfo_MetricSpec proto.InternalMessageInfo -func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } -func (*ObjectMetricSource) ProtoMessage() {} -func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (m *MetricStatus) Reset() { *m = MetricStatus{} } +func (*MetricStatus) ProtoMessage() {} +func (*MetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{10} +} +func (m *MetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricStatus.Merge(m, src) +} +func (m *MetricStatus) XXX_Size() int { + return m.Size() +} +func (m *MetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_MetricStatus.DiscardUnknown(m) +} -func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } -func (*ObjectMetricStatus) ProtoMessage() {} -func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +var xxx_messageInfo_MetricStatus proto.InternalMessageInfo -func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } -func (*PodsMetricSource) ProtoMessage() {} -func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (m *MetricTarget) Reset() { *m = MetricTarget{} } +func (*MetricTarget) ProtoMessage() {} +func (*MetricTarget) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{11} +} +func (m *MetricTarget) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricTarget) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricTarget) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricTarget.Merge(m, src) +} +func (m *MetricTarget) XXX_Size() int { + return m.Size() +} +func (m *MetricTarget) XXX_DiscardUnknown() { + xxx_messageInfo_MetricTarget.DiscardUnknown(m) +} -func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } -func (*PodsMetricStatus) ProtoMessage() {} -func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +var xxx_messageInfo_MetricTarget proto.InternalMessageInfo -func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } -func (*ResourceMetricSource) ProtoMessage() {} -func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (m *MetricValueStatus) Reset() { *m = 
MetricValueStatus{} } +func (*MetricValueStatus) ProtoMessage() {} +func (*MetricValueStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{12} +} +func (m *MetricValueStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricValueStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricValueStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricValueStatus.Merge(m, src) +} +func (m *MetricValueStatus) XXX_Size() int { + return m.Size() +} +func (m *MetricValueStatus) XXX_DiscardUnknown() { + xxx_messageInfo_MetricValueStatus.DiscardUnknown(m) +} -func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } -func (*ResourceMetricStatus) ProtoMessage() {} -func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +var xxx_messageInfo_MetricValueStatus proto.InternalMessageInfo + +func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } +func (*ObjectMetricSource) ProtoMessage() {} +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{13} +} +func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricSource.Merge(m, src) +} +func (m *ObjectMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo + +func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } +func (*ObjectMetricStatus) ProtoMessage() {} +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{14} +} +func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricStatus.Merge(m, src) +} +func (m *ObjectMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo + +func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } +func (*PodsMetricSource) ProtoMessage() {} +func (*PodsMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{15} +} +func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricSource.Merge(m, src) +} +func (m *PodsMetricSource) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricSource) 
XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo + +func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } +func (*PodsMetricStatus) ProtoMessage() {} +func (*PodsMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{16} +} +func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricStatus.Merge(m, src) +} +func (m *PodsMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo + +func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } +func (*ResourceMetricSource) ProtoMessage() {} +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{17} +} +func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricSource.Merge(m, src) +} +func (m *ResourceMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo + +func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } +func (*ResourceMetricStatus) ProtoMessage() {} +func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{18} +} +func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricStatus.Merge(m, src) +} +func (m *ResourceMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo func init() { proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v2beta2.CrossVersionObjectReference") @@ -178,10 +600,109 @@ func init() { proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2beta2.ResourceMetricSource") proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.ResourceMetricStatus") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto", fileDescriptor_592ad94d7d6be24f) +} + +var fileDescriptor_592ad94d7d6be24f = []byte{ + // 1425 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xdd, 0x6f, 0x1b, 
0xc5, + 0x16, 0xcf, 0xda, 0x8e, 0x93, 0x8e, 0xd3, 0x24, 0x9d, 0x5b, 0xb5, 0x56, 0xaa, 0x6b, 0x47, 0xab, + 0xab, 0xab, 0x52, 0xd1, 0x35, 0x31, 0xe1, 0x43, 0x42, 0x48, 0xc4, 0x01, 0xda, 0x8a, 0xa4, 0x2d, + 0x93, 0xb4, 0x42, 0xa8, 0x45, 0x8c, 0x77, 0x4f, 0xdc, 0x21, 0xde, 0x5d, 0x6b, 0x76, 0x6c, 0x35, + 0x45, 0x42, 0xbc, 0xf0, 0x8e, 0x40, 0xfc, 0x13, 0x88, 0x17, 0x5e, 0x90, 0x78, 0xe4, 0x43, 0xa8, + 0x42, 0x08, 0xf5, 0xb1, 0x08, 0xc9, 0xa2, 0xe6, 0xbf, 0xe8, 0x13, 0xda, 0x99, 0xd9, 0xf5, 0xae, + 0xed, 0xc4, 0x4e, 0x95, 0x14, 0xf5, 0xcd, 0x33, 0xe7, 0x9c, 0xdf, 0xf9, 0x9c, 0x73, 0xce, 0x1a, + 0x5d, 0xda, 0x7d, 0x35, 0xb0, 0x98, 0x5f, 0xd9, 0x6d, 0xd7, 0x81, 0x7b, 0x20, 0x20, 0xa8, 0x74, + 0xc0, 0x73, 0x7c, 0x5e, 0xd1, 0x04, 0xda, 0x62, 0x15, 0xda, 0x16, 0x7e, 0x60, 0xd3, 0x26, 0xf3, + 0x1a, 0x95, 0x4e, 0xb5, 0x0e, 0x82, 0x56, 0x2b, 0x0d, 0xf0, 0x80, 0x53, 0x01, 0x8e, 0xd5, 0xe2, + 0xbe, 0xf0, 0x71, 0x49, 0xf1, 0x5b, 0xb4, 0xc5, 0xac, 0x04, 0xbf, 0xa5, 0xf9, 0x97, 0x2e, 0x36, + 0x98, 0xb8, 0xd3, 0xae, 0x5b, 0xb6, 0xef, 0x56, 0x1a, 0x7e, 0xc3, 0xaf, 0x48, 0xb1, 0x7a, 0x7b, + 0x47, 0x9e, 0xe4, 0x41, 0xfe, 0x52, 0x70, 0x4b, 0x66, 0x42, 0xbd, 0xed, 0x73, 0xa8, 0x74, 0x56, + 0x06, 0x55, 0x2e, 0xad, 0xf6, 0x79, 0x5c, 0x6a, 0xdf, 0x61, 0x1e, 0xf0, 0xbd, 0x4a, 0x6b, 0xb7, + 0x21, 0x85, 0x38, 0x04, 0x7e, 0x9b, 0xdb, 0x70, 0x28, 0xa9, 0xa0, 0xe2, 0x82, 0xa0, 0xa3, 0x74, + 0x55, 0xf6, 0x93, 0xe2, 0x6d, 0x4f, 0x30, 0x77, 0x58, 0xcd, 0xcb, 0xe3, 0x04, 0x02, 0xfb, 0x0e, + 0xb8, 0x74, 0x50, 0xce, 0xfc, 0xca, 0x40, 0xe7, 0xd6, 0xb9, 0x1f, 0x04, 0x37, 0x81, 0x07, 0xcc, + 0xf7, 0xae, 0xd5, 0x3f, 0x02, 0x5b, 0x10, 0xd8, 0x01, 0x0e, 0x9e, 0x0d, 0x78, 0x19, 0xe5, 0x76, + 0x99, 0xe7, 0x14, 0x8d, 0x65, 0xe3, 0xfc, 0x89, 0xda, 0xdc, 0xfd, 0x6e, 0x79, 0xaa, 0xd7, 0x2d, + 0xe7, 0xde, 0x61, 0x9e, 0x43, 0x24, 0x25, 0xe4, 0xf0, 0xa8, 0x0b, 0xc5, 0x4c, 0x9a, 0xe3, 0x2a, + 0x75, 0x81, 0x48, 0x0a, 0xae, 0x22, 0x44, 0x5b, 0x4c, 0x2b, 0x28, 0x66, 0x25, 0x1f, 0xd6, 0x7c, + 0x68, 0xed, 0xfa, 0x15, 0x4d, 0x21, 0x09, 0x2e, 0xf3, 0x17, 0x03, 0x9d, 0x7e, 0xeb, 0xae, 0x00, + 0xee, 0xd1, 0xe6, 0x26, 0x08, 0xce, 0xec, 0x2d, 0x19, 0x5f, 0xfc, 0x1e, 0xca, 0xbb, 0xf2, 0x2c, + 0x4d, 0x2a, 0x54, 0x5f, 0xb0, 0x0e, 0xae, 0x04, 0x4b, 0x49, 0x5f, 0x71, 0xc0, 0x13, 0x6c, 0x87, + 0x01, 0xaf, 0xcd, 0x6b, 0xd5, 0x79, 0x45, 0x21, 0x1a, 0x0f, 0x6f, 0xa3, 0xbc, 0xa0, 0xbc, 0x01, + 0x42, 0xba, 0x52, 0xa8, 0x3e, 0x3f, 0x19, 0xf2, 0xb6, 0x94, 0xe9, 0xa3, 0xaa, 0x33, 0xd1, 0x58, + 0xe6, 0xef, 0xc3, 0x8e, 0x08, 0x2a, 0xda, 0xc1, 0x31, 0x3a, 0x72, 0x0b, 0xcd, 0xd8, 0x6d, 0xce, + 0xc1, 0x8b, 0x3c, 0x59, 0x99, 0x0c, 0xfa, 0x26, 0x6d, 0xb6, 0x41, 0x59, 0x57, 0x5b, 0xd0, 0xd8, + 0x33, 0xeb, 0x0a, 0x89, 0x44, 0x90, 0xe6, 0x0f, 0x19, 0x74, 0xf6, 0xb2, 0xcf, 0xd9, 0x3d, 0xdf, + 0x13, 0xb4, 0x79, 0xdd, 0x77, 0xd6, 0x34, 0x20, 0x70, 0xfc, 0x21, 0x9a, 0x0d, 0x2b, 0xda, 0xa1, + 0x82, 0x8e, 0xf0, 0x2a, 0x2e, 0x4c, 0xab, 0xb5, 0xdb, 0x08, 0x2f, 0x02, 0x2b, 0xe4, 0xb6, 0x3a, + 0x2b, 0x96, 0x2a, 0xbb, 0x4d, 0x10, 0xb4, 0x5f, 0x19, 0xfd, 0x3b, 0x12, 0xa3, 0xe2, 0xdb, 0x28, + 0x17, 0xb4, 0xc0, 0xd6, 0x8e, 0xbd, 0x36, 0xce, 0xb1, 0x7d, 0x0c, 0xdd, 0x6a, 0x81, 0xdd, 0x2f, + 0xd5, 0xf0, 0x44, 0x24, 0x2c, 0x06, 0x94, 0x0f, 0x64, 0x00, 0x64, 0x99, 0x16, 0xaa, 0xaf, 0x3f, + 0xa9, 0x02, 0x15, 0xc5, 0x38, 0x43, 0xea, 0x4c, 0x34, 0xb8, 0xf9, 0x59, 0x16, 0x2d, 0xef, 0x23, + 0xb9, 0xee, 0x7b, 0x0e, 0x13, 0xcc, 0xf7, 0xf0, 0x65, 0x94, 0x13, 0x7b, 0x2d, 0xd0, 0x4f, 0x6f, + 0x35, 0xb2, 0x76, 0x7b, 0xaf, 0x05, 0x8f, 0xbb, 0xe5, 0xff, 0x8d, 0x93, 0x0f, 0xf9, 0x88, 0x44, + 0xc0, 0x1b, 0xb1, 
0x57, 0x99, 0x14, 0x96, 0x36, 0xeb, 0x71, 0xb7, 0x3c, 0xa2, 0xff, 0x59, 0x31, + 0x52, 0xda, 0x78, 0xdc, 0x41, 0xb8, 0x49, 0x03, 0xb1, 0xcd, 0xa9, 0x17, 0x28, 0x4d, 0xcc, 0x05, + 0x1d, 0xaf, 0x0b, 0x93, 0xa5, 0x3b, 0x94, 0xa8, 0x2d, 0x69, 0x2b, 0xf0, 0xc6, 0x10, 0x1a, 0x19, + 0xa1, 0x01, 0xff, 0x1f, 0xe5, 0x39, 0xd0, 0xc0, 0xf7, 0x8a, 0x39, 0xe9, 0x45, 0x1c, 0x5c, 0x22, + 0x6f, 0x89, 0xa6, 0xe2, 0xe7, 0xd0, 0x8c, 0x0b, 0x41, 0x40, 0x1b, 0x50, 0x9c, 0x96, 0x8c, 0x71, + 0x2d, 0x6f, 0xaa, 0x6b, 0x12, 0xd1, 0xcd, 0x3f, 0x0c, 0x74, 0x6e, 0x9f, 0x38, 0x6e, 0xb0, 0x40, + 0xe0, 0x5b, 0x43, 0xf5, 0x6c, 0x4d, 0xe6, 0x60, 0x28, 0x2d, 0xab, 0x79, 0x51, 0xeb, 0x9e, 0x8d, + 0x6e, 0x12, 0xb5, 0x7c, 0x0b, 0x4d, 0x33, 0x01, 0x6e, 0x98, 0x95, 0xec, 0xf9, 0x42, 0xf5, 0x95, + 0x27, 0xac, 0xb5, 0xda, 0x49, 0xad, 0x63, 0xfa, 0x4a, 0x88, 0x46, 0x14, 0xa8, 0xf9, 0x67, 0x66, + 0x5f, 0xdf, 0xc2, 0x82, 0xc7, 0x1f, 0xa3, 0x79, 0x79, 0xd2, 0xfd, 0x0a, 0x76, 0xb4, 0x87, 0x63, + 0xdf, 0xd4, 0x01, 0xe3, 0xa2, 0x76, 0x46, 0x9b, 0x32, 0xbf, 0x95, 0x82, 0x26, 0x03, 0xaa, 0xf0, + 0x0a, 0x2a, 0xb8, 0xcc, 0x23, 0xd0, 0x6a, 0x32, 0x9b, 0xaa, 0xb2, 0x9c, 0xae, 0x2d, 0xf4, 0xba, + 0xe5, 0xc2, 0x66, 0xff, 0x9a, 0x24, 0x79, 0xf0, 0x4b, 0xa8, 0xe0, 0xd2, 0xbb, 0xb1, 0x48, 0x56, + 0x8a, 0xfc, 0x47, 0xeb, 0x2b, 0x6c, 0xf6, 0x49, 0x24, 0xc9, 0x87, 0x6f, 0x84, 0xd5, 0x10, 0x76, + 0xb7, 0xa0, 0x98, 0x93, 0x61, 0xbe, 0x30, 0x59, 0x33, 0x94, 0x2d, 0x22, 0x51, 0x39, 0x12, 0x82, + 0x44, 0x58, 0xe6, 0x77, 0x39, 0xf4, 0xdf, 0x03, 0xdf, 0x3e, 0x7e, 0x1b, 0x61, 0xbf, 0x1e, 0x00, + 0xef, 0x80, 0x73, 0x49, 0x0d, 0xdd, 0x70, 0xfa, 0x85, 0x31, 0xce, 0xd6, 0xce, 0x84, 0x65, 0x7f, + 0x6d, 0x88, 0x4a, 0x46, 0x48, 0x60, 0x1b, 0x9d, 0x0c, 0x1f, 0x83, 0x0a, 0x28, 0xd3, 0x83, 0xf6, + 0x70, 0x2f, 0xed, 0x54, 0xaf, 0x5b, 0x3e, 0xb9, 0x91, 0x04, 0x21, 0x69, 0x4c, 0xbc, 0x86, 0x16, + 0x74, 0x7f, 0x1f, 0x08, 0xf0, 0x59, 0x1d, 0x81, 0x85, 0xf5, 0x34, 0x99, 0x0c, 0xf2, 0x87, 0x10, + 0x0e, 0x04, 0x8c, 0x83, 0x13, 0x43, 0xe4, 0xd2, 0x10, 0x6f, 0xa6, 0xc9, 0x64, 0x90, 0x1f, 0x37, + 0xd1, 0xbc, 0x46, 0xd5, 0xf1, 0x2e, 0x4e, 0xcb, 0x94, 0x4d, 0x38, 0x89, 0x75, 0xd3, 0x8d, 0x6b, + 0x70, 0x3d, 0x85, 0x45, 0x06, 0xb0, 0xb1, 0x40, 0xc8, 0x8e, 0x5a, 0x5c, 0x50, 0xcc, 0x4b, 0x4d, + 0x6f, 0x3c, 0xe1, 0x1b, 0x8c, 0x7b, 0x65, 0x7f, 0x7c, 0xc5, 0x57, 0x01, 0x49, 0xe8, 0x31, 0xbf, + 0x34, 0xd0, 0xe2, 0xe0, 0x24, 0x8f, 0x77, 0x28, 0x63, 0xdf, 0x1d, 0xea, 0x36, 0x9a, 0x0d, 0xa0, + 0x09, 0xb6, 0xf0, 0xb9, 0x2e, 0x80, 0x17, 0x27, 0xec, 0x44, 0xb4, 0x0e, 0xcd, 0x2d, 0x2d, 0x5a, + 0x9b, 0x0b, 0x5b, 0x51, 0x74, 0x22, 0x31, 0xa4, 0xf9, 0x75, 0x16, 0xa1, 0x7e, 0xdd, 0xe3, 0xd5, + 0xd4, 0xe8, 0x59, 0x1e, 0x18, 0x3d, 0x8b, 0xc9, 0x85, 0x2c, 0x31, 0x66, 0x6e, 0xa2, 0xbc, 0x2f, + 0xfb, 0x81, 0xb6, 0xb0, 0x3a, 0x2e, 0x98, 0xf1, 0x84, 0x8f, 0xd1, 0x6a, 0x28, 0x6c, 0xe8, 0xba, + 0xab, 0x68, 0x34, 0x7c, 0x15, 0xe5, 0x5a, 0xbe, 0x13, 0x8d, 0xe4, 0xb1, 0x7b, 0xd2, 0x75, 0xdf, + 0x09, 0x52, 0x98, 0xb3, 0xa1, 0xed, 0xe1, 0x2d, 0x91, 0x38, 0xf8, 0x03, 0x34, 0x1b, 0xad, 0xeb, + 0xb2, 0x44, 0x0b, 0xd5, 0xd5, 0x71, 0x98, 0x44, 0xf3, 0xa7, 0x70, 0x65, 0x30, 0x23, 0x0a, 0x89, + 0x31, 0x43, 0x7c, 0xd0, 0x1b, 0x9f, 0x9c, 0x40, 0x13, 0xe0, 0x8f, 0x5a, 0x75, 0x15, 0x7e, 0x44, + 0x21, 0x31, 0xa6, 0xf9, 0x4d, 0x16, 0xcd, 0xa5, 0x56, 0xc9, 0x7f, 0x23, 0x5d, 0xea, 0xad, 0x1d, + 0x6d, 0xba, 0x14, 0xe6, 0xd1, 0xa7, 0x4b, 0xe1, 0x1e, 0x5f, 0xba, 0x12, 0xf8, 0x23, 0xd2, 0xf5, + 0x53, 0x26, 0x4a, 0x97, 0x9a, 0x7f, 0x93, 0xa5, 0x4b, 0xf1, 0x26, 0xd2, 0x75, 0x0d, 0x4d, 0x77, + 0xc2, 0x05, 0x5d, 0x67, 0xeb, 0xc0, 0x45, 
0xc4, 0x8a, 0x9c, 0xb3, 0xde, 0x6d, 0x53, 0x4f, 0x30, + 0xb1, 0x57, 0x3b, 0x11, 0x2e, 0x08, 0x72, 0xc3, 0x27, 0x0a, 0x07, 0x3b, 0x68, 0x8e, 0x76, 0x80, + 0xd3, 0x06, 0xc8, 0x6b, 0x9d, 0xaf, 0xc3, 0xe2, 0x2e, 0xf6, 0xba, 0xe5, 0xb9, 0xb5, 0x04, 0x0e, + 0x49, 0xa1, 0x86, 0x63, 0x50, 0x9f, 0x6f, 0x08, 0xd6, 0x64, 0xf7, 0xd4, 0x18, 0x54, 0x93, 0x41, + 0x8e, 0xc1, 0xb5, 0x21, 0x2a, 0x19, 0x21, 0x61, 0x7e, 0x91, 0x41, 0xa7, 0x86, 0x3e, 0x53, 0xfa, + 0x41, 0x31, 0x8e, 0x29, 0x28, 0x99, 0xa7, 0x18, 0x94, 0xec, 0xa1, 0x83, 0xf2, 0x73, 0x06, 0xe1, + 0xe1, 0x26, 0x8a, 0x3f, 0x91, 0xa3, 0xd8, 0xe6, 0xac, 0x0e, 0x8e, 0x22, 0x1f, 0xc5, 0x6e, 0x97, + 0x9c, 0xe3, 0x49, 0x6c, 0x32, 0xa8, 0xec, 0x78, 0xbe, 0xa4, 0x13, 0x1f, 0xcc, 0xd9, 0xa3, 0xfd, + 0x60, 0x36, 0x7f, 0x1b, 0x0c, 0xe3, 0x33, 0xfd, 0x85, 0x3e, 0x2a, 0xfd, 0xd9, 0xa7, 0x98, 0x7e, + 0xf3, 0x47, 0x03, 0x2d, 0x0e, 0x0e, 0xe1, 0x67, 0xee, 0x7f, 0x9b, 0x5f, 0xd3, 0x4e, 0x3c, 0xdb, + 0xff, 0xd9, 0x7c, 0x6b, 0xa0, 0xd3, 0xa3, 0x56, 0x18, 0xbc, 0x9e, 0x5a, 0x3c, 0x2b, 0xc9, 0xc5, + 0xf3, 0x71, 0xb7, 0x5c, 0x1e, 0xf1, 0xaf, 0x40, 0x04, 0x93, 0xd8, 0x4d, 0x8f, 0x27, 0x01, 0xdf, + 0x0f, 0xdb, 0xac, 0x92, 0x70, 0x24, 0x36, 0x1f, 0x6b, 0xbc, 0x6b, 0x17, 0xef, 0x3f, 0x2a, 0x4d, + 0x3d, 0x78, 0x54, 0x9a, 0x7a, 0xf8, 0xa8, 0x34, 0xf5, 0x69, 0xaf, 0x64, 0xdc, 0xef, 0x95, 0x8c, + 0x07, 0xbd, 0x92, 0xf1, 0xb0, 0x57, 0x32, 0xfe, 0xea, 0x95, 0x8c, 0xcf, 0xff, 0x2e, 0x4d, 0xbd, + 0x3f, 0xa3, 0xa1, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x7e, 0xa0, 0xce, 0xf5, 0x16, 0x17, 0x00, + 0x00, +} + func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -189,29 +710,37 @@ func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { } func (m *CrossVersionObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CrossVersionObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x1a - i++ + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -219,33 +748,42 @@ func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n1, err := 
m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n2, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -253,33 +791,42 @@ func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n3, err := m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Current.Size())) - n4, err := m.Current.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -287,41 +834,52 @@ func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n5, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n6, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n7, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -329,41 +887,52 @@ func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n8, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -371,37 +940,46 @@ func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] 
= 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -409,45 +987,54 @@ func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleTargetRef.Size())) - n10, err := m.ScaleTargetRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Metrics) > 0 { + for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } } - i += n10 + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) + i-- + dAtA[i] = 0x18 if m.MinReplicas != nil { - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) + i-- + dAtA[i] = 0x10 } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) - if len(m.Metrics) > 0 { - for _, msg := range m.Metrics { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + { + size, err := m.ScaleTargetRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -455,62 +1042,73 @@ func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ObservedGeneration != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) - } - if m.LastScaleTime != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastScaleTime.Size())) - n11, err := m.LastScaleTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } - i += n11 } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) if len(m.CurrentMetrics) > 0 { - for _, msg := range m.CurrentMetrics { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := 
len(m.CurrentMetrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CurrentMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x2a } } - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x18 + if m.LastScaleTime != nil { + { + size, err := m.LastScaleTime.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil + if m.ObservedGeneration != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *MetricIdentifier) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -518,31 +1116,39 @@ func (m *MetricIdentifier) Marshal() (dAtA []byte, err error) { } func (m *MetricIdentifier) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricIdentifier) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n12, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n12 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MetricSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -550,61 +1156,75 @@ func (m *MetricSpec) Marshal() (dAtA []byte, err error) { } func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n13, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n14, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n14 + i-- + dAtA[i] = 0x2a } if m.Resource != nil { + { + 
size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n15, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n15 + i-- + dAtA[i] = 0x1a } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n16, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n16 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -612,61 +1232,75 @@ func (m *MetricStatus) Marshal() (dAtA []byte, err error) { } func (m *MetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n17, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n17 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n18, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n18 + i-- + dAtA[i] = 0x2a } if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n19, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n19 + i-- + dAtA[i] = 0x1a } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n20, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n20 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return 
len(dAtA) - i, nil } func (m *MetricTarget) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -674,46 +1308,56 @@ func (m *MetricTarget) Marshal() (dAtA []byte, err error) { } func (m *MetricTarget) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricTarget) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Value != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Value.Size())) - n21, err := m.Value.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n21 + if m.AverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.AverageUtilization)) + i-- + dAtA[i] = 0x20 } if m.AverageValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n22, err := m.AverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n22 + i-- + dAtA[i] = 0x1a } - if m.AverageUtilization != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.AverageUtilization)) + if m.Value != nil { + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MetricValueStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -721,42 +1365,51 @@ func (m *MetricValueStatus) Marshal() (dAtA []byte, err error) { } func (m *MetricValueStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricValueStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Value != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Value.Size())) - n23, err := m.Value.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 + if m.AverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.AverageUtilization)) + i-- + dAtA[i] = 0x18 } if m.AverageValue != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n24, err := m.AverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n24 + i-- + dAtA[i] = 0x12 } - if m.AverageUtilization != nil { - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.AverageUtilization)) + if m.Value != nil { + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 
0xa } - return i, nil + return len(dAtA) - i, nil } func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -764,41 +1417,52 @@ func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DescribedObject.Size())) - n25, err := m.DescribedObject.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n25 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n26, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n26 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n27, err := m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.DescribedObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n27 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -806,41 +1470,52 @@ func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n28, err := m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n28 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Current.Size())) - n29, err := m.Current.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.DescribedObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n29 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DescribedObject.Size())) - n30, err := m.DescribedObject.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n30 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { size := 
m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -848,33 +1523,42 @@ func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n31, err := m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n31 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n32, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n32 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -882,33 +1566,42 @@ func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n33, err := m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n33 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Current.Size())) - n34, err := m.Current.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n34 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -916,29 +1609,37 @@ func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n35, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, 
i, uint64(size)) } - i += n35 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -946,35 +1647,48 @@ func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Current.Size())) - n36, err := m.Current.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n36 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *CrossVersionObjectReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -987,6 +1701,9 @@ func (m *CrossVersionObjectReference) Size() (n int) { } func (m *ExternalMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Metric.Size() @@ -997,6 +1714,9 @@ func (m *ExternalMetricSource) Size() (n int) { } func (m *ExternalMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Metric.Size() @@ -1007,6 +1727,9 @@ func (m *ExternalMetricStatus) Size() (n int) { } func (m *HorizontalPodAutoscaler) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1019,6 +1742,9 @@ func (m *HorizontalPodAutoscaler) Size() (n int) { } func (m *HorizontalPodAutoscalerCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1035,6 +1761,9 @@ func (m *HorizontalPodAutoscalerCondition) Size() (n int) { } func (m *HorizontalPodAutoscalerList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1049,6 +1778,9 @@ func (m *HorizontalPodAutoscalerList) Size() (n int) { } func (m *HorizontalPodAutoscalerSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ScaleTargetRef.Size() @@ -1067,6 +1799,9 @@ func (m *HorizontalPodAutoscalerSpec) Size() (n int) { } func (m *HorizontalPodAutoscalerStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ObservedGeneration != nil { @@ -1094,6 +1829,9 @@ func (m *HorizontalPodAutoscalerStatus) Size() (n int) { } func (m *MetricIdentifier) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1106,6 +1844,9 @@ func (m *MetricIdentifier) Size() (n int) { } func (m *MetricSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = 
l l = len(m.Type) @@ -1130,6 +1871,9 @@ func (m *MetricSpec) Size() (n int) { } func (m *MetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1154,6 +1898,9 @@ func (m *MetricStatus) Size() (n int) { } func (m *MetricTarget) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1173,6 +1920,9 @@ func (m *MetricTarget) Size() (n int) { } func (m *MetricValueStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Value != nil { @@ -1190,6 +1940,9 @@ func (m *MetricValueStatus) Size() (n int) { } func (m *ObjectMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.DescribedObject.Size() @@ -1202,6 +1955,9 @@ func (m *ObjectMetricSource) Size() (n int) { } func (m *ObjectMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Metric.Size() @@ -1214,6 +1970,9 @@ func (m *ObjectMetricStatus) Size() (n int) { } func (m *PodsMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Metric.Size() @@ -1224,6 +1983,9 @@ func (m *PodsMetricSource) Size() (n int) { } func (m *PodsMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Metric.Size() @@ -1234,6 +1996,9 @@ func (m *PodsMetricStatus) Size() (n int) { } func (m *ResourceMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1244,6 +2009,9 @@ func (m *ResourceMetricSource) Size() (n int) { } func (m *ResourceMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1254,14 +2022,7 @@ func (m *ResourceMetricStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1305,7 +2066,7 @@ func (this *HorizontalPodAutoscaler) String() string { return "nil" } s := strings.Join([]string{`&HorizontalPodAutoscaler{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1319,7 +2080,7 @@ func (this *HorizontalPodAutoscalerCondition) String() string { s := strings.Join([]string{`&HorizontalPodAutoscalerCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -1330,9 +2091,14 @@ func (this *HorizontalPodAutoscalerList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]HorizontalPodAutoscaler{" + for _, f := 
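For context on the regenerated marshalers above: the newer generated code sizes varints from the value's bit length instead of a shift loop (the sovGenerated change), and its MarshalToSizedBuffer functions fill the buffer from the end toward the start, so encodeVarintGenerated now steps the offset backwards before writing. A minimal standalone sketch of both ideas, using local names rather than the generated identifiers:

package main

import (
	"fmt"
	"math/bits"
)

// varintSize mirrors the new sovGenerated: the number of 7-bit groups needed
// for x, computed from its bit length (x|1 keeps x == 0 at one byte).
func varintSize(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// putVarintBackwards mirrors the new encodeVarintGenerated: given the current
// write offset, it steps back by the varint's size, encodes the value there,
// and returns the new, smaller offset.
func putVarintBackwards(buf []byte, offset int, v uint64) int {
	offset -= varintSize(v)
	base := offset
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return base
}

func main() {
	buf := make([]byte, 16)
	i := len(buf) // marshaling proceeds from the end toward the start
	i = putVarintBackwards(buf, i, 300)
	fmt.Println(varintSize(300), buf[i:]) // 2 [172 2]
}

Writing back-to-front lets each message be emitted in a single pass over a buffer that was sized up front, without the n21/n22 style intermediate counters the old generated code carried.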
range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1341,11 +2107,16 @@ func (this *HorizontalPodAutoscalerSpec) String() string { if this == nil { return "nil" } + repeatedStringForMetrics := "[]MetricSpec{" + for _, f := range this.Metrics { + repeatedStringForMetrics += strings.Replace(strings.Replace(f.String(), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + "," + } + repeatedStringForMetrics += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerSpec{`, `ScaleTargetRef:` + strings.Replace(strings.Replace(this.ScaleTargetRef.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MinReplicas:` + valueToStringGenerated(this.MinReplicas) + `,`, `MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`, - `Metrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Metrics), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + `,`, + `Metrics:` + repeatedStringForMetrics + `,`, `}`, }, "") return s @@ -1354,13 +2125,23 @@ func (this *HorizontalPodAutoscalerStatus) String() string { if this == nil { return "nil" } + repeatedStringForCurrentMetrics := "[]MetricStatus{" + for _, f := range this.CurrentMetrics { + repeatedStringForCurrentMetrics += strings.Replace(strings.Replace(f.String(), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForCurrentMetrics += "}" + repeatedStringForConditions := "[]HorizontalPodAutoscalerCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscalerCondition", "HorizontalPodAutoscalerCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, - `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "v1.Time", 1) + `,`, `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, - `CurrentMetrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentMetrics), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "HorizontalPodAutoscalerCondition", "HorizontalPodAutoscalerCondition", 1), `&`, ``, 1) + `,`, + `CurrentMetrics:` + repeatedStringForCurrentMetrics + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -1371,7 +2152,7 @@ func (this *MetricIdentifier) String() string { } s := strings.Join([]string{`&MetricIdentifier{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - 
`Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -1382,10 +2163,10 @@ func (this *MetricSpec) String() string { } s := strings.Join([]string{`&MetricSpec{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricSource", "PodsMetricSource", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, `}`, }, "") return s @@ -1396,10 +2177,10 @@ func (this *MetricStatus) String() string { } s := strings.Join([]string{`&MetricStatus{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, `}`, }, "") return s @@ -1410,8 +2191,8 @@ func (this *MetricTarget) String() string { } s := strings.Join([]string{`&MetricTarget{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "resource.Quantity", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, `AverageUtilization:` + valueToStringGenerated(this.AverageUtilization) + `,`, `}`, }, "") @@ -1422,8 +2203,8 @@ func (this *MetricValueStatus) String() string { return "nil" } s := strings.Join([]string{`&MetricValueStatus{`, - `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", 
"k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "resource.Quantity", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, `AverageUtilization:` + valueToStringGenerated(this.AverageUtilization) + `,`, `}`, }, "") @@ -1520,7 +2301,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1548,7 +2329,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1558,6 +2339,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1577,7 +2361,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1587,6 +2371,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1606,7 +2393,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1616,6 +2403,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1627,7 +2417,10 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1657,7 +2450,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1685,7 +2478,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1694,6 +2487,9 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1715,7 +2511,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1724,6 +2520,9 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1740,6 +2539,9 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { if skippy < 
0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1767,7 +2569,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1795,7 +2597,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1804,6 +2606,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1825,7 +2630,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1834,6 +2639,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1850,6 +2658,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1877,7 +2688,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1905,7 +2716,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1914,6 +2725,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1935,7 +2749,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1944,6 +2758,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1965,7 +2782,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1974,6 +2791,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1990,6 +2810,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2017,7 +2840,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << 
shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2045,7 +2868,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2055,6 +2878,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2074,7 +2900,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2084,6 +2910,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2103,7 +2932,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2112,6 +2941,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2133,7 +2965,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2143,6 +2975,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2162,7 +2997,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2172,6 +3007,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2186,6 +3024,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2213,7 +3054,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2241,7 +3082,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2250,6 +3091,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2271,7 +3115,7 @@ func (m 
*HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2280,6 +3124,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2297,6 +3144,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2324,7 +3174,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2352,7 +3202,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2361,6 +3211,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2382,7 +3235,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2402,7 +3255,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxReplicas |= (int32(b) & 0x7F) << shift + m.MaxReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2421,7 +3274,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2430,6 +3283,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2447,6 +3303,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2474,7 +3333,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2502,7 +3361,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2522,7 +3381,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2531,11 +3390,14 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.LastScaleTime == nil { - m.LastScaleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + 
m.LastScaleTime = &v1.Time{} } if err := m.LastScaleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2555,7 +3417,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift + m.CurrentReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2574,7 +3436,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredReplicas |= (int32(b) & 0x7F) << shift + m.DesiredReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2593,7 +3455,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2602,6 +3464,9 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2624,7 +3489,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2633,6 +3498,9 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2650,6 +3518,9 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2677,7 +3548,7 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2705,7 +3576,7 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2715,6 +3586,9 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2734,7 +3608,7 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2743,11 +3617,14 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2762,6 +3639,9 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2789,7 +3669,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) 
<< shift if b < 0x80 { break } @@ -2817,7 +3697,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2827,6 +3707,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2846,7 +3729,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2855,6 +3738,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2879,7 +3765,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2888,6 +3774,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2912,7 +3801,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2921,6 +3810,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2945,7 +3837,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2954,6 +3846,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2973,6 +3868,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3000,7 +3898,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3028,7 +3926,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3038,6 +3936,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3057,7 +3958,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3066,6 +3967,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l 
{ return io.ErrUnexpectedEOF } @@ -3090,7 +3994,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3099,6 +4003,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3123,7 +4030,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3132,6 +4039,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3156,7 +4066,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3165,6 +4075,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3184,6 +4097,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3211,7 +4127,7 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3239,7 +4155,7 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3249,6 +4165,9 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3268,7 +4187,7 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3277,11 +4196,14 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Value == nil { - m.Value = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.Value = &resource.Quantity{} } if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3301,7 +4223,7 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3310,11 +4232,14 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.AverageValue = &resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { 
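The recurring addition in these Unmarshal hunks is a second bound on every decoded length: besides rejecting an end offset past the buffer, the regenerated code now rejects one that went negative, which is what a hostile length large enough to overflow int produces. A small sketch of that guard, with hypothetical local names:

package main

import (
	"errors"
	"fmt"
	"io"
)

var errInvalidLength = errors.New("proto: negative length found during unmarshaling")

// boundedSlice returns the msglen bytes starting at iNdEx, or an error if the
// decoded length is corrupt. The postIndex < 0 test catches lengths big enough
// to overflow int, the same guard the regenerated Unmarshal functions gained.
func boundedSlice(dAtA []byte, iNdEx, msglen int) ([]byte, error) {
	if msglen < 0 {
		return nil, errInvalidLength
	}
	postIndex := iNdEx + msglen
	if postIndex < 0 {
		return nil, errInvalidLength // int overflow, treat like a negative length
	}
	if postIndex > len(dAtA) {
		return nil, io.ErrUnexpectedEOF
	}
	return dAtA[iNdEx:postIndex], nil
}

func main() {
	_, err := boundedSlice(make([]byte, 8), 4, int(^uint(0)>>1)) // maximum int as length
	fmt.Println(err) // proto: negative length found during unmarshaling
}

The matching (iNdEx + skippy) < 0 checks apply the same idea to skipped unknown fields.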
return err @@ -3334,7 +4259,7 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3349,6 +4274,9 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3376,7 +4304,7 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3404,7 +4332,7 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3413,11 +4341,14 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Value == nil { - m.Value = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.Value = &resource.Quantity{} } if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3437,7 +4368,7 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3446,11 +4377,14 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.AverageValue = &resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3470,7 +4404,7 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3485,6 +4419,9 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3512,7 +4449,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3540,7 +4477,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3549,6 +4486,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3570,7 +4510,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3579,6 +4519,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3600,7 +4543,7 @@ 
func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3609,6 +4552,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3625,6 +4571,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3652,7 +4601,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3680,7 +4629,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3689,6 +4638,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3710,7 +4662,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3719,6 +4671,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3740,7 +4695,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3749,6 +4704,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3765,6 +4723,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3792,7 +4753,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3820,7 +4781,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3829,6 +4790,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3850,7 +4814,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3859,6 +4823,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3875,6 +4842,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3902,7 +4872,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3930,7 +4900,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3939,6 +4909,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3960,7 +4933,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3969,6 +4942,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3985,6 +4961,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4012,7 +4991,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4040,7 +5019,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4050,6 +5029,9 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4069,7 +5051,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4078,6 +5060,9 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4094,6 +5079,9 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4121,7 +5109,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4149,7 +5137,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4159,6 +5147,9 @@ func (m *ResourceMetricStatus) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4178,7 +5169,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4187,6 +5178,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4203,6 +5197,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4269,10 +5266,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -4301,6 +5301,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -4319,101 +5322,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1425 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xdd, 0x6f, 0x1b, 0xc5, - 0x16, 0xcf, 0xda, 0x8e, 0x93, 0x8e, 0xd3, 0x24, 0x9d, 0x5b, 0xb5, 0x56, 0xaa, 0x6b, 0x47, 0xab, - 0xab, 0xab, 0x52, 0xd1, 0x35, 0x31, 0xe1, 0x43, 0x42, 0x48, 0xc4, 0x01, 0xda, 0x8a, 0xa4, 0x2d, - 0x93, 0xb4, 0x42, 0xa8, 0x45, 0x8c, 0x77, 0x4f, 0xdc, 0x21, 0xde, 0x5d, 0x6b, 0x76, 0x6c, 0x35, - 0x45, 0x42, 0xbc, 0xf0, 0x8e, 0x40, 0xfc, 0x13, 0x88, 0x17, 0x5e, 0x90, 0x78, 0xe4, 0x43, 0xa8, - 0x42, 0x08, 0xf5, 0xb1, 0x08, 0xc9, 0xa2, 0xe6, 0xbf, 0xe8, 0x13, 0xda, 0x99, 0xd9, 0xf5, 0xae, - 0xed, 0xc4, 0x4e, 0x95, 0x14, 0xf5, 0xcd, 0x33, 0xe7, 0x9c, 0xdf, 0xf9, 0x9c, 0x73, 0xce, 0x1a, - 0x5d, 0xda, 0x7d, 0x35, 0xb0, 0x98, 0x5f, 0xd9, 0x6d, 0xd7, 0x81, 0x7b, 0x20, 0x20, 0xa8, 0x74, - 0xc0, 0x73, 0x7c, 0x5e, 0xd1, 0x04, 0xda, 0x62, 0x15, 0xda, 0x16, 0x7e, 0x60, 0xd3, 0x26, 0xf3, - 0x1a, 0x95, 0x4e, 0xb5, 0x0e, 0x82, 0x56, 0x2b, 0x0d, 0xf0, 0x80, 0x53, 0x01, 0x8e, 0xd5, 0xe2, - 0xbe, 0xf0, 0x71, 0x49, 0xf1, 0x5b, 0xb4, 0xc5, 0xac, 0x04, 0xbf, 0xa5, 0xf9, 0x97, 0x2e, 0x36, - 0x98, 0xb8, 0xd3, 0xae, 0x5b, 0xb6, 0xef, 0x56, 0x1a, 0x7e, 0xc3, 0xaf, 0x48, 0xb1, 0x7a, 0x7b, - 0x47, 0x9e, 0xe4, 0x41, 0xfe, 0x52, 0x70, 0x4b, 0x66, 0x42, 0xbd, 0xed, 0x73, 0xa8, 0x74, 0x56, - 0x06, 0x55, 0x2e, 0xad, 0xf6, 0x79, 0x5c, 0x6a, 0xdf, 0x61, 0x1e, 0xf0, 0xbd, 0x4a, 0x6b, 0xb7, - 0x21, 0x85, 0x38, 0x04, 0x7e, 0x9b, 0xdb, 0x70, 0x28, 0xa9, 0xa0, 0xe2, 0x82, 0xa0, 0xa3, 0x74, - 0x55, 0xf6, 0x93, 0xe2, 0x6d, 0x4f, 0x30, 0x77, 0x58, 0xcd, 0xcb, 0xe3, 0x04, 0x02, 0xfb, 0x0e, - 0xb8, 0x74, 0x50, 0xce, 0xfc, 0xca, 0x40, 0xe7, 0xd6, 0xb9, 0x1f, 0x04, 0x37, 0x81, 0x07, 0xcc, - 0xf7, 0xae, 0xd5, 0x3f, 0x02, 0x5b, 0x10, 0xd8, 0x01, 0x0e, 0x9e, 0x0d, 0x78, 
0x19, 0xe5, 0x76, - 0x99, 0xe7, 0x14, 0x8d, 0x65, 0xe3, 0xfc, 0x89, 0xda, 0xdc, 0xfd, 0x6e, 0x79, 0xaa, 0xd7, 0x2d, - 0xe7, 0xde, 0x61, 0x9e, 0x43, 0x24, 0x25, 0xe4, 0xf0, 0xa8, 0x0b, 0xc5, 0x4c, 0x9a, 0xe3, 0x2a, - 0x75, 0x81, 0x48, 0x0a, 0xae, 0x22, 0x44, 0x5b, 0x4c, 0x2b, 0x28, 0x66, 0x25, 0x1f, 0xd6, 0x7c, - 0x68, 0xed, 0xfa, 0x15, 0x4d, 0x21, 0x09, 0x2e, 0xf3, 0x17, 0x03, 0x9d, 0x7e, 0xeb, 0xae, 0x00, - 0xee, 0xd1, 0xe6, 0x26, 0x08, 0xce, 0xec, 0x2d, 0x19, 0x5f, 0xfc, 0x1e, 0xca, 0xbb, 0xf2, 0x2c, - 0x4d, 0x2a, 0x54, 0x5f, 0xb0, 0x0e, 0xae, 0x04, 0x4b, 0x49, 0x5f, 0x71, 0xc0, 0x13, 0x6c, 0x87, - 0x01, 0xaf, 0xcd, 0x6b, 0xd5, 0x79, 0x45, 0x21, 0x1a, 0x0f, 0x6f, 0xa3, 0xbc, 0xa0, 0xbc, 0x01, - 0x42, 0xba, 0x52, 0xa8, 0x3e, 0x3f, 0x19, 0xf2, 0xb6, 0x94, 0xe9, 0xa3, 0xaa, 0x33, 0xd1, 0x58, - 0xe6, 0xef, 0xc3, 0x8e, 0x08, 0x2a, 0xda, 0xc1, 0x31, 0x3a, 0x72, 0x0b, 0xcd, 0xd8, 0x6d, 0xce, - 0xc1, 0x8b, 0x3c, 0x59, 0x99, 0x0c, 0xfa, 0x26, 0x6d, 0xb6, 0x41, 0x59, 0x57, 0x5b, 0xd0, 0xd8, - 0x33, 0xeb, 0x0a, 0x89, 0x44, 0x90, 0xe6, 0x0f, 0x19, 0x74, 0xf6, 0xb2, 0xcf, 0xd9, 0x3d, 0xdf, - 0x13, 0xb4, 0x79, 0xdd, 0x77, 0xd6, 0x34, 0x20, 0x70, 0xfc, 0x21, 0x9a, 0x0d, 0x2b, 0xda, 0xa1, - 0x82, 0x8e, 0xf0, 0x2a, 0x2e, 0x4c, 0xab, 0xb5, 0xdb, 0x08, 0x2f, 0x02, 0x2b, 0xe4, 0xb6, 0x3a, - 0x2b, 0x96, 0x2a, 0xbb, 0x4d, 0x10, 0xb4, 0x5f, 0x19, 0xfd, 0x3b, 0x12, 0xa3, 0xe2, 0xdb, 0x28, - 0x17, 0xb4, 0xc0, 0xd6, 0x8e, 0xbd, 0x36, 0xce, 0xb1, 0x7d, 0x0c, 0xdd, 0x6a, 0x81, 0xdd, 0x2f, - 0xd5, 0xf0, 0x44, 0x24, 0x2c, 0x06, 0x94, 0x0f, 0x64, 0x00, 0x64, 0x99, 0x16, 0xaa, 0xaf, 0x3f, - 0xa9, 0x02, 0x15, 0xc5, 0x38, 0x43, 0xea, 0x4c, 0x34, 0xb8, 0xf9, 0x59, 0x16, 0x2d, 0xef, 0x23, - 0xb9, 0xee, 0x7b, 0x0e, 0x13, 0xcc, 0xf7, 0xf0, 0x65, 0x94, 0x13, 0x7b, 0x2d, 0xd0, 0x4f, 0x6f, - 0x35, 0xb2, 0x76, 0x7b, 0xaf, 0x05, 0x8f, 0xbb, 0xe5, 0xff, 0x8d, 0x93, 0x0f, 0xf9, 0x88, 0x44, - 0xc0, 0x1b, 0xb1, 0x57, 0x99, 0x14, 0x96, 0x36, 0xeb, 0x71, 0xb7, 0x3c, 0xa2, 0xff, 0x59, 0x31, - 0x52, 0xda, 0x78, 0xdc, 0x41, 0xb8, 0x49, 0x03, 0xb1, 0xcd, 0xa9, 0x17, 0x28, 0x4d, 0xcc, 0x05, - 0x1d, 0xaf, 0x0b, 0x93, 0xa5, 0x3b, 0x94, 0xa8, 0x2d, 0x69, 0x2b, 0xf0, 0xc6, 0x10, 0x1a, 0x19, - 0xa1, 0x01, 0xff, 0x1f, 0xe5, 0x39, 0xd0, 0xc0, 0xf7, 0x8a, 0x39, 0xe9, 0x45, 0x1c, 0x5c, 0x22, - 0x6f, 0x89, 0xa6, 0xe2, 0xe7, 0xd0, 0x8c, 0x0b, 0x41, 0x40, 0x1b, 0x50, 0x9c, 0x96, 0x8c, 0x71, - 0x2d, 0x6f, 0xaa, 0x6b, 0x12, 0xd1, 0xcd, 0x3f, 0x0c, 0x74, 0x6e, 0x9f, 0x38, 0x6e, 0xb0, 0x40, - 0xe0, 0x5b, 0x43, 0xf5, 0x6c, 0x4d, 0xe6, 0x60, 0x28, 0x2d, 0xab, 0x79, 0x51, 0xeb, 0x9e, 0x8d, - 0x6e, 0x12, 0xb5, 0x7c, 0x0b, 0x4d, 0x33, 0x01, 0x6e, 0x98, 0x95, 0xec, 0xf9, 0x42, 0xf5, 0x95, - 0x27, 0xac, 0xb5, 0xda, 0x49, 0xad, 0x63, 0xfa, 0x4a, 0x88, 0x46, 0x14, 0xa8, 0xf9, 0x67, 0x66, - 0x5f, 0xdf, 0xc2, 0x82, 0xc7, 0x1f, 0xa3, 0x79, 0x79, 0xd2, 0xfd, 0x0a, 0x76, 0xb4, 0x87, 0x63, - 0xdf, 0xd4, 0x01, 0xe3, 0xa2, 0x76, 0x46, 0x9b, 0x32, 0xbf, 0x95, 0x82, 0x26, 0x03, 0xaa, 0xf0, - 0x0a, 0x2a, 0xb8, 0xcc, 0x23, 0xd0, 0x6a, 0x32, 0x9b, 0xaa, 0xb2, 0x9c, 0xae, 0x2d, 0xf4, 0xba, - 0xe5, 0xc2, 0x66, 0xff, 0x9a, 0x24, 0x79, 0xf0, 0x4b, 0xa8, 0xe0, 0xd2, 0xbb, 0xb1, 0x48, 0x56, - 0x8a, 0xfc, 0x47, 0xeb, 0x2b, 0x6c, 0xf6, 0x49, 0x24, 0xc9, 0x87, 0x6f, 0x84, 0xd5, 0x10, 0x76, - 0xb7, 0xa0, 0x98, 0x93, 0x61, 0xbe, 0x30, 0x59, 0x33, 0x94, 0x2d, 0x22, 0x51, 0x39, 0x12, 0x82, - 0x44, 0x58, 0xe6, 0x77, 0x39, 0xf4, 0xdf, 0x03, 0xdf, 0x3e, 0x7e, 0x1b, 0x61, 0xbf, 0x1e, 0x00, - 0xef, 0x80, 0x73, 0x49, 0x0d, 0xdd, 0x70, 0xfa, 0x85, 0x31, 0xce, 0xd6, 0xce, 0x84, 0x65, 0x7f, - 0x6d, 
0x88, 0x4a, 0x46, 0x48, 0x60, 0x1b, 0x9d, 0x0c, 0x1f, 0x83, 0x0a, 0x28, 0xd3, 0x83, 0xf6, - 0x70, 0x2f, 0xed, 0x54, 0xaf, 0x5b, 0x3e, 0xb9, 0x91, 0x04, 0x21, 0x69, 0x4c, 0xbc, 0x86, 0x16, - 0x74, 0x7f, 0x1f, 0x08, 0xf0, 0x59, 0x1d, 0x81, 0x85, 0xf5, 0x34, 0x99, 0x0c, 0xf2, 0x87, 0x10, - 0x0e, 0x04, 0x8c, 0x83, 0x13, 0x43, 0xe4, 0xd2, 0x10, 0x6f, 0xa6, 0xc9, 0x64, 0x90, 0x1f, 0x37, - 0xd1, 0xbc, 0x46, 0xd5, 0xf1, 0x2e, 0x4e, 0xcb, 0x94, 0x4d, 0x38, 0x89, 0x75, 0xd3, 0x8d, 0x6b, - 0x70, 0x3d, 0x85, 0x45, 0x06, 0xb0, 0xb1, 0x40, 0xc8, 0x8e, 0x5a, 0x5c, 0x50, 0xcc, 0x4b, 0x4d, - 0x6f, 0x3c, 0xe1, 0x1b, 0x8c, 0x7b, 0x65, 0x7f, 0x7c, 0xc5, 0x57, 0x01, 0x49, 0xe8, 0x31, 0xbf, - 0x34, 0xd0, 0xe2, 0xe0, 0x24, 0x8f, 0x77, 0x28, 0x63, 0xdf, 0x1d, 0xea, 0x36, 0x9a, 0x0d, 0xa0, - 0x09, 0xb6, 0xf0, 0xb9, 0x2e, 0x80, 0x17, 0x27, 0xec, 0x44, 0xb4, 0x0e, 0xcd, 0x2d, 0x2d, 0x5a, - 0x9b, 0x0b, 0x5b, 0x51, 0x74, 0x22, 0x31, 0xa4, 0xf9, 0x75, 0x16, 0xa1, 0x7e, 0xdd, 0xe3, 0xd5, - 0xd4, 0xe8, 0x59, 0x1e, 0x18, 0x3d, 0x8b, 0xc9, 0x85, 0x2c, 0x31, 0x66, 0x6e, 0xa2, 0xbc, 0x2f, - 0xfb, 0x81, 0xb6, 0xb0, 0x3a, 0x2e, 0x98, 0xf1, 0x84, 0x8f, 0xd1, 0x6a, 0x28, 0x6c, 0xe8, 0xba, - 0xab, 0x68, 0x34, 0x7c, 0x15, 0xe5, 0x5a, 0xbe, 0x13, 0x8d, 0xe4, 0xb1, 0x7b, 0xd2, 0x75, 0xdf, - 0x09, 0x52, 0x98, 0xb3, 0xa1, 0xed, 0xe1, 0x2d, 0x91, 0x38, 0xf8, 0x03, 0x34, 0x1b, 0xad, 0xeb, - 0xb2, 0x44, 0x0b, 0xd5, 0xd5, 0x71, 0x98, 0x44, 0xf3, 0xa7, 0x70, 0x65, 0x30, 0x23, 0x0a, 0x89, - 0x31, 0x43, 0x7c, 0xd0, 0x1b, 0x9f, 0x9c, 0x40, 0x13, 0xe0, 0x8f, 0x5a, 0x75, 0x15, 0x7e, 0x44, - 0x21, 0x31, 0xa6, 0xf9, 0x4d, 0x16, 0xcd, 0xa5, 0x56, 0xc9, 0x7f, 0x23, 0x5d, 0xea, 0xad, 0x1d, - 0x6d, 0xba, 0x14, 0xe6, 0xd1, 0xa7, 0x4b, 0xe1, 0x1e, 0x5f, 0xba, 0x12, 0xf8, 0x23, 0xd2, 0xf5, - 0x53, 0x26, 0x4a, 0x97, 0x9a, 0x7f, 0x93, 0xa5, 0x4b, 0xf1, 0x26, 0xd2, 0x75, 0x0d, 0x4d, 0x77, - 0xc2, 0x05, 0x5d, 0x67, 0xeb, 0xc0, 0x45, 0xc4, 0x8a, 0x9c, 0xb3, 0xde, 0x6d, 0x53, 0x4f, 0x30, - 0xb1, 0x57, 0x3b, 0x11, 0x2e, 0x08, 0x72, 0xc3, 0x27, 0x0a, 0x07, 0x3b, 0x68, 0x8e, 0x76, 0x80, - 0xd3, 0x06, 0xc8, 0x6b, 0x9d, 0xaf, 0xc3, 0xe2, 0x2e, 0xf6, 0xba, 0xe5, 0xb9, 0xb5, 0x04, 0x0e, - 0x49, 0xa1, 0x86, 0x63, 0x50, 0x9f, 0x6f, 0x08, 0xd6, 0x64, 0xf7, 0xd4, 0x18, 0x54, 0x93, 0x41, - 0x8e, 0xc1, 0xb5, 0x21, 0x2a, 0x19, 0x21, 0x61, 0x7e, 0x91, 0x41, 0xa7, 0x86, 0x3e, 0x53, 0xfa, - 0x41, 0x31, 0x8e, 0x29, 0x28, 0x99, 0xa7, 0x18, 0x94, 0xec, 0xa1, 0x83, 0xf2, 0x73, 0x06, 0xe1, - 0xe1, 0x26, 0x8a, 0x3f, 0x91, 0xa3, 0xd8, 0xe6, 0xac, 0x0e, 0x8e, 0x22, 0x1f, 0xc5, 0x6e, 0x97, - 0x9c, 0xe3, 0x49, 0x6c, 0x32, 0xa8, 0xec, 0x78, 0xbe, 0xa4, 0x13, 0x1f, 0xcc, 0xd9, 0xa3, 0xfd, - 0x60, 0x36, 0x7f, 0x1b, 0x0c, 0xe3, 0x33, 0xfd, 0x85, 0x3e, 0x2a, 0xfd, 0xd9, 0xa7, 0x98, 0x7e, - 0xf3, 0x47, 0x03, 0x2d, 0x0e, 0x0e, 0xe1, 0x67, 0xee, 0x7f, 0x9b, 0x5f, 0xd3, 0x4e, 0x3c, 0xdb, - 0xff, 0xd9, 0x7c, 0x6b, 0xa0, 0xd3, 0xa3, 0x56, 0x18, 0xbc, 0x9e, 0x5a, 0x3c, 0x2b, 0xc9, 0xc5, - 0xf3, 0x71, 0xb7, 0x5c, 0x1e, 0xf1, 0xaf, 0x40, 0x04, 0x93, 0xd8, 0x4d, 0x8f, 0x27, 0x01, 0xdf, - 0x0f, 0xdb, 0xac, 0x92, 0x70, 0x24, 0x36, 0x1f, 0x6b, 0xbc, 0x6b, 0x17, 0xef, 0x3f, 0x2a, 0x4d, - 0x3d, 0x78, 0x54, 0x9a, 0x7a, 0xf8, 0xa8, 0x34, 0xf5, 0x69, 0xaf, 0x64, 0xdc, 0xef, 0x95, 0x8c, - 0x07, 0xbd, 0x92, 0xf1, 0xb0, 0x57, 0x32, 0xfe, 0xea, 0x95, 0x8c, 0xcf, 0xff, 0x2e, 0x4d, 0xbd, - 0x3f, 0xa3, 0xa1, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x7e, 0xa0, 0xce, 0xf5, 0x16, 0x17, 0x00, - 0x00, -} diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto 
b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto index b4e4c95a3..80f1d345d 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto @@ -32,7 +32,7 @@ option go_package = "v2beta2"; // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" optional string kind = 1; // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names @@ -69,12 +69,12 @@ message ExternalMetricStatus { // implementing the scale subresource based on the metrics specified. message HorizontalPodAutoscaler { // metadata is the standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification for the behaviour of the autoscaler. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional HorizontalPodAutoscalerSpec spec = 2; @@ -123,8 +123,11 @@ message HorizontalPodAutoscalerSpec { // should be collected, as well as to actually change the replica count. optional CrossVersionObjectReference scaleTargetRef = 1; - // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. - // It defaults to 1 pod. + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. // +optional optional int32 minReplicas = 2; diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types.go b/vendor/k8s.io/api/autoscaling/v2beta2/types.go index 2d3379537..314d70950 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types.go @@ -19,7 +19,7 @@ limitations under the License. package v2beta2 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -33,12 +33,12 @@ import ( type HorizontalPodAutoscaler struct { metav1.TypeMeta `json:",inline"` // metadata is the standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // spec is the specification for the behaviour of the autoscaler. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. 
// +optional Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -52,8 +52,11 @@ type HorizontalPodAutoscalerSpec struct { // scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics // should be collected, as well as to actually change the replica count. ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` - // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. - // It defaults to 1 pod. + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. // +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. @@ -73,7 +76,7 @@ type HorizontalPodAutoscalerSpec struct { // CrossVersionObjectReference contains enough information to let you identify the referred resource. type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go index 996dc1840..bb85b9f0f 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v2beta2 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"", + "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", "apiVersion": "API version of the referent", } @@ -60,8 +60,8 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscaler = map[string]string{ "": "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.", - "metadata": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", + "metadata": "metadata is the standard object metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", "status": "status is the current information about the autoscaler.", } @@ -95,7 +95,7 @@ func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscalerSpec = map[string]string{ "": "HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.", "scaleTargetRef": "scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics should be collected, as well as to actually change the replica count.", - "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod.", + "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.", "maxReplicas": "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas.", "metrics": "metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond. If not set, the default metric will be set to 80% average CPU utilization.", } diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go index a6a95653a..2dffa3336 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go @@ -126,7 +126,7 @@ func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerC func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]HorizontalPodAutoscaler, len(*in)) diff --git a/vendor/k8s.io/api/batch/v1/generated.pb.go b/vendor/k8s.io/api/batch/v1/generated.pb.go index 3aa32b578..fb9d21e17 100644 --- a/vendor/k8s.io/api/batch/v1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1/generated.pb.go @@ -17,32 +17,22 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto - - It has these top-level messages: - Job - JobCondition - JobList - JobSpec - JobStatus -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -55,25 +45,145 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *Job) Reset() { *m = Job{} } -func (*Job) ProtoMessage() {} -func (*Job) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *Job) Reset() { *m = Job{} } +func (*Job) ProtoMessage() {} +func (*Job) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{0} +} +func (m *Job) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Job) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Job) XXX_Merge(src proto.Message) { + xxx_messageInfo_Job.Merge(m, src) +} +func (m *Job) XXX_Size() int { + return m.Size() +} +func (m *Job) XXX_DiscardUnknown() { + xxx_messageInfo_Job.DiscardUnknown(m) +} + +var xxx_messageInfo_Job proto.InternalMessageInfo -func (m *JobCondition) Reset() { *m = JobCondition{} } -func (*JobCondition) ProtoMessage() {} -func (*JobCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *JobCondition) Reset() { *m = JobCondition{} } +func (*JobCondition) ProtoMessage() {} +func (*JobCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{1} +} +func (m *JobCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobCondition.Merge(m, src) +} +func (m *JobCondition) XXX_Size() int { + return m.Size() +} +func (m *JobCondition) XXX_DiscardUnknown() { + xxx_messageInfo_JobCondition.DiscardUnknown(m) +} -func (m *JobList) Reset() { *m = JobList{} } -func (*JobList) ProtoMessage() {} -func (*JobList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_JobCondition proto.InternalMessageInfo -func (m *JobSpec) Reset() { *m = JobSpec{} } -func (*JobSpec) ProtoMessage() {} -func (*JobSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *JobList) Reset() { *m = JobList{} } +func (*JobList) ProtoMessage() {} +func (*JobList) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{2} +} +func (m *JobList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, 
err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobList) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobList.Merge(m, src) +} +func (m *JobList) XXX_Size() int { + return m.Size() +} +func (m *JobList) XXX_DiscardUnknown() { + xxx_messageInfo_JobList.DiscardUnknown(m) +} -func (m *JobStatus) Reset() { *m = JobStatus{} } -func (*JobStatus) ProtoMessage() {} -func (*JobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_JobList proto.InternalMessageInfo + +func (m *JobSpec) Reset() { *m = JobSpec{} } +func (*JobSpec) ProtoMessage() {} +func (*JobSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{3} +} +func (m *JobSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobSpec.Merge(m, src) +} +func (m *JobSpec) XXX_Size() int { + return m.Size() +} +func (m *JobSpec) XXX_DiscardUnknown() { + xxx_messageInfo_JobSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_JobSpec proto.InternalMessageInfo + +func (m *JobStatus) Reset() { *m = JobStatus{} } +func (*JobStatus) ProtoMessage() {} +func (*JobStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{4} +} +func (m *JobStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobStatus.Merge(m, src) +} +func (m *JobStatus) XXX_Size() int { + return m.Size() +} +func (m *JobStatus) XXX_DiscardUnknown() { + xxx_messageInfo_JobStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_JobStatus proto.InternalMessageInfo func init() { proto.RegisterType((*Job)(nil), "k8s.io.api.batch.v1.Job") @@ -82,10 +192,78 @@ func init() { proto.RegisterType((*JobSpec)(nil), "k8s.io.api.batch.v1.JobSpec") proto.RegisterType((*JobStatus)(nil), "k8s.io.api.batch.v1.JobStatus") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto", fileDescriptor_3b52da57c93de713) +} + +var fileDescriptor_3b52da57c93de713 = []byte{ + // 929 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x5d, 0x6f, 0xe3, 0x44, + 0x14, 0xad, 0x9b, 0xa6, 0x4d, 0xa6, 0x1f, 0x5b, 0x06, 0x55, 0x1b, 0x0a, 0xb2, 0x97, 0x20, 0xa1, + 0x82, 0x84, 0x4d, 0x4b, 0x85, 0x10, 0x02, 0xa4, 0x75, 0x51, 0x25, 0xaa, 0x54, 0x5b, 0x26, 0x59, + 0x21, 0x21, 0x90, 0x18, 0xdb, 0x37, 0x89, 0x89, 0xed, 0xb1, 0x3c, 0x93, 0x48, 0x7d, 0xe3, 0x27, + 0xf0, 0x23, 0x10, 0x7f, 0x82, 0x77, 0xd4, 0xc7, 0x7d, 0xdc, 0x27, 0x8b, 0x9a, 0x1f, 0xc0, 0xfb, + 0x3e, 0xa1, 0x19, 0x3b, 0xb6, 0xd3, 0x26, 0xa2, 0xcb, 0x5b, 0xe6, 0xcc, 0x39, 0xe7, 0x5e, 0xcf, + 0x3d, 0xb9, 0xe8, 0x8b, 0xc9, 0x67, 0xdc, 0xf4, 0x99, 0x35, 0x99, 0x3a, 0x90, 0x44, 0x20, 0x80, + 0x5b, 0x33, 0x88, 0x3c, 0x96, 0x58, 0xc5, 0x05, 0x8d, 0x7d, 0xcb, 0xa1, 0xc2, 0x1d, 0x5b, 0xb3, + 0x63, 0x6b, 0x04, 0x11, 0x24, 0x54, 0x80, 0x67, 0xc6, 0x09, 0x13, 0x0c, 0xbf, 0x99, 0x93, 0x4c, + 0x1a, 0xfb, 0xa6, 0x22, 0x99, 0xb3, 0xe3, 0xc3, 0x8f, 0x46, 0xbe, 0x18, 0x4f, 
0x1d, 0xd3, 0x65, + 0xa1, 0x35, 0x62, 0x23, 0x66, 0x29, 0xae, 0x33, 0x1d, 0xaa, 0x93, 0x3a, 0xa8, 0x5f, 0xb9, 0xc7, + 0x61, 0xb7, 0x56, 0xc8, 0x65, 0x09, 0x2c, 0xa9, 0x73, 0x78, 0x5a, 0x71, 0x42, 0xea, 0x8e, 0xfd, + 0x08, 0x92, 0x6b, 0x2b, 0x9e, 0x8c, 0x24, 0xc0, 0xad, 0x10, 0x04, 0x5d, 0xa6, 0xb2, 0x56, 0xa9, + 0x92, 0x69, 0x24, 0xfc, 0x10, 0xee, 0x09, 0x3e, 0xfd, 0x2f, 0x01, 0x77, 0xc7, 0x10, 0xd2, 0xbb, + 0xba, 0xee, 0x3f, 0x1a, 0x6a, 0x5c, 0x30, 0x07, 0xff, 0x84, 0x5a, 0xb2, 0x17, 0x8f, 0x0a, 0xda, + 0xd1, 0x9e, 0x68, 0x47, 0xdb, 0x27, 0x1f, 0x9b, 0xd5, 0x0b, 0x95, 0x96, 0x66, 0x3c, 0x19, 0x49, + 0x80, 0x9b, 0x92, 0x6d, 0xce, 0x8e, 0xcd, 0x67, 0xce, 0xcf, 0xe0, 0x8a, 0x4b, 0x10, 0xd4, 0xc6, + 0x37, 0xa9, 0xb1, 0x96, 0xa5, 0x06, 0xaa, 0x30, 0x52, 0xba, 0xe2, 0xaf, 0xd0, 0x06, 0x8f, 0xc1, + 0xed, 0xac, 0x2b, 0xf7, 0x77, 0xcc, 0x25, 0xef, 0x6f, 0x5e, 0x30, 0xa7, 0x1f, 0x83, 0x6b, 0xef, + 0x14, 0x4e, 0x1b, 0xf2, 0x44, 0x94, 0x0e, 0x9f, 0xa3, 0x4d, 0x2e, 0xa8, 0x98, 0xf2, 0x4e, 0x43, + 0x39, 0xe8, 0x2b, 0x1d, 0x14, 0xcb, 0xde, 0x2b, 0x3c, 0x36, 0xf3, 0x33, 0x29, 0xd4, 0xdd, 0x3f, + 0x1b, 0x68, 0xe7, 0x82, 0x39, 0x67, 0x2c, 0xf2, 0x7c, 0xe1, 0xb3, 0x08, 0x9f, 0xa2, 0x0d, 0x71, + 0x1d, 0x83, 0xfa, 0xec, 0xb6, 0xfd, 0x64, 0x5e, 0x7a, 0x70, 0x1d, 0xc3, 0xab, 0xd4, 0xd8, 0xaf, + 0x73, 0x25, 0x46, 0x14, 0x1b, 0xf7, 0xca, 0x76, 0xd6, 0x95, 0xee, 0x74, 0xb1, 0xdc, 0xab, 0xd4, + 0x58, 0x92, 0x0e, 0xb3, 0x74, 0x5a, 0x6c, 0x0a, 0x8f, 0xd0, 0x6e, 0x40, 0xb9, 0xb8, 0x4a, 0x98, + 0x03, 0x03, 0x3f, 0x84, 0xe2, 0x1b, 0x3f, 0x7c, 0xd8, 0x0c, 0xa4, 0xc2, 0x3e, 0x28, 0x1a, 0xd8, + 0xed, 0xd5, 0x8d, 0xc8, 0xa2, 0x2f, 0x9e, 0x21, 0x2c, 0x81, 0x41, 0x42, 0x23, 0x9e, 0x7f, 0x92, + 0xac, 0xb6, 0xf1, 0xda, 0xd5, 0x0e, 0x8b, 0x6a, 0xb8, 0x77, 0xcf, 0x8d, 0x2c, 0xa9, 0x80, 0xdf, + 0x47, 0x9b, 0x09, 0x50, 0xce, 0xa2, 0x4e, 0x53, 0x3d, 0x57, 0x39, 0x1d, 0xa2, 0x50, 0x52, 0xdc, + 0xe2, 0x0f, 0xd0, 0x56, 0x08, 0x9c, 0xd3, 0x11, 0x74, 0x36, 0x15, 0xf1, 0x51, 0x41, 0xdc, 0xba, + 0xcc, 0x61, 0x32, 0xbf, 0xef, 0xfe, 0xae, 0xa1, 0xad, 0x0b, 0xe6, 0xf4, 0x7c, 0x2e, 0xf0, 0x0f, + 0xf7, 0xe2, 0x6b, 0x3e, 0xec, 0x63, 0xa4, 0x5a, 0x85, 0x77, 0xbf, 0xa8, 0xd3, 0x9a, 0x23, 0xb5, + 0xe8, 0x7e, 0x89, 0x9a, 0xbe, 0x80, 0x50, 0x8e, 0xba, 0x71, 0xb4, 0x7d, 0xd2, 0x59, 0x95, 0x3c, + 0x7b, 0xb7, 0x30, 0x69, 0x7e, 0x23, 0xe9, 0x24, 0x57, 0x75, 0xff, 0xd8, 0x50, 0x8d, 0xca, 0x2c, + 0xe3, 0x63, 0xb4, 0x1d, 0xd3, 0x84, 0x06, 0x01, 0x04, 0x3e, 0x0f, 0x55, 0xaf, 0x4d, 0xfb, 0x51, + 0x96, 0x1a, 0xdb, 0x57, 0x15, 0x4c, 0xea, 0x1c, 0x29, 0x71, 0x59, 0x18, 0x07, 0x20, 0x1f, 0x33, + 0x8f, 0x5b, 0x21, 0x39, 0xab, 0x60, 0x52, 0xe7, 0xe0, 0x67, 0xe8, 0x80, 0xba, 0xc2, 0x9f, 0xc1, + 0xd7, 0x40, 0xbd, 0xc0, 0x8f, 0xa0, 0x0f, 0x2e, 0x8b, 0xbc, 0xfc, 0xaf, 0xd3, 0xb0, 0xdf, 0xca, + 0x52, 0xe3, 0xe0, 0xe9, 0x32, 0x02, 0x59, 0xae, 0xc3, 0xa7, 0x68, 0xc7, 0xa1, 0xee, 0x84, 0x0d, + 0x87, 0x3d, 0x3f, 0xf4, 0x45, 0x67, 0x4b, 0x35, 0xb1, 0x9f, 0xa5, 0xc6, 0x8e, 0x5d, 0xc3, 0xc9, + 0x02, 0x0b, 0xff, 0x88, 0x5a, 0x1c, 0x02, 0x70, 0x05, 0x4b, 0x8a, 0x88, 0x7d, 0xf2, 0xc0, 0xa9, + 0x50, 0x07, 0x82, 0x7e, 0x21, 0xb5, 0x77, 0xe4, 0x58, 0xe6, 0x27, 0x52, 0x5a, 0xe2, 0xcf, 0xd1, + 0x5e, 0x48, 0xa3, 0x29, 0x2d, 0x99, 0x2a, 0x5b, 0x2d, 0x1b, 0x67, 0xa9, 0xb1, 0x77, 0xb9, 0x70, + 0x43, 0xee, 0x30, 0xf1, 0xb7, 0xa8, 0x25, 0x20, 0x8c, 0x03, 0x2a, 0xf2, 0xa0, 0x6d, 0x9f, 0xbc, + 0x57, 0x9f, 0xaa, 0xfc, 0xbf, 0xca, 0x46, 0xae, 0x98, 0x37, 0x28, 0x68, 0x6a, 0x31, 0x95, 0x29, + 0x99, 0xa3, 0xa4, 0xb4, 0xc1, 0xcf, 0xd1, 0x63, 0x21, 0x82, 0xe2, 0xc5, 0x9e, 0x0e, 0x05, 0x24, + 0xe7, 
0x7e, 0xe4, 0xf3, 0x31, 0x78, 0x9d, 0x96, 0x7a, 0xae, 0xb7, 0xb3, 0xd4, 0x78, 0x3c, 0x18, + 0xf4, 0x96, 0x51, 0xc8, 0x2a, 0x6d, 0xf7, 0xb7, 0x06, 0x6a, 0x97, 0x5b, 0x0d, 0x3f, 0x47, 0xc8, + 0x9d, 0xef, 0x10, 0xde, 0xd1, 0x54, 0x1e, 0xdf, 0x5d, 0x95, 0xc7, 0x72, 0xdb, 0x54, 0xab, 0xb9, + 0x84, 0x38, 0xa9, 0x19, 0xe1, 0xef, 0x50, 0x9b, 0x0b, 0x9a, 0x08, 0xb5, 0x0d, 0xd6, 0x5f, 0x7b, + 0x1b, 0xec, 0x66, 0xa9, 0xd1, 0xee, 0xcf, 0x0d, 0x48, 0xe5, 0x85, 0x87, 0x68, 0xaf, 0x0a, 0xe6, + 0xff, 0xdc, 0x6c, 0x6a, 0x9e, 0x67, 0x0b, 0x2e, 0xe4, 0x8e, 0xab, 0xdc, 0x2f, 0x79, 0x72, 0x55, + 0xd0, 0x9a, 0xd5, 0x7e, 0xc9, 0x63, 0x4e, 0x8a, 0x5b, 0x6c, 0xa1, 0x36, 0x9f, 0xba, 0x2e, 0x80, + 0x07, 0x9e, 0x8a, 0x4b, 0xd3, 0x7e, 0xa3, 0xa0, 0xb6, 0xfb, 0xf3, 0x0b, 0x52, 0x71, 0xa4, 0xf1, + 0x90, 0xfa, 0x01, 0x78, 0x2a, 0x26, 0x35, 0xe3, 0x73, 0x85, 0x92, 0xe2, 0xd6, 0x3e, 0xba, 0xb9, + 0xd5, 0xd7, 0x5e, 0xdc, 0xea, 0x6b, 0x2f, 0x6f, 0xf5, 0xb5, 0x5f, 0x32, 0x5d, 0xbb, 0xc9, 0x74, + 0xed, 0x45, 0xa6, 0x6b, 0x2f, 0x33, 0x5d, 0xfb, 0x2b, 0xd3, 0xb5, 0x5f, 0xff, 0xd6, 0xd7, 0xbe, + 0x5f, 0x9f, 0x1d, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x73, 0xe7, 0x7a, 0xb8, 0x08, 0x00, + 0x00, +} + func (m *Job) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -93,41 +271,52 @@ func (m *Job) Marshal() (dAtA []byte, err error) { } func (m *Job) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Job) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *JobCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -135,49 +324,62 @@ func (m *JobCondition) Marshal() (dAtA []byte, err error) { } func (m *JobCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += 
copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n4, err := m.LastProbeTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n5, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastProbeTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *JobList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -185,37 +387,46 @@ func (m *JobList) Marshal() (dAtA []byte, err error) { } func (m *JobList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *JobSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -223,70 +434,79 @@ func (m *JobSpec) Marshal() (dAtA []byte, err error) { } func (m *JobSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Parallelism != nil { - dAtA[i] = 
0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Parallelism)) - } - if m.Completions != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Completions)) + if m.TTLSecondsAfterFinished != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TTLSecondsAfterFinished)) + i-- + dAtA[i] = 0x40 } - if m.ActiveDeadlineSeconds != nil { - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) + if m.BackoffLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.BackoffLimit)) + i-- + dAtA[i] = 0x38 } - if m.Selector != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n7, err := m.Selector.MarshalTo(dAtA[i:]) + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n7 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x32 if m.ManualSelector != nil { - dAtA[i] = 0x28 - i++ + i-- if *m.ManualSelector { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x28 } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n8, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - i += n8 - if m.BackoffLimit != nil { - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.BackoffLimit)) + if m.ActiveDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) + i-- + dAtA[i] = 0x18 } - if m.TTLSecondsAfterFinished != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TTLSecondsAfterFinished)) + if m.Completions != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Completions)) + i-- + dAtA[i] = 0x10 + } + if m.Parallelism != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Parallelism)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *JobStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -294,64 +514,80 @@ func (m *JobStatus) Marshal() (dAtA []byte, err error) { } func (m *JobStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Failed)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.Succeeded)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.Active)) + i-- + dAtA[i] = 0x20 + if m.CompletionTime != nil { + { + size, err := m.CompletionTime.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } if m.StartTime != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.StartTime.Size())) - n9, err := m.StartTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := 
m.StartTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 + i-- + dAtA[i] = 0x12 } - if m.CompletionTime != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CompletionTime.Size())) - n10, err := m.CompletionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n10 } - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Active)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Succeeded)) - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Failed)) - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *Job) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -364,6 +600,9 @@ func (m *Job) Size() (n int) { } func (m *JobCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -382,6 +621,9 @@ func (m *JobCondition) Size() (n int) { } func (m *JobList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -396,6 +638,9 @@ func (m *JobList) Size() (n int) { } func (m *JobSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Parallelism != nil { @@ -426,6 +671,9 @@ func (m *JobSpec) Size() (n int) { } func (m *JobStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Conditions) > 0 { @@ -449,14 +697,7 @@ func (m *JobStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -466,7 +707,7 @@ func (this *Job) String() string { return "nil" } s := strings.Join([]string{`&Job{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "JobSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "JobStatus", "JobStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -480,8 +721,8 @@ func (this *JobCondition) String() string { s := strings.Join([]string{`&JobCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastProbeTime), "Time", 
"v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -492,9 +733,14 @@ func (this *JobList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Job{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Job", "Job", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&JobList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Job", "Job", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -507,9 +753,9 @@ func (this *JobSpec) String() string { `Parallelism:` + valueToStringGenerated(this.Parallelism) + `,`, `Completions:` + valueToStringGenerated(this.Completions) + `,`, `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `ManualSelector:` + valueToStringGenerated(this.ManualSelector) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `BackoffLimit:` + valueToStringGenerated(this.BackoffLimit) + `,`, `TTLSecondsAfterFinished:` + valueToStringGenerated(this.TTLSecondsAfterFinished) + `,`, `}`, @@ -520,10 +766,15 @@ func (this *JobStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]JobCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "JobCondition", "JobCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&JobStatus{`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "JobCondition", "JobCondition", 1), `&`, ``, 1) + `,`, - `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, - `CompletionTime:` + strings.Replace(fmt.Sprintf("%v", this.CompletionTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "v1.Time", 1) + `,`, + `CompletionTime:` + strings.Replace(fmt.Sprintf("%v", this.CompletionTime), "Time", "v1.Time", 1) + `,`, `Active:` + fmt.Sprintf("%v", this.Active) + `,`, `Succeeded:` + fmt.Sprintf("%v", this.Succeeded) + `,`, `Failed:` + fmt.Sprintf("%v", this.Failed) + `,`, @@ -554,7 +805,7 @@ func (m *Job) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b 
< 0x80 { break } @@ -582,7 +833,7 @@ func (m *Job) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -591,6 +842,9 @@ func (m *Job) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -612,7 +866,7 @@ func (m *Job) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -621,6 +875,9 @@ func (m *Job) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -642,7 +899,7 @@ func (m *Job) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -651,6 +908,9 @@ func (m *Job) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -667,6 +927,9 @@ func (m *Job) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -694,7 +957,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -722,7 +985,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -732,6 +995,9 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -751,7 +1017,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -761,6 +1027,9 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -780,7 +1049,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -789,6 +1058,9 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -810,7 +1082,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -819,6 +1091,9 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -840,7 +1115,7 @@ func (m *JobCondition) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -850,6 +1125,9 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -869,7 +1147,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -879,6 +1157,9 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -893,6 +1174,9 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -920,7 +1204,7 @@ func (m *JobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -948,7 +1232,7 @@ func (m *JobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -957,6 +1241,9 @@ func (m *JobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -978,7 +1265,7 @@ func (m *JobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -987,6 +1274,9 @@ func (m *JobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1004,6 +1294,9 @@ func (m *JobList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1031,7 +1324,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1059,7 +1352,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1079,7 +1372,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1099,7 +1392,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -1119,7 +1412,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1128,11 +1421,14 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if 
postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1152,7 +1448,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1173,7 +1469,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1182,6 +1478,9 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1203,7 +1502,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1223,7 +1522,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1238,6 +1537,9 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1265,7 +1567,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1293,7 +1595,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1302,6 +1604,9 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1324,7 +1629,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1333,11 +1638,14 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.StartTime == nil { - m.StartTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.StartTime = &v1.Time{} } if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1357,7 +1665,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1366,11 +1674,14 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.CompletionTime == nil { - m.CompletionTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.CompletionTime = &v1.Time{} } if err := m.CompletionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1390,7 +1701,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Active |= (int32(b) & 0x7F) << shift + m.Active |= 
int32(b&0x7F) << shift if b < 0x80 { break } @@ -1409,7 +1720,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Succeeded |= (int32(b) & 0x7F) << shift + m.Succeeded |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1428,7 +1739,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Failed |= (int32(b) & 0x7F) << shift + m.Failed |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1442,6 +1753,9 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1508,10 +1822,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1540,6 +1857,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1558,70 +1878,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 929 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x5d, 0x6f, 0xe3, 0x44, - 0x14, 0xad, 0x9b, 0xa6, 0x4d, 0xa6, 0x1f, 0x5b, 0x06, 0x55, 0x1b, 0x0a, 0xb2, 0x97, 0x20, 0xa1, - 0x82, 0x84, 0x4d, 0x4b, 0x85, 0x10, 0x02, 0xa4, 0x75, 0x51, 0x25, 0xaa, 0x54, 0x5b, 0x26, 0x59, - 0x21, 0x21, 0x90, 0x18, 0xdb, 0x37, 0x89, 0x89, 0xed, 0xb1, 0x3c, 0x93, 0x48, 0x7d, 0xe3, 0x27, - 0xf0, 0x23, 0x10, 0x7f, 0x82, 0x77, 0xd4, 0xc7, 0x7d, 0xdc, 0x27, 0x8b, 0x9a, 0x1f, 0xc0, 0xfb, - 0x3e, 0xa1, 0x19, 0x3b, 0xb6, 0xd3, 0x26, 0xa2, 0xcb, 0x5b, 0xe6, 0xcc, 0x39, 0xe7, 0x5e, 0xdf, - 0x39, 0xb9, 0xe8, 0x8b, 0xc9, 0x67, 0xdc, 0xf4, 0x99, 0x35, 0x99, 0x3a, 0x90, 0x44, 0x20, 0x80, - 0x5b, 0x33, 0x88, 0x3c, 0x96, 0x58, 0xc5, 0x05, 0x8d, 0x7d, 0xcb, 0xa1, 0xc2, 0x1d, 0x5b, 0xb3, - 0x63, 0x6b, 0x04, 0x11, 0x24, 0x54, 0x80, 0x67, 0xc6, 0x09, 0x13, 0x0c, 0xbf, 0x99, 0x93, 0x4c, - 0x1a, 0xfb, 0xa6, 0x22, 0x99, 0xb3, 0xe3, 0xc3, 0x8f, 0x46, 0xbe, 0x18, 0x4f, 0x1d, 0xd3, 0x65, - 0xa1, 0x35, 0x62, 0x23, 0x66, 0x29, 0xae, 0x33, 0x1d, 0xaa, 0x93, 0x3a, 0xa8, 0x5f, 0xb9, 0xc7, - 0x61, 0xb7, 0x56, 0xc8, 0x65, 0x09, 0x2c, 0xa9, 0x73, 0x78, 0x5a, 0x71, 0x42, 0xea, 0x8e, 0xfd, - 0x08, 0x92, 0x6b, 0x2b, 0x9e, 0x8c, 0x24, 0xc0, 0xad, 0x10, 0x04, 0x5d, 0xa6, 0xb2, 0x56, 0xa9, - 0x92, 0x69, 0x24, 0xfc, 0x10, 0xee, 0x09, 0x3e, 0xfd, 0x2f, 0x01, 0x77, 0xc7, 0x10, 0xd2, 0xbb, - 0xba, 0xee, 0x3f, 0x1a, 0x6a, 0x5c, 0x30, 0x07, 0xff, 0x84, 0x5a, 0xb2, 0x17, 0x8f, 0x0a, 0xda, - 0xd1, 0x9e, 0x68, 0x47, 0xdb, 0x27, 0x1f, 0x9b, 0xd5, 0x84, 0x4a, 0x4b, 0x33, 0x9e, 0x8c, 0x24, - 0xc0, 0x4d, 0xc9, 0x36, 0x67, 0xc7, 0xe6, 0x33, 0xe7, 0x67, 0x70, 0xc5, 0x25, 0x08, 0x6a, 0xe3, - 0x9b, 0xd4, 0x58, 0xcb, 0x52, 0x03, 0x55, 0x18, 0x29, 0x5d, 0xf1, 0x57, 0x68, 0x83, 0xc7, 0xe0, - 0x76, 0xd6, 0x95, 0xfb, 0x3b, 0xe6, 0x92, 0xf9, 0x9b, 0x17, 0xcc, 0xe9, 0xc7, 0xe0, 0xda, 0x3b, - 0x85, 0xd3, 0x86, 0x3c, 0x11, 0xa5, 0xc3, 0xe7, 0x68, 0x93, 0x0b, 0x2a, 0xa6, 0xbc, 0xd3, 0x50, - 0x0e, 0xfa, 0x4a, 0x07, 
0xc5, 0xb2, 0xf7, 0x0a, 0x8f, 0xcd, 0xfc, 0x4c, 0x0a, 0x75, 0xf7, 0xcf, - 0x06, 0xda, 0xb9, 0x60, 0xce, 0x19, 0x8b, 0x3c, 0x5f, 0xf8, 0x2c, 0xc2, 0xa7, 0x68, 0x43, 0x5c, - 0xc7, 0xa0, 0x3e, 0xbb, 0x6d, 0x3f, 0x99, 0x97, 0x1e, 0x5c, 0xc7, 0xf0, 0x2a, 0x35, 0xf6, 0xeb, - 0x5c, 0x89, 0x11, 0xc5, 0xc6, 0xbd, 0xb2, 0x9d, 0x75, 0xa5, 0x3b, 0x5d, 0x2c, 0xf7, 0x2a, 0x35, - 0x96, 0xa4, 0xc3, 0x2c, 0x9d, 0x16, 0x9b, 0xc2, 0x23, 0xb4, 0x1b, 0x50, 0x2e, 0xae, 0x12, 0xe6, - 0xc0, 0xc0, 0x0f, 0xa1, 0xf8, 0xc6, 0x0f, 0x1f, 0xf6, 0x06, 0x52, 0x61, 0x1f, 0x14, 0x0d, 0xec, - 0xf6, 0xea, 0x46, 0x64, 0xd1, 0x17, 0xcf, 0x10, 0x96, 0xc0, 0x20, 0xa1, 0x11, 0xcf, 0x3f, 0x49, - 0x56, 0xdb, 0x78, 0xed, 0x6a, 0x87, 0x45, 0x35, 0xdc, 0xbb, 0xe7, 0x46, 0x96, 0x54, 0xc0, 0xef, - 0xa3, 0xcd, 0x04, 0x28, 0x67, 0x51, 0xa7, 0xa9, 0xc6, 0x55, 0xbe, 0x0e, 0x51, 0x28, 0x29, 0x6e, - 0xf1, 0x07, 0x68, 0x2b, 0x04, 0xce, 0xe9, 0x08, 0x3a, 0x9b, 0x8a, 0xf8, 0xa8, 0x20, 0x6e, 0x5d, - 0xe6, 0x30, 0x99, 0xdf, 0x77, 0x7f, 0xd7, 0xd0, 0xd6, 0x05, 0x73, 0x7a, 0x3e, 0x17, 0xf8, 0x87, - 0x7b, 0xf1, 0x35, 0x1f, 0xf6, 0x31, 0x52, 0xad, 0xc2, 0xbb, 0x5f, 0xd4, 0x69, 0xcd, 0x91, 0x5a, - 0x74, 0xbf, 0x44, 0x4d, 0x5f, 0x40, 0x28, 0x9f, 0xba, 0x71, 0xb4, 0x7d, 0xd2, 0x59, 0x95, 0x3c, - 0x7b, 0xb7, 0x30, 0x69, 0x7e, 0x23, 0xe9, 0x24, 0x57, 0x75, 0xff, 0xd8, 0x50, 0x8d, 0xca, 0x2c, - 0xe3, 0x63, 0xb4, 0x1d, 0xd3, 0x84, 0x06, 0x01, 0x04, 0x3e, 0x0f, 0x55, 0xaf, 0x4d, 0xfb, 0x51, - 0x96, 0x1a, 0xdb, 0x57, 0x15, 0x4c, 0xea, 0x1c, 0x29, 0x71, 0x59, 0x18, 0x07, 0x20, 0x87, 0x99, - 0xc7, 0xad, 0x90, 0x9c, 0x55, 0x30, 0xa9, 0x73, 0xf0, 0x33, 0x74, 0x40, 0x5d, 0xe1, 0xcf, 0xe0, - 0x6b, 0xa0, 0x5e, 0xe0, 0x47, 0xd0, 0x07, 0x97, 0x45, 0x5e, 0xfe, 0xd7, 0x69, 0xd8, 0x6f, 0x65, - 0xa9, 0x71, 0xf0, 0x74, 0x19, 0x81, 0x2c, 0xd7, 0xe1, 0x1f, 0x51, 0x8b, 0x43, 0x00, 0xae, 0x60, - 0x49, 0x11, 0x96, 0x4f, 0x1e, 0x38, 0x5f, 0xea, 0x40, 0xd0, 0x2f, 0xa4, 0xf6, 0x8e, 0x1c, 0xf0, - 0xfc, 0x44, 0x4a, 0x4b, 0xfc, 0x39, 0xda, 0x0b, 0x69, 0x34, 0xa5, 0x25, 0x53, 0xa5, 0xa4, 0x65, - 0xe3, 0x2c, 0x35, 0xf6, 0x2e, 0x17, 0x6e, 0xc8, 0x1d, 0x26, 0xfe, 0x16, 0xb5, 0x04, 0x84, 0x71, - 0x40, 0x45, 0x1e, 0x99, 0xed, 0x93, 0xf7, 0xea, 0xef, 0x23, 0xff, 0x79, 0xb2, 0x91, 0x2b, 0xe6, - 0x0d, 0x0a, 0x9a, 0x5a, 0x31, 0xe5, 0x7b, 0xcf, 0x51, 0x52, 0xda, 0xe0, 0x53, 0xb4, 0xe3, 0x50, - 0x77, 0xc2, 0x86, 0xc3, 0x9e, 0x1f, 0xfa, 0xa2, 0xb3, 0xa5, 0x46, 0xbe, 0x9f, 0xa5, 0xc6, 0x8e, - 0x5d, 0xc3, 0xc9, 0x02, 0x0b, 0x3f, 0x47, 0x8f, 0x85, 0x08, 0x8a, 0x89, 0x3d, 0x1d, 0x0a, 0x48, - 0xce, 0xfd, 0xc8, 0xe7, 0x63, 0xf0, 0x3a, 0x2d, 0x65, 0xf0, 0x76, 0x96, 0x1a, 0x8f, 0x07, 0x83, - 0xde, 0x32, 0x0a, 0x59, 0xa5, 0xed, 0xfe, 0xd6, 0x40, 0xed, 0x72, 0xab, 0xe1, 0xe7, 0x08, 0xb9, - 0xf3, 0x1d, 0xc2, 0x3b, 0x9a, 0xca, 0xe3, 0xbb, 0xab, 0xf2, 0x58, 0x6e, 0x9b, 0x6a, 0x35, 0x97, - 0x10, 0x27, 0x35, 0x23, 0xfc, 0x1d, 0x6a, 0x73, 0x41, 0x13, 0xa1, 0xb6, 0xc1, 0xfa, 0x6b, 0x6f, - 0x83, 0xdd, 0x2c, 0x35, 0xda, 0xfd, 0xb9, 0x01, 0xa9, 0xbc, 0xf0, 0x10, 0xed, 0x55, 0xc1, 0xfc, - 0x9f, 0x9b, 0x4d, 0xa5, 0xe0, 0x6c, 0xc1, 0x85, 0xdc, 0x71, 0x95, 0xfb, 0x25, 0x4f, 0xae, 0x8a, - 0x67, 0xb3, 0xda, 0x2f, 0x79, 0xcc, 0x49, 0x71, 0x8b, 0x2d, 0xd4, 0xe6, 0x53, 0xd7, 0x05, 0xf0, - 0xc0, 0x53, 0x21, 0x6b, 0xda, 0x6f, 0x14, 0xd4, 0x76, 0x7f, 0x7e, 0x41, 0x2a, 0x8e, 0x34, 0x1e, - 0x52, 0x3f, 0x00, 0x4f, 0x85, 0xab, 0x66, 0x7c, 0xae, 0x50, 0x52, 0xdc, 0xda, 0x47, 0x37, 0xb7, - 0xfa, 0xda, 0x8b, 0x5b, 0x7d, 0xed, 0xe5, 0xad, 0xbe, 0xf6, 0x4b, 0xa6, 0x6b, 0x37, 0x99, 0xae, - 0xbd, 0xc8, 0x74, 0xed, 0x65, 0xa6, 0x6b, 0x7f, 
0x65, 0xba, 0xf6, 0xeb, 0xdf, 0xfa, 0xda, 0xf7, - 0xeb, 0xb3, 0xe3, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x13, 0xdb, 0x98, 0xf9, 0xb8, 0x08, 0x00, - 0x00, -} diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto index 039149dab..75de45f17 100644 --- a/vendor/k8s.io/api/batch/v1/generated.proto +++ b/vendor/k8s.io/api/batch/v1/generated.proto @@ -32,17 +32,17 @@ option go_package = "v1"; // Job represents the configuration of a single job. message Job { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of a job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional JobSpec spec = 2; // Current status of a job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional JobStatus status = 3; } @@ -75,7 +75,7 @@ message JobCondition { // JobList is a collection of jobs. message JobList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go index 8dad9043d..646a3cd7e 100644 --- a/vendor/k8s.io/api/batch/v1/types.go +++ b/vendor/k8s.io/api/batch/v1/types.go @@ -28,17 +28,17 @@ import ( type Job struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of a job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Current status of a job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -49,7 +49,7 @@ type Job struct { type JobList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go index d8e2bdd78..0120e07d4 100644 --- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -29,9 +29,9 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Job = map[string]string{ "": "Job represents the configuration of a single job.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Current status of a job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Current status of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Job) SwaggerDoc() map[string]string { @@ -54,7 +54,7 @@ func (JobCondition) SwaggerDoc() map[string]string { var map_JobList = map[string]string{ "": "JobList is a collection of jobs.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is the list of Jobs.", } diff --git a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go index 88cb01678..beba55ace 100644 --- a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go @@ -75,7 +75,7 @@ func (in *JobCondition) DeepCopy() *JobCondition { func (in *JobList) DeepCopyInto(out *JobList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Job, len(*in)) diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go index 36342a3af..837a2f9c1 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go @@ -17,33 +17,22 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto - - It has these top-level messages: - CronJob - CronJobList - CronJobSpec - CronJobStatus - JobTemplate - JobTemplateSpec -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -56,29 +45,173 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *CronJob) Reset() { *m = CronJob{} } -func (*CronJob) ProtoMessage() {} -func (*CronJob) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *CronJob) Reset() { *m = CronJob{} } +func (*CronJob) ProtoMessage() {} +func (*CronJob) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{0} +} +func (m *CronJob) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJob) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJob.Merge(m, src) +} +func (m *CronJob) XXX_Size() int { + return m.Size() +} +func (m *CronJob) XXX_DiscardUnknown() { + xxx_messageInfo_CronJob.DiscardUnknown(m) +} + +var xxx_messageInfo_CronJob proto.InternalMessageInfo + +func (m *CronJobList) Reset() { *m = CronJobList{} } +func (*CronJobList) ProtoMessage() {} +func (*CronJobList) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{1} +} +func (m *CronJobList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJobList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobList.Merge(m, src) +} +func (m *CronJobList) XXX_Size() int { + return m.Size() +} +func (m *CronJobList) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobList.DiscardUnknown(m) +} -func (m *CronJobList) Reset() { *m = CronJobList{} } -func (*CronJobList) ProtoMessage() {} -func (*CronJobList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_CronJobList proto.InternalMessageInfo -func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } -func (*CronJobSpec) ProtoMessage() {} -func (*CronJobSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } +func (*CronJobSpec) ProtoMessage() {} +func (*CronJobSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{2} +} +func (m *CronJobSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + 
return nil, err + } + return b[:n], nil +} +func (m *CronJobSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobSpec.Merge(m, src) +} +func (m *CronJobSpec) XXX_Size() int { + return m.Size() +} +func (m *CronJobSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobSpec.DiscardUnknown(m) +} -func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } -func (*CronJobStatus) ProtoMessage() {} -func (*CronJobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_CronJobSpec proto.InternalMessageInfo -func (m *JobTemplate) Reset() { *m = JobTemplate{} } -func (*JobTemplate) ProtoMessage() {} -func (*JobTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } +func (*CronJobStatus) ProtoMessage() {} +func (*CronJobStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{3} +} +func (m *CronJobStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJobStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobStatus.Merge(m, src) +} +func (m *CronJobStatus) XXX_Size() int { + return m.Size() +} +func (m *CronJobStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobStatus.DiscardUnknown(m) +} -func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } -func (*JobTemplateSpec) ProtoMessage() {} -func (*JobTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_CronJobStatus proto.InternalMessageInfo + +func (m *JobTemplate) Reset() { *m = JobTemplate{} } +func (*JobTemplate) ProtoMessage() {} +func (*JobTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{4} +} +func (m *JobTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobTemplate.Merge(m, src) +} +func (m *JobTemplate) XXX_Size() int { + return m.Size() +} +func (m *JobTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_JobTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_JobTemplate proto.InternalMessageInfo + +func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } +func (*JobTemplateSpec) ProtoMessage() {} +func (*JobTemplateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{5} +} +func (m *JobTemplateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobTemplateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobTemplateSpec.Merge(m, src) +} +func (m *JobTemplateSpec) XXX_Size() int { + return m.Size() +} +func (m *JobTemplateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_JobTemplateSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_JobTemplateSpec proto.InternalMessageInfo func init() { proto.RegisterType((*CronJob)(nil), "k8s.io.api.batch.v1beta1.CronJob") @@ -88,10 +221,68 @@ func init() { 
proto.RegisterType((*JobTemplate)(nil), "k8s.io.api.batch.v1beta1.JobTemplate") proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v1beta1.JobTemplateSpec") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto", fileDescriptor_e57b277b05179ae7) +} + +var fileDescriptor_e57b277b05179ae7 = []byte{ + // 771 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0xc7, 0xe3, 0x34, 0xbf, 0x76, 0xc2, 0x42, 0xd7, 0xa0, 0x5d, 0x2b, 0x20, 0x27, 0x64, 0xb5, + 0x22, 0x20, 0x76, 0x4c, 0x2b, 0x84, 0x38, 0x21, 0xad, 0x17, 0x2d, 0x50, 0x8a, 0x16, 0x39, 0x45, + 0x48, 0xa8, 0x42, 0x1d, 0x8f, 0x5f, 0x92, 0x69, 0x6c, 0x8f, 0xe5, 0x19, 0x47, 0xca, 0x8d, 0x0b, + 0x77, 0xfe, 0x11, 0x4e, 0xfc, 0x13, 0x11, 0xa7, 0x1e, 0x7b, 0x8a, 0xa8, 0xf9, 0x2f, 0x38, 0x21, + 0x4f, 0x9c, 0x1f, 0xcd, 0x8f, 0xb6, 0x7b, 0xe9, 0xcd, 0xf3, 0xe6, 0xfb, 0xfd, 0xcc, 0xf3, 0x7b, + 0x6f, 0x06, 0xbd, 0x18, 0x7e, 0x29, 0x30, 0xe3, 0xd6, 0x30, 0x71, 0x21, 0x0e, 0x41, 0x82, 0xb0, + 0x46, 0x10, 0x7a, 0x3c, 0xb6, 0xf2, 0x0d, 0x12, 0x31, 0xcb, 0x25, 0x92, 0x0e, 0xac, 0xd1, 0x81, + 0x0b, 0x92, 0x1c, 0x58, 0x7d, 0x08, 0x21, 0x26, 0x12, 0x3c, 0x1c, 0xc5, 0x5c, 0x72, 0xdd, 0x98, + 0x29, 0x31, 0x89, 0x18, 0x56, 0x4a, 0x9c, 0x2b, 0x1b, 0xcf, 0xfb, 0x4c, 0x0e, 0x12, 0x17, 0x53, + 0x1e, 0x58, 0x7d, 0xde, 0xe7, 0x96, 0x32, 0xb8, 0x49, 0x4f, 0xad, 0xd4, 0x42, 0x7d, 0xcd, 0x40, + 0x8d, 0xa7, 0x5b, 0x8e, 0x5c, 0x3f, 0xad, 0xd1, 0x5e, 0x11, 0x51, 0x1e, 0xc3, 0x36, 0xcd, 0xe7, + 0x4b, 0x4d, 0x40, 0xe8, 0x80, 0x85, 0x10, 0x8f, 0xad, 0x68, 0xd8, 0xcf, 0x02, 0xc2, 0x0a, 0x40, + 0x92, 0x6d, 0x2e, 0x6b, 0x97, 0x2b, 0x4e, 0x42, 0xc9, 0x02, 0xd8, 0x30, 0x7c, 0x71, 0x9b, 0x41, + 0xd0, 0x01, 0x04, 0x64, 0xdd, 0xd7, 0xfe, 0xbd, 0x88, 0xaa, 0x2f, 0x63, 0x1e, 0x1e, 0x71, 0x57, + 0x3f, 0x43, 0xb5, 0x2c, 0x1f, 0x8f, 0x48, 0x62, 0x68, 0x2d, 0xad, 0x53, 0x3f, 0xfc, 0x0c, 0x2f, + 0xeb, 0xb9, 0xc0, 0xe2, 0x68, 0xd8, 0xcf, 0x02, 0x02, 0x67, 0x6a, 0x3c, 0x3a, 0xc0, 0xaf, 0xdd, + 0x73, 0xa0, 0xf2, 0x07, 0x90, 0xc4, 0xd6, 0x27, 0xd3, 0x66, 0x21, 0x9d, 0x36, 0xd1, 0x32, 0xe6, + 0x2c, 0xa8, 0xfa, 0x37, 0xa8, 0x24, 0x22, 0xa0, 0x46, 0x51, 0xd1, 0x9f, 0xe1, 0x5d, 0xdd, 0xc2, + 0x79, 0x4a, 0xdd, 0x08, 0xa8, 0xfd, 0x56, 0x8e, 0x2c, 0x65, 0x2b, 0x47, 0x01, 0xf4, 0xd7, 0xa8, + 0x22, 0x24, 0x91, 0x89, 0x30, 0xf6, 0x14, 0xea, 0xa3, 0xdb, 0x51, 0x4a, 0x6e, 0xbf, 0x9d, 0xc3, + 0x2a, 0xb3, 0xb5, 0x93, 0x63, 0xda, 0x7f, 0x69, 0xa8, 0x9e, 0x2b, 0x8f, 0x99, 0x90, 0xfa, 0xe9, + 0x46, 0x2d, 0xf0, 0xdd, 0x6a, 0x91, 0xb9, 0x55, 0x25, 0xf6, 0xf3, 0x93, 0x6a, 0xf3, 0xc8, 0x4a, + 0x1d, 0x5e, 0xa1, 0x32, 0x93, 0x10, 0x08, 0xa3, 0xd8, 0xda, 0xeb, 0xd4, 0x0f, 0x3f, 0xbc, 0x35, + 0x7b, 0xfb, 0x61, 0x4e, 0x2b, 0x7f, 0x97, 0xf9, 0x9c, 0x99, 0xbd, 0xfd, 0x67, 0x69, 0x91, 0x75, + 0x56, 0x1c, 0xfd, 0x53, 0x54, 0xcb, 0xfa, 0xec, 0x25, 0x3e, 0xa8, 0xac, 0x1f, 0x2c, 0xb3, 0xe8, + 0xe6, 0x71, 0x67, 0xa1, 0xd0, 0x7f, 0x42, 0x4f, 0x84, 0x24, 0xb1, 0x64, 0x61, 0xff, 0x6b, 0x20, + 0x9e, 0xcf, 0x42, 0xe8, 0x02, 0xe5, 0xa1, 0x27, 0x54, 0x83, 0xf6, 0xec, 0xf7, 0xd3, 0x69, 0xf3, + 0x49, 0x77, 0xbb, 0xc4, 0xd9, 0xe5, 0xd5, 0x4f, 0xd1, 0x23, 0xca, 0x43, 0x9a, 0xc4, 0x31, 0x84, + 0x74, 0xfc, 0x23, 0xf7, 0x19, 0x1d, 0xab, 0x36, 0x3d, 0xb0, 0x71, 0x9e, 0xcd, 0xa3, 0x97, 0xeb, + 0x82, 0xff, 0xb6, 0x05, 0x9d, 0x4d, 0x90, 0xfe, 0x0c, 0x55, 0x45, 0x22, 0x22, 0x08, 0x3d, 0xa3, + 0xd4, 0xd2, 0x3a, 0x35, 0xbb, 0x9e, 0x4e, 0x9b, 0xd5, 0xee, 0x2c, 0xe4, 0xcc, 0xf7, 0xf4, 0x33, + 0x54, 
0x3f, 0xe7, 0xee, 0x09, 0x04, 0x91, 0x4f, 0x24, 0x18, 0x65, 0xd5, 0xc2, 0x8f, 0x77, 0xd7, + 0xf9, 0x68, 0x29, 0x56, 0x43, 0xf7, 0x6e, 0x9e, 0x69, 0x7d, 0x65, 0xc3, 0x59, 0x45, 0xea, 0xbf, + 0xa2, 0x86, 0x48, 0x28, 0x05, 0x21, 0x7a, 0x89, 0x7f, 0xc4, 0x5d, 0xf1, 0x2d, 0x13, 0x92, 0xc7, + 0xe3, 0x63, 0x16, 0x30, 0x69, 0x54, 0x5a, 0x5a, 0xa7, 0x6c, 0x9b, 0xe9, 0xb4, 0xd9, 0xe8, 0xee, + 0x54, 0x39, 0x37, 0x10, 0x74, 0x07, 0x3d, 0xee, 0x11, 0xe6, 0x83, 0xb7, 0xc1, 0xae, 0x2a, 0x76, + 0x23, 0x9d, 0x36, 0x1f, 0xbf, 0xda, 0xaa, 0x70, 0x76, 0x38, 0xdb, 0x7f, 0x6b, 0xe8, 0xe1, 0xb5, + 0xfb, 0xa0, 0x7f, 0x8f, 0x2a, 0x84, 0x4a, 0x36, 0xca, 0xe6, 0x25, 0x1b, 0xc5, 0xa7, 0xab, 0x25, + 0xca, 0xde, 0xb4, 0xe5, 0xfd, 0x76, 0xa0, 0x07, 0x59, 0x27, 0x60, 0x79, 0x89, 0x5e, 0x28, 0xab, + 0x93, 0x23, 0x74, 0x1f, 0xed, 0xfb, 0x44, 0xc8, 0xf9, 0xa8, 0x9d, 0xb0, 0x00, 0x54, 0x93, 0xea, + 0x87, 0x9f, 0xdc, 0xed, 0xf2, 0x64, 0x0e, 0xfb, 0xbd, 0x74, 0xda, 0xdc, 0x3f, 0x5e, 0xe3, 0x38, + 0x1b, 0xe4, 0xf6, 0x44, 0x43, 0xab, 0xdd, 0xb9, 0x87, 0xe7, 0xeb, 0x67, 0x54, 0x93, 0xf3, 0x89, + 0x2a, 0xbe, 0xe9, 0x44, 0x2d, 0x6e, 0xe2, 0x62, 0x9c, 0x16, 0xb0, 0xec, 0xf5, 0x79, 0x67, 0x4d, + 0x7f, 0x0f, 0xbf, 0xf3, 0xd5, 0xb5, 0xd7, 0xf8, 0x83, 0x6d, 0xbf, 0x82, 0x6f, 0x78, 0x84, 0xed, + 0xe7, 0x93, 0x2b, 0xb3, 0x70, 0x71, 0x65, 0x16, 0x2e, 0xaf, 0xcc, 0xc2, 0x6f, 0xa9, 0xa9, 0x4d, + 0x52, 0x53, 0xbb, 0x48, 0x4d, 0xed, 0x32, 0x35, 0xb5, 0x7f, 0x52, 0x53, 0xfb, 0xe3, 0x5f, 0xb3, + 0xf0, 0x4b, 0x35, 0x2f, 0xc8, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf6, 0x9f, 0xb3, 0xdd, 0xdf, + 0x07, 0x00, 0x00, +} + func (m *CronJob) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -99,41 +290,52 @@ func (m *CronJob) Marshal() (dAtA []byte, err error) { } func (m *CronJob) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJob) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CronJobList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -141,37 +343,46 @@ func (m *CronJobList) Marshal() (dAtA []byte, err error) { } func (m *CronJobList) 
MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CronJobSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -179,58 +390,67 @@ func (m *CronJobSpec) Marshal() (dAtA []byte, err error) { } func (m *CronJobSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) - i += copy(dAtA[i:], m.Schedule) - if m.StartingDeadlineSeconds != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds)) + if m.FailedJobsHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) + i-- + dAtA[i] = 0x38 } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy))) - i += copy(dAtA[i:], m.ConcurrencyPolicy) + if m.SuccessfulJobsHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + { + size, err := m.JobTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a if m.Suspend != nil { - dAtA[i] = 0x20 - i++ + i-- if *m.Suspend { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.JobTemplate.Size())) - n5, err := m.JobTemplate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - if m.SuccessfulJobsHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit)) + i-- + dAtA[i] = 0x20 } - if m.FailedJobsHistoryLimit != nil { - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) + i -= len(m.ConcurrencyPolicy) + copy(dAtA[i:], m.ConcurrencyPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy))) + i-- + dAtA[i] = 0x1a + if m.StartingDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds)) + i-- + dAtA[i] = 0x10 } - return i, nil + i -= len(m.Schedule) + copy(dAtA[i:], m.Schedule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) + i-- 
+ dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CronJobStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -238,39 +458,48 @@ func (m *CronJobStatus) Marshal() (dAtA []byte, err error) { } func (m *CronJobStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Active) > 0 { - for _, msg := range m.Active { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.LastScheduleTime != nil { + { + size, err := m.LastScheduleTime.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if m.LastScheduleTime != nil { + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastScheduleTime.Size())) - n6, err := m.LastScheduleTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if len(m.Active) > 0 { + for iNdEx := len(m.Active) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Active[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n6 } - return i, nil + return len(dAtA) - i, nil } func (m *JobTemplate) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -278,33 +507,42 @@ func (m *JobTemplate) Marshal() (dAtA []byte, err error) { } func (m *JobTemplate) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n8, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -312,39 +550,53 @@ func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { } func (m *JobTemplateSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if 
err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n10, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *CronJob) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -357,6 +609,9 @@ func (m *CronJob) Size() (n int) { } func (m *CronJobList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -371,6 +626,9 @@ func (m *CronJobList) Size() (n int) { } func (m *CronJobSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Schedule) @@ -395,6 +653,9 @@ func (m *CronJobSpec) Size() (n int) { } func (m *CronJobStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Active) > 0 { @@ -411,6 +672,9 @@ func (m *CronJobStatus) Size() (n int) { } func (m *JobTemplate) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -421,6 +685,9 @@ func (m *JobTemplate) Size() (n int) { } func (m *JobTemplateSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -431,14 +698,7 @@ func (m *JobTemplateSpec) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -448,7 +708,7 @@ func (this *CronJob) String() string { return "nil" } s := strings.Join([]string{`&CronJob{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CronJobSpec", "CronJobSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CronJobStatus", "CronJobStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -459,9 +719,14 @@ func (this *CronJobList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]CronJob{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CronJob", "CronJob", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&CronJobList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CronJob", "CronJob", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + 
`,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -486,9 +751,14 @@ func (this *CronJobStatus) String() string { if this == nil { return "nil" } + repeatedStringForActive := "[]ObjectReference{" + for _, f := range this.Active { + repeatedStringForActive += fmt.Sprintf("%v", f) + "," + } + repeatedStringForActive += "}" s := strings.Join([]string{`&CronJobStatus{`, - `Active:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Active), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, - `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `Active:` + repeatedStringForActive + `,`, + `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "v1.Time", 1) + `,`, `}`, }, "") return s @@ -498,7 +768,7 @@ func (this *JobTemplate) String() string { return "nil" } s := strings.Join([]string{`&JobTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Template:` + strings.Replace(strings.Replace(this.Template.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -509,8 +779,8 @@ func (this *JobTemplateSpec) String() string { return "nil" } s := strings.Join([]string{`&JobTemplateSpec{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "k8s_io_api_batch_v1.JobSpec", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Spec), "JobSpec", "v12.JobSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -538,7 +808,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -566,7 +836,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -575,6 +845,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -596,7 +869,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -605,6 +878,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -626,7 +902,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -635,6 +911,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -651,6 +930,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -678,7 +960,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -706,7 +988,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -715,6 +997,9 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -736,7 +1021,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -745,6 +1030,9 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -762,6 +1050,9 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -789,7 +1080,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -817,7 +1108,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -827,6 +1118,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -846,7 +1140,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -866,7 +1160,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -876,6 +1170,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -895,7 +1192,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -916,7 +1213,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -925,6 +1222,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ 
-946,7 +1246,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -966,7 +1266,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -981,6 +1281,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1008,7 +1311,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1036,7 +1339,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1045,10 +1348,13 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Active = append(m.Active, k8s_io_api_core_v1.ObjectReference{}) + m.Active = append(m.Active, v11.ObjectReference{}) if err := m.Active[len(m.Active)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1067,7 +1373,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1076,11 +1382,14 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.LastScheduleTime == nil { - m.LastScheduleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.LastScheduleTime = &v1.Time{} } if err := m.LastScheduleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1095,6 +1404,9 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1122,7 +1434,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1150,7 +1462,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1159,6 +1471,9 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1180,7 +1495,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1189,6 +1504,9 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1205,6 +1523,9 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { if 
skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1232,7 +1553,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1260,7 +1581,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1269,6 +1590,9 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1290,7 +1614,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1299,6 +1623,9 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1315,6 +1642,9 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1381,10 +1711,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1413,6 +1746,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1431,60 +1767,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 771 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0xc7, 0xe3, 0x34, 0xbf, 0x76, 0xc2, 0x42, 0xd7, 0xa0, 0x5d, 0x2b, 0x20, 0x27, 0x64, 0xb5, - 0x22, 0x20, 0x76, 0x4c, 0x2b, 0x84, 0x38, 0x21, 0xad, 0x17, 0x2d, 0x50, 0x8a, 0x16, 0x39, 0x45, - 0x48, 0xa8, 0x42, 0x1d, 0x8f, 0x5f, 0x92, 0x69, 0x6c, 0x8f, 0xe5, 0x19, 0x47, 0xca, 0x8d, 0x0b, - 0x77, 0xfe, 0x11, 0x4e, 0xfc, 0x13, 0x11, 0xa7, 0x1e, 0x7b, 0x8a, 0xa8, 0xf9, 0x2f, 0x38, 0x21, - 0x4f, 0x9c, 0x1f, 0xcd, 0x8f, 0xb6, 0x7b, 0xe9, 0xcd, 0xf3, 0xe6, 0xfb, 0xfd, 0xcc, 0xf3, 0x7b, - 0x6f, 0x06, 0xbd, 0x18, 0x7e, 0x29, 0x30, 0xe3, 0xd6, 0x30, 0x71, 0x21, 0x0e, 0x41, 0x82, 0xb0, - 0x46, 0x10, 0x7a, 0x3c, 0xb6, 0xf2, 0x0d, 0x12, 0x31, 0xcb, 0x25, 0x92, 0x0e, 0xac, 0xd1, 0x81, - 0x0b, 0x92, 0x1c, 0x58, 0x7d, 0x08, 0x21, 0x26, 0x12, 0x3c, 0x1c, 0xc5, 0x5c, 0x72, 0xdd, 0x98, - 0x29, 0x31, 0x89, 0x18, 0x56, 0x4a, 0x9c, 0x2b, 0x1b, 0xcf, 0xfb, 0x4c, 0x0e, 0x12, 0x17, 0x53, - 0x1e, 0x58, 0x7d, 0xde, 0xe7, 0x96, 0x32, 0xb8, 0x49, 0x4f, 0xad, 0xd4, 0x42, 0x7d, 0xcd, 0x40, - 0x8d, 0xa7, 0x5b, 0x8e, 0x5c, 0x3f, 0xad, 0xd1, 0x5e, 0x11, 0x51, 0x1e, 0xc3, 
0x36, 0xcd, 0xe7, - 0x4b, 0x4d, 0x40, 0xe8, 0x80, 0x85, 0x10, 0x8f, 0xad, 0x68, 0xd8, 0xcf, 0x02, 0xc2, 0x0a, 0x40, - 0x92, 0x6d, 0x2e, 0x6b, 0x97, 0x2b, 0x4e, 0x42, 0xc9, 0x02, 0xd8, 0x30, 0x7c, 0x71, 0x9b, 0x41, - 0xd0, 0x01, 0x04, 0x64, 0xdd, 0xd7, 0xfe, 0xbd, 0x88, 0xaa, 0x2f, 0x63, 0x1e, 0x1e, 0x71, 0x57, - 0x3f, 0x43, 0xb5, 0x2c, 0x1f, 0x8f, 0x48, 0x62, 0x68, 0x2d, 0xad, 0x53, 0x3f, 0xfc, 0x0c, 0x2f, - 0xeb, 0xb9, 0xc0, 0xe2, 0x68, 0xd8, 0xcf, 0x02, 0x02, 0x67, 0x6a, 0x3c, 0x3a, 0xc0, 0xaf, 0xdd, - 0x73, 0xa0, 0xf2, 0x07, 0x90, 0xc4, 0xd6, 0x27, 0xd3, 0x66, 0x21, 0x9d, 0x36, 0xd1, 0x32, 0xe6, - 0x2c, 0xa8, 0xfa, 0x37, 0xa8, 0x24, 0x22, 0xa0, 0x46, 0x51, 0xd1, 0x9f, 0xe1, 0x5d, 0xdd, 0xc2, - 0x79, 0x4a, 0xdd, 0x08, 0xa8, 0xfd, 0x56, 0x8e, 0x2c, 0x65, 0x2b, 0x47, 0x01, 0xf4, 0xd7, 0xa8, - 0x22, 0x24, 0x91, 0x89, 0x30, 0xf6, 0x14, 0xea, 0xa3, 0xdb, 0x51, 0x4a, 0x6e, 0xbf, 0x9d, 0xc3, - 0x2a, 0xb3, 0xb5, 0x93, 0x63, 0xda, 0x7f, 0x69, 0xa8, 0x9e, 0x2b, 0x8f, 0x99, 0x90, 0xfa, 0xe9, - 0x46, 0x2d, 0xf0, 0xdd, 0x6a, 0x91, 0xb9, 0x55, 0x25, 0xf6, 0xf3, 0x93, 0x6a, 0xf3, 0xc8, 0x4a, - 0x1d, 0x5e, 0xa1, 0x32, 0x93, 0x10, 0x08, 0xa3, 0xd8, 0xda, 0xeb, 0xd4, 0x0f, 0x3f, 0xbc, 0x35, - 0x7b, 0xfb, 0x61, 0x4e, 0x2b, 0x7f, 0x97, 0xf9, 0x9c, 0x99, 0xbd, 0xfd, 0x67, 0x69, 0x91, 0x75, - 0x56, 0x1c, 0xfd, 0x53, 0x54, 0xcb, 0xfa, 0xec, 0x25, 0x3e, 0xa8, 0xac, 0x1f, 0x2c, 0xb3, 0xe8, - 0xe6, 0x71, 0x67, 0xa1, 0xd0, 0x7f, 0x42, 0x4f, 0x84, 0x24, 0xb1, 0x64, 0x61, 0xff, 0x6b, 0x20, - 0x9e, 0xcf, 0x42, 0xe8, 0x02, 0xe5, 0xa1, 0x27, 0x54, 0x83, 0xf6, 0xec, 0xf7, 0xd3, 0x69, 0xf3, - 0x49, 0x77, 0xbb, 0xc4, 0xd9, 0xe5, 0xd5, 0x4f, 0xd1, 0x23, 0xca, 0x43, 0x9a, 0xc4, 0x31, 0x84, - 0x74, 0xfc, 0x23, 0xf7, 0x19, 0x1d, 0xab, 0x36, 0x3d, 0xb0, 0x71, 0x9e, 0xcd, 0xa3, 0x97, 0xeb, - 0x82, 0xff, 0xb6, 0x05, 0x9d, 0x4d, 0x90, 0xfe, 0x0c, 0x55, 0x45, 0x22, 0x22, 0x08, 0x3d, 0xa3, - 0xd4, 0xd2, 0x3a, 0x35, 0xbb, 0x9e, 0x4e, 0x9b, 0xd5, 0xee, 0x2c, 0xe4, 0xcc, 0xf7, 0xf4, 0x33, - 0x54, 0x3f, 0xe7, 0xee, 0x09, 0x04, 0x91, 0x4f, 0x24, 0x18, 0x65, 0xd5, 0xc2, 0x8f, 0x77, 0xd7, - 0xf9, 0x68, 0x29, 0x56, 0x43, 0xf7, 0x6e, 0x9e, 0x69, 0x7d, 0x65, 0xc3, 0x59, 0x45, 0xea, 0xbf, - 0xa2, 0x86, 0x48, 0x28, 0x05, 0x21, 0x7a, 0x89, 0x7f, 0xc4, 0x5d, 0xf1, 0x2d, 0x13, 0x92, 0xc7, - 0xe3, 0x63, 0x16, 0x30, 0x69, 0x54, 0x5a, 0x5a, 0xa7, 0x6c, 0x9b, 0xe9, 0xb4, 0xd9, 0xe8, 0xee, - 0x54, 0x39, 0x37, 0x10, 0x74, 0x07, 0x3d, 0xee, 0x11, 0xe6, 0x83, 0xb7, 0xc1, 0xae, 0x2a, 0x76, - 0x23, 0x9d, 0x36, 0x1f, 0xbf, 0xda, 0xaa, 0x70, 0x76, 0x38, 0xdb, 0x7f, 0x6b, 0xe8, 0xe1, 0xb5, - 0xfb, 0xa0, 0x7f, 0x8f, 0x2a, 0x84, 0x4a, 0x36, 0xca, 0xe6, 0x25, 0x1b, 0xc5, 0xa7, 0xab, 0x25, - 0xca, 0xde, 0xb4, 0xe5, 0xfd, 0x76, 0xa0, 0x07, 0x59, 0x27, 0x60, 0x79, 0x89, 0x5e, 0x28, 0xab, - 0x93, 0x23, 0x74, 0x1f, 0xed, 0xfb, 0x44, 0xc8, 0xf9, 0xa8, 0x9d, 0xb0, 0x00, 0x54, 0x93, 0xea, - 0x87, 0x9f, 0xdc, 0xed, 0xf2, 0x64, 0x0e, 0xfb, 0xbd, 0x74, 0xda, 0xdc, 0x3f, 0x5e, 0xe3, 0x38, - 0x1b, 0xe4, 0xf6, 0x44, 0x43, 0xab, 0xdd, 0xb9, 0x87, 0xe7, 0xeb, 0x67, 0x54, 0x93, 0xf3, 0x89, - 0x2a, 0xbe, 0xe9, 0x44, 0x2d, 0x6e, 0xe2, 0x62, 0x9c, 0x16, 0xb0, 0xec, 0xf5, 0x79, 0x67, 0x4d, - 0x7f, 0x0f, 0xbf, 0xf3, 0xd5, 0xb5, 0xd7, 0xf8, 0x83, 0x6d, 0xbf, 0x82, 0x6f, 0x78, 0x84, 0xed, - 0xe7, 0x93, 0x2b, 0xb3, 0x70, 0x71, 0x65, 0x16, 0x2e, 0xaf, 0xcc, 0xc2, 0x6f, 0xa9, 0xa9, 0x4d, - 0x52, 0x53, 0xbb, 0x48, 0x4d, 0xed, 0x32, 0x35, 0xb5, 0x7f, 0x52, 0x53, 0xfb, 0xe3, 0x5f, 0xb3, - 0xf0, 0x4b, 0x35, 0x2f, 0xc8, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf6, 0x9f, 0xb3, 0xdd, 0xdf, - 0x07, 
0x00, 0x00, -} diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.proto b/vendor/k8s.io/api/batch/v1beta1/generated.proto index 043b3551b..995b4f3f9 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.proto +++ b/vendor/k8s.io/api/batch/v1beta1/generated.proto @@ -33,17 +33,17 @@ option go_package = "v1beta1"; // CronJob represents the configuration of a single cron job. message CronJob { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of a cron job, including the schedule. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional CronJobSpec spec = 2; // Current status of a cron job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional CronJobStatus status = 3; } @@ -51,7 +51,7 @@ message CronJob { // CronJobList is a collection of cron jobs. message CronJobList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -112,12 +112,12 @@ message CronJobStatus { // JobTemplate describes a template for creating copies of a predefined pod. message JobTemplate { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Defines jobs that will be created from this template. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional JobTemplateSpec template = 2; } @@ -125,12 +125,12 @@ message JobTemplate { // JobTemplateSpec describes the data a Job should have when created from a template message JobTemplateSpec { // Standard object's metadata of the jobs created from this template. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the job. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional k8s.io.api.batch.v1.JobSpec spec = 2; } diff --git a/vendor/k8s.io/api/batch/v1beta1/types.go b/vendor/k8s.io/api/batch/v1beta1/types.go index cb5c9bad2..2978747a4 100644 --- a/vendor/k8s.io/api/batch/v1beta1/types.go +++ b/vendor/k8s.io/api/batch/v1beta1/types.go @@ -28,12 +28,12 @@ import ( type JobTemplate struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Defines jobs that will be created from this template. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` } @@ -41,12 +41,12 @@ type JobTemplate struct { // JobTemplateSpec describes the data a Job should have when created from a template type JobTemplateSpec struct { // Standard object's metadata of the jobs created from this template. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec batchv1.JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -58,17 +58,17 @@ type JobTemplateSpec struct { type CronJob struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of a cron job, including the schedule. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec CronJobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Current status of a cron job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status CronJobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -80,7 +80,7 @@ type CronJobList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go index abbdfec01..ecc914446 100644 --- a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go @@ -29,9 +29,9 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CronJob = map[string]string{ "": "CronJob represents the configuration of a single cron job.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (CronJob) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (CronJob) SwaggerDoc() map[string]string { var map_CronJobList = map[string]string{ "": "CronJobList is a collection of cron jobs.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is the list of CronJobs.", } @@ -75,8 +75,8 @@ func (CronJobStatus) SwaggerDoc() map[string]string { var map_JobTemplate = map[string]string{ "": "JobTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (JobTemplate) SwaggerDoc() map[string]string { @@ -85,8 +85,8 @@ func (JobTemplate) SwaggerDoc() map[string]string { var map_JobTemplateSpec = map[string]string{ "": "JobTemplateSpec describes the data a Job should have when created from a template", - "metadata": "Standard object's metadata of the jobs created from this template. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (JobTemplateSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go index 1c8bc4478..7c9dcb742 100644 --- a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go @@ -57,7 +57,7 @@ func (in *CronJob) DeepCopyObject() runtime.Object { func (in *CronJobList) DeepCopyInto(out *CronJobList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CronJob, len(*in)) diff --git a/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go b/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go index 4d9ba5c00..8271c8411 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go @@ -17,33 +17,22 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto -/* - Package v2alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto - - It has these top-level messages: - CronJob - CronJobList - CronJobSpec - CronJobStatus - JobTemplate - JobTemplateSpec -*/ package v2alpha1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -56,29 +45,173 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *CronJob) Reset() { *m = CronJob{} } -func (*CronJob) ProtoMessage() {} -func (*CronJob) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *CronJob) Reset() { *m = CronJob{} } +func (*CronJob) ProtoMessage() {} +func (*CronJob) Descriptor() ([]byte, []int) { + return fileDescriptor_5495a0550fe29c46, []int{0} +} +func (m *CronJob) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJob) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJob.Merge(m, src) +} +func (m *CronJob) XXX_Size() int { + return m.Size() +} +func (m *CronJob) XXX_DiscardUnknown() { + xxx_messageInfo_CronJob.DiscardUnknown(m) +} + +var xxx_messageInfo_CronJob proto.InternalMessageInfo + +func (m *CronJobList) Reset() { *m = CronJobList{} } +func (*CronJobList) ProtoMessage() {} +func (*CronJobList) Descriptor() ([]byte, []int) { + return fileDescriptor_5495a0550fe29c46, []int{1} +} +func (m *CronJobList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJobList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobList.Merge(m, src) +} +func (m *CronJobList) XXX_Size() int { + return m.Size() +} +func (m *CronJobList) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobList.DiscardUnknown(m) +} -func (m *CronJobList) Reset() { *m = CronJobList{} } -func (*CronJobList) ProtoMessage() {} -func (*CronJobList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_CronJobList proto.InternalMessageInfo -func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } -func (*CronJobSpec) ProtoMessage() {} -func (*CronJobSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } +func (*CronJobSpec) ProtoMessage() {} +func (*CronJobSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_5495a0550fe29c46, []int{2} +} +func (m *CronJobSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJobSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobSpec.Merge(m, src) +} +func (m *CronJobSpec) XXX_Size() int { + return m.Size() +} +func (m *CronJobSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobSpec.DiscardUnknown(m) +} -func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } -func (*CronJobStatus) ProtoMessage() {} -func (*CronJobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_CronJobSpec proto.InternalMessageInfo -func (m *JobTemplate) Reset() { *m = JobTemplate{} } -func (*JobTemplate) ProtoMessage() {} -func (*JobTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } +func (*CronJobStatus) ProtoMessage() {} +func (*CronJobStatus) Descriptor() ([]byte, []int) { + return 
fileDescriptor_5495a0550fe29c46, []int{3} +} +func (m *CronJobStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJobStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobStatus.Merge(m, src) +} +func (m *CronJobStatus) XXX_Size() int { + return m.Size() +} +func (m *CronJobStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobStatus.DiscardUnknown(m) +} -func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } -func (*JobTemplateSpec) ProtoMessage() {} -func (*JobTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_CronJobStatus proto.InternalMessageInfo + +func (m *JobTemplate) Reset() { *m = JobTemplate{} } +func (*JobTemplate) ProtoMessage() {} +func (*JobTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_5495a0550fe29c46, []int{4} +} +func (m *JobTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobTemplate.Merge(m, src) +} +func (m *JobTemplate) XXX_Size() int { + return m.Size() +} +func (m *JobTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_JobTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_JobTemplate proto.InternalMessageInfo + +func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } +func (*JobTemplateSpec) ProtoMessage() {} +func (*JobTemplateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_5495a0550fe29c46, []int{5} +} +func (m *JobTemplateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobTemplateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobTemplateSpec.Merge(m, src) +} +func (m *JobTemplateSpec) XXX_Size() int { + return m.Size() +} +func (m *JobTemplateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_JobTemplateSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_JobTemplateSpec proto.InternalMessageInfo func init() { proto.RegisterType((*CronJob)(nil), "k8s.io.api.batch.v2alpha1.CronJob") @@ -88,10 +221,68 @@ func init() { proto.RegisterType((*JobTemplate)(nil), "k8s.io.api.batch.v2alpha1.JobTemplate") proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v2alpha1.JobTemplateSpec") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto", fileDescriptor_5495a0550fe29c46) +} + +var fileDescriptor_5495a0550fe29c46 = []byte{ + // 774 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0x4d, 0x6f, 0xdb, 0x36, + 0x18, 0xc7, 0x2d, 0xc7, 0x6f, 0xa1, 0x97, 0x2d, 0xd1, 0x86, 0xc4, 0xf3, 0x06, 0xd9, 0x50, 0xb0, + 0xc1, 0x18, 0x36, 0x6a, 0x09, 0x86, 0x61, 0xa7, 0x01, 0x53, 0x86, 0x36, 0x4d, 0x53, 0x34, 0x90, + 0x53, 0xa0, 0x28, 0x82, 0xa2, 0x14, 0x45, 0xdb, 0x8c, 0x25, 0x51, 0x10, 0x29, 0x03, 0xbe, 0xf5, + 0xd6, 0x6b, 0x3f, 0x49, 0x2f, 0xed, 0x87, 0x48, 0x7b, 0xca, 0x31, 0x27, 0xa3, 0x51, 0xbf, 0x45, + 0x4f, 
0x85, 0x68, 0xf9, 0x25, 0x7e, 0x49, 0xd2, 0x4b, 0x6e, 0xe2, 0xa3, 0xff, 0xff, 0xc7, 0x87, + 0xcf, 0xf3, 0x90, 0xc0, 0xec, 0xfe, 0xc3, 0x21, 0x65, 0x46, 0x37, 0xb2, 0x49, 0xe8, 0x13, 0x41, + 0xb8, 0xd1, 0x23, 0xbe, 0xc3, 0x42, 0x23, 0xfd, 0x81, 0x02, 0x6a, 0xd8, 0x48, 0xe0, 0x8e, 0xd1, + 0xdb, 0x45, 0x6e, 0xd0, 0x41, 0x3b, 0x46, 0x9b, 0xf8, 0x24, 0x44, 0x82, 0x38, 0x30, 0x08, 0x99, + 0x60, 0xea, 0x8f, 0x43, 0x29, 0x44, 0x01, 0x85, 0x52, 0x0a, 0x47, 0xd2, 0xea, 0x1f, 0x6d, 0x2a, + 0x3a, 0x91, 0x0d, 0x31, 0xf3, 0x8c, 0x36, 0x6b, 0x33, 0x43, 0x3a, 0xec, 0xa8, 0x25, 0x57, 0x72, + 0x21, 0xbf, 0x86, 0xa4, 0xea, 0xf6, 0xfc, 0xa6, 0x73, 0xdb, 0x55, 0xf5, 0x29, 0x11, 0x66, 0x21, + 0x59, 0xa4, 0xf9, 0x6b, 0xa2, 0xf1, 0x10, 0xee, 0x50, 0x9f, 0x84, 0x7d, 0x23, 0xe8, 0xb6, 0x93, + 0x00, 0x37, 0x3c, 0x22, 0xd0, 0x22, 0x97, 0xb1, 0xcc, 0x15, 0x46, 0xbe, 0xa0, 0x1e, 0x99, 0x33, + 0xfc, 0x7d, 0x93, 0x81, 0xe3, 0x0e, 0xf1, 0xd0, 0xac, 0x4f, 0x7f, 0x95, 0x05, 0xc5, 0xbd, 0x90, + 0xf9, 0x07, 0xcc, 0x56, 0x5f, 0x80, 0x52, 0x92, 0x8f, 0x83, 0x04, 0xaa, 0x28, 0x75, 0xa5, 0x51, + 0xde, 0xfd, 0x13, 0x4e, 0x0a, 0x3a, 0xc6, 0xc2, 0xa0, 0xdb, 0x4e, 0x02, 0x1c, 0x26, 0x6a, 0xd8, + 0xdb, 0x81, 0x8f, 0xed, 0x53, 0x82, 0xc5, 0x23, 0x22, 0x90, 0xa9, 0x9e, 0x0d, 0x6a, 0x99, 0x78, + 0x50, 0x03, 0x93, 0x98, 0x35, 0xa6, 0xaa, 0xfb, 0x20, 0xc7, 0x03, 0x82, 0x2b, 0x59, 0x49, 0xff, + 0x15, 0x2e, 0x6d, 0x17, 0x4c, 0x73, 0x6a, 0x06, 0x04, 0x9b, 0xdf, 0xa4, 0xcc, 0x5c, 0xb2, 0xb2, + 0x24, 0x41, 0x3d, 0x02, 0x05, 0x2e, 0x90, 0x88, 0x78, 0x65, 0x45, 0xb2, 0x1a, 0xb7, 0x60, 0x49, + 0xbd, 0xf9, 0x6d, 0x4a, 0x2b, 0x0c, 0xd7, 0x56, 0xca, 0xd1, 0xdf, 0x29, 0xa0, 0x9c, 0x2a, 0x0f, + 0x29, 0x17, 0xea, 0xc9, 0x5c, 0x35, 0xe0, 0xed, 0xaa, 0x91, 0xb8, 0x65, 0x2d, 0xd6, 0xd3, 0x9d, + 0x4a, 0xa3, 0xc8, 0x54, 0x25, 0xee, 0x83, 0x3c, 0x15, 0xc4, 0xe3, 0x95, 0x6c, 0x7d, 0xa5, 0x51, + 0xde, 0xd5, 0x6f, 0x4e, 0xdf, 0x5c, 0x4b, 0x71, 0xf9, 0x07, 0x89, 0xd1, 0x1a, 0xfa, 0xf5, 0x37, + 0xb9, 0x71, 0xda, 0x49, 0x79, 0xd4, 0xdf, 0x41, 0x29, 0x69, 0xb5, 0x13, 0xb9, 0x44, 0xa6, 0xbd, + 0x3a, 0x49, 0xa3, 0x99, 0xc6, 0xad, 0xb1, 0x42, 0x7d, 0x02, 0xb6, 0xb8, 0x40, 0xa1, 0xa0, 0x7e, + 0xfb, 0x7f, 0x82, 0x1c, 0x97, 0xfa, 0xa4, 0x49, 0x30, 0xf3, 0x1d, 0x2e, 0x7b, 0xb4, 0x62, 0xfe, + 0x14, 0x0f, 0x6a, 0x5b, 0xcd, 0xc5, 0x12, 0x6b, 0x99, 0x57, 0x3d, 0x01, 0x1b, 0x98, 0xf9, 0x38, + 0x0a, 0x43, 0xe2, 0xe3, 0xfe, 0x11, 0x73, 0x29, 0xee, 0xcb, 0x46, 0xad, 0x9a, 0x30, 0xcd, 0x66, + 0x63, 0x6f, 0x56, 0xf0, 0x79, 0x51, 0xd0, 0x9a, 0x07, 0xa9, 0xbf, 0x80, 0x22, 0x8f, 0x78, 0x40, + 0x7c, 0xa7, 0x92, 0xab, 0x2b, 0x8d, 0x92, 0x59, 0x8e, 0x07, 0xb5, 0x62, 0x73, 0x18, 0xb2, 0x46, + 0xff, 0x54, 0x04, 0xca, 0xa7, 0xcc, 0x3e, 0x26, 0x5e, 0xe0, 0x22, 0x41, 0x2a, 0x79, 0xd9, 0xc3, + 0xdf, 0xae, 0x29, 0xf4, 0xc1, 0x44, 0x2d, 0xe7, 0xee, 0xfb, 0x34, 0xd5, 0xf2, 0xd4, 0x0f, 0x6b, + 0x9a, 0xa9, 0x3e, 0x07, 0x55, 0x1e, 0x61, 0x4c, 0x38, 0x6f, 0x45, 0xee, 0x01, 0xb3, 0xf9, 0x3e, + 0xe5, 0x82, 0x85, 0xfd, 0x43, 0xea, 0x51, 0x51, 0x29, 0xd4, 0x95, 0x46, 0xde, 0xd4, 0xe2, 0x41, + 0xad, 0xda, 0x5c, 0xaa, 0xb2, 0xae, 0x21, 0xa8, 0x16, 0xd8, 0x6c, 0x21, 0xea, 0x12, 0x67, 0x8e, + 0x5d, 0x94, 0xec, 0x6a, 0x3c, 0xa8, 0x6d, 0xde, 0x5b, 0xa8, 0xb0, 0x96, 0x38, 0xf5, 0x0f, 0x0a, + 0x58, 0xbb, 0x72, 0x23, 0xd4, 0x87, 0xa0, 0x80, 0xb0, 0xa0, 0xbd, 0x64, 0x60, 0x92, 0x61, 0xdc, + 0x9e, 0xae, 0x51, 0xf2, 0xae, 0x4d, 0xee, 0xb8, 0x45, 0x5a, 0x24, 0x69, 0x05, 0x99, 0x5c, 0xa3, + 0xff, 0xa4, 0xd5, 0x4a, 0x11, 0xaa, 0x0b, 0xd6, 0x5d, 0xc4, 0xc5, 0x68, 0xd6, 0x8e, 0xa9, 0x47, + 0x64, 0x97, 0xae, 0x96, 0xfe, 
0x9a, 0xeb, 0x93, 0x38, 0xcc, 0x1f, 0xe2, 0x41, 0x6d, 0xfd, 0x70, + 0x86, 0x63, 0xcd, 0x91, 0xf5, 0xf7, 0x0a, 0x98, 0xee, 0xce, 0x1d, 0x3c, 0x61, 0x4f, 0x41, 0x49, + 0x8c, 0x46, 0x2a, 0xfb, 0xd5, 0x23, 0x35, 0xbe, 0x8b, 0xe3, 0x79, 0x1a, 0xd3, 0xf4, 0xb7, 0x0a, + 0xf8, 0x6e, 0x46, 0x7f, 0x07, 0xe7, 0xf9, 0xf7, 0xca, 0x93, 0xfc, 0xf3, 0x82, 0xb3, 0xc8, 0x53, + 0x2c, 0x7b, 0x88, 0x4d, 0x78, 0x76, 0xa9, 0x65, 0xce, 0x2f, 0xb5, 0xcc, 0xc5, 0xa5, 0x96, 0x79, + 0x19, 0x6b, 0xca, 0x59, 0xac, 0x29, 0xe7, 0xb1, 0xa6, 0x5c, 0xc4, 0x9a, 0xf2, 0x31, 0xd6, 0x94, + 0xd7, 0x9f, 0xb4, 0xcc, 0xb3, 0xd2, 0xa8, 0x22, 0x5f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x20, 0x1c, + 0xcf, 0x94, 0xe7, 0x07, 0x00, 0x00, +} + func (m *CronJob) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -99,41 +290,52 @@ func (m *CronJob) Marshal() (dAtA []byte, err error) { } func (m *CronJob) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJob) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CronJobList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -141,37 +343,46 @@ func (m *CronJobList) Marshal() (dAtA []byte, err error) { } func (m *CronJobList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := 
m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CronJobSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -179,58 +390,67 @@ func (m *CronJobSpec) Marshal() (dAtA []byte, err error) { } func (m *CronJobSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) - i += copy(dAtA[i:], m.Schedule) - if m.StartingDeadlineSeconds != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds)) + if m.FailedJobsHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) + i-- + dAtA[i] = 0x38 } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy))) - i += copy(dAtA[i:], m.ConcurrencyPolicy) + if m.SuccessfulJobsHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + { + size, err := m.JobTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a if m.Suspend != nil { - dAtA[i] = 0x20 - i++ + i-- if *m.Suspend { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.JobTemplate.Size())) - n5, err := m.JobTemplate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - if m.SuccessfulJobsHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit)) + i-- + dAtA[i] = 0x20 } - if m.FailedJobsHistoryLimit != nil { - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) + i -= len(m.ConcurrencyPolicy) + copy(dAtA[i:], m.ConcurrencyPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy))) + i-- + dAtA[i] = 0x1a + if m.StartingDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds)) + i-- + dAtA[i] = 0x10 } - return i, nil + i -= len(m.Schedule) + copy(dAtA[i:], m.Schedule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CronJobStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -238,39 +458,48 @@ func (m *CronJobStatus) Marshal() (dAtA []byte, err error) { } func (m *CronJobStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Active) > 0 { - for _, msg := range m.Active { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.LastScheduleTime != nil { + { + size, err := m.LastScheduleTime.MarshalToSizedBuffer(dAtA[:i]) if err != nil { 
return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if m.LastScheduleTime != nil { + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastScheduleTime.Size())) - n6, err := m.LastScheduleTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if len(m.Active) > 0 { + for iNdEx := len(m.Active) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Active[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n6 } - return i, nil + return len(dAtA) - i, nil } func (m *JobTemplate) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -278,33 +507,42 @@ func (m *JobTemplate) Marshal() (dAtA []byte, err error) { } func (m *JobTemplate) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n8, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -312,39 +550,53 @@ func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { } func (m *JobTemplateSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n10, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *CronJob) Size() (n int) { + if m == 
nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -357,6 +609,9 @@ func (m *CronJob) Size() (n int) { } func (m *CronJobList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -371,6 +626,9 @@ func (m *CronJobList) Size() (n int) { } func (m *CronJobSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Schedule) @@ -395,6 +653,9 @@ func (m *CronJobSpec) Size() (n int) { } func (m *CronJobStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Active) > 0 { @@ -411,6 +672,9 @@ func (m *CronJobStatus) Size() (n int) { } func (m *JobTemplate) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -421,6 +685,9 @@ func (m *JobTemplate) Size() (n int) { } func (m *JobTemplateSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -431,14 +698,7 @@ func (m *JobTemplateSpec) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -448,7 +708,7 @@ func (this *CronJob) String() string { return "nil" } s := strings.Join([]string{`&CronJob{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CronJobSpec", "CronJobSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CronJobStatus", "CronJobStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -459,9 +719,14 @@ func (this *CronJobList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]CronJob{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CronJob", "CronJob", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&CronJobList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CronJob", "CronJob", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -486,9 +751,14 @@ func (this *CronJobStatus) String() string { if this == nil { return "nil" } + repeatedStringForActive := "[]ObjectReference{" + for _, f := range this.Active { + repeatedStringForActive += fmt.Sprintf("%v", f) + "," + } + repeatedStringForActive += "}" s := strings.Join([]string{`&CronJobStatus{`, - `Active:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Active), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, - `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `Active:` + repeatedStringForActive + `,`, + `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "v1.Time", 1) + `,`, `}`, }, "") return s @@ -498,7 +768,7 @@ func 
(this *JobTemplate) String() string { return "nil" } s := strings.Join([]string{`&JobTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Template:` + strings.Replace(strings.Replace(this.Template.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -509,8 +779,8 @@ func (this *JobTemplateSpec) String() string { return "nil" } s := strings.Join([]string{`&JobTemplateSpec{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "k8s_io_api_batch_v1.JobSpec", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Spec), "JobSpec", "v12.JobSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -538,7 +808,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -566,7 +836,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -575,6 +845,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -596,7 +869,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -605,6 +878,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -626,7 +902,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -635,6 +911,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -651,6 +930,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -678,7 +960,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -706,7 +988,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -715,6 +997,9 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if 
postIndex > l { return io.ErrUnexpectedEOF } @@ -736,7 +1021,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -745,6 +1030,9 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -762,6 +1050,9 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -789,7 +1080,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -817,7 +1108,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -827,6 +1118,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -846,7 +1140,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -866,7 +1160,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -876,6 +1170,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -895,7 +1192,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -916,7 +1213,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -925,6 +1222,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -946,7 +1246,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -966,7 +1266,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -981,6 +1281,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1008,7 +1311,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1036,7 +1339,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1045,10 +1348,13 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Active = append(m.Active, k8s_io_api_core_v1.ObjectReference{}) + m.Active = append(m.Active, v11.ObjectReference{}) if err := m.Active[len(m.Active)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1067,7 +1373,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1076,11 +1382,14 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.LastScheduleTime == nil { - m.LastScheduleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.LastScheduleTime = &v1.Time{} } if err := m.LastScheduleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1095,6 +1404,9 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1122,7 +1434,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1150,7 +1462,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1159,6 +1471,9 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1180,7 +1495,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1189,6 +1504,9 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1205,6 +1523,9 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1232,7 +1553,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1260,7 +1581,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1269,6 +1590,9 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1290,7 +1614,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1299,6 +1623,9 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1315,6 +1642,9 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1381,10 +1711,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1413,6 +1746,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1431,60 +1767,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 774 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0x4d, 0x6f, 0xdb, 0x36, - 0x18, 0xc7, 0x2d, 0xc7, 0x6f, 0xa1, 0x97, 0x2d, 0xd1, 0x86, 0xc4, 0xf3, 0x06, 0xd9, 0x50, 0xb0, - 0xc1, 0x18, 0x36, 0x6a, 0x09, 0x86, 0x61, 0xa7, 0x01, 0x53, 0x86, 0x36, 0x4d, 0x53, 0x34, 0x90, - 0x53, 0xa0, 0x28, 0x82, 0xa2, 0x14, 0x45, 0xdb, 0x8c, 0x25, 0x51, 0x10, 0x29, 0x03, 0xbe, 0xf5, - 0xd6, 0x6b, 0x3f, 0x49, 0x2f, 0xed, 0x87, 0x48, 0x7b, 0xca, 0x31, 0x27, 0xa3, 0x51, 0xbf, 0x45, - 0x4f, 0x85, 0x68, 0xf9, 0x25, 0x7e, 0x49, 0xd2, 0x4b, 0x6e, 0xe2, 0xa3, 0xff, 0xff, 0xc7, 0x87, - 0xcf, 0xf3, 0x90, 0xc0, 0xec, 0xfe, 0xc3, 0x21, 0x65, 0x46, 0x37, 0xb2, 0x49, 0xe8, 0x13, 0x41, - 0xb8, 0xd1, 0x23, 0xbe, 0xc3, 0x42, 0x23, 0xfd, 0x81, 0x02, 0x6a, 0xd8, 0x48, 0xe0, 0x8e, 0xd1, - 0xdb, 0x45, 0x6e, 0xd0, 0x41, 0x3b, 0x46, 0x9b, 0xf8, 0x24, 0x44, 0x82, 0x38, 0x30, 0x08, 0x99, - 0x60, 0xea, 0x8f, 0x43, 0x29, 0x44, 0x01, 0x85, 0x52, 0x0a, 0x47, 0xd2, 0xea, 0x1f, 0x6d, 0x2a, - 0x3a, 0x91, 0x0d, 0x31, 0xf3, 0x8c, 0x36, 0x6b, 0x33, 0x43, 0x3a, 0xec, 0xa8, 0x25, 0x57, 0x72, - 0x21, 0xbf, 0x86, 0xa4, 0xea, 0xf6, 0xfc, 0xa6, 0x73, 0xdb, 0x55, 0xf5, 0x29, 0x11, 0x66, 0x21, - 0x59, 0xa4, 0xf9, 0x6b, 0xa2, 0xf1, 0x10, 0xee, 0x50, 0x9f, 0x84, 0x7d, 0x23, 0xe8, 0xb6, 0x93, - 0x00, 0x37, 0x3c, 0x22, 0xd0, 0x22, 0x97, 0xb1, 0xcc, 0x15, 0x46, 0xbe, 0xa0, 0x1e, 0x99, 0x33, - 0xfc, 0x7d, 0x93, 0x81, 0xe3, 0x0e, 0xf1, 0xd0, 0xac, 0x4f, 0x7f, 0x95, 0x05, 0xc5, 0xbd, 0x90, - 0xf9, 0x07, 0xcc, 0x56, 0x5f, 0x80, 0x52, 0x92, 0x8f, 0x83, 0x04, 0xaa, 0x28, 0x75, 0xa5, 0x51, - 0xde, 0xfd, 0x13, 0x4e, 0x0a, 0x3a, 0xc6, 0xc2, 0xa0, 0xdb, 0x4e, 0x02, 0x1c, 0x26, 0x6a, 0xd8, - 0xdb, 0x81, 0x8f, 0xed, 0x53, 0x82, 0xc5, 0x23, 0x22, 0x90, 0xa9, 0x9e, 0x0d, 0x6a, 0x99, 0x78, - 0x50, 0x03, 0x93, 0x98, 0x35, 0xa6, 0xaa, 0xfb, 0x20, 0xc7, 0x03, 0x82, 0x2b, 0x59, 0x49, 0xff, - 0x15, 0x2e, 0x6d, 0x17, 0x4c, 0x73, 0x6a, 0x06, 0x04, 0x9b, 0xdf, 0xa4, 0xcc, 0x5c, 0xb2, 0xb2, - 0x24, 0x41, 0x3d, 0x02, 0x05, 0x2e, 0x90, 0x88, 0x78, 0x65, 0x45, 0xb2, 0x1a, 0xb7, 0x60, 0x49, - 
0xbd, 0xf9, 0x6d, 0x4a, 0x2b, 0x0c, 0xd7, 0x56, 0xca, 0xd1, 0xdf, 0x29, 0xa0, 0x9c, 0x2a, 0x0f, - 0x29, 0x17, 0xea, 0xc9, 0x5c, 0x35, 0xe0, 0xed, 0xaa, 0x91, 0xb8, 0x65, 0x2d, 0xd6, 0xd3, 0x9d, - 0x4a, 0xa3, 0xc8, 0x54, 0x25, 0xee, 0x83, 0x3c, 0x15, 0xc4, 0xe3, 0x95, 0x6c, 0x7d, 0xa5, 0x51, - 0xde, 0xd5, 0x6f, 0x4e, 0xdf, 0x5c, 0x4b, 0x71, 0xf9, 0x07, 0x89, 0xd1, 0x1a, 0xfa, 0xf5, 0x37, - 0xb9, 0x71, 0xda, 0x49, 0x79, 0xd4, 0xdf, 0x41, 0x29, 0x69, 0xb5, 0x13, 0xb9, 0x44, 0xa6, 0xbd, - 0x3a, 0x49, 0xa3, 0x99, 0xc6, 0xad, 0xb1, 0x42, 0x7d, 0x02, 0xb6, 0xb8, 0x40, 0xa1, 0xa0, 0x7e, - 0xfb, 0x7f, 0x82, 0x1c, 0x97, 0xfa, 0xa4, 0x49, 0x30, 0xf3, 0x1d, 0x2e, 0x7b, 0xb4, 0x62, 0xfe, - 0x14, 0x0f, 0x6a, 0x5b, 0xcd, 0xc5, 0x12, 0x6b, 0x99, 0x57, 0x3d, 0x01, 0x1b, 0x98, 0xf9, 0x38, - 0x0a, 0x43, 0xe2, 0xe3, 0xfe, 0x11, 0x73, 0x29, 0xee, 0xcb, 0x46, 0xad, 0x9a, 0x30, 0xcd, 0x66, - 0x63, 0x6f, 0x56, 0xf0, 0x79, 0x51, 0xd0, 0x9a, 0x07, 0xa9, 0xbf, 0x80, 0x22, 0x8f, 0x78, 0x40, - 0x7c, 0xa7, 0x92, 0xab, 0x2b, 0x8d, 0x92, 0x59, 0x8e, 0x07, 0xb5, 0x62, 0x73, 0x18, 0xb2, 0x46, - 0xff, 0x54, 0x04, 0xca, 0xa7, 0xcc, 0x3e, 0x26, 0x5e, 0xe0, 0x22, 0x41, 0x2a, 0x79, 0xd9, 0xc3, - 0xdf, 0xae, 0x29, 0xf4, 0xc1, 0x44, 0x2d, 0xe7, 0xee, 0xfb, 0x34, 0xd5, 0xf2, 0xd4, 0x0f, 0x6b, - 0x9a, 0xa9, 0x3e, 0x07, 0x55, 0x1e, 0x61, 0x4c, 0x38, 0x6f, 0x45, 0xee, 0x01, 0xb3, 0xf9, 0x3e, - 0xe5, 0x82, 0x85, 0xfd, 0x43, 0xea, 0x51, 0x51, 0x29, 0xd4, 0x95, 0x46, 0xde, 0xd4, 0xe2, 0x41, - 0xad, 0xda, 0x5c, 0xaa, 0xb2, 0xae, 0x21, 0xa8, 0x16, 0xd8, 0x6c, 0x21, 0xea, 0x12, 0x67, 0x8e, - 0x5d, 0x94, 0xec, 0x6a, 0x3c, 0xa8, 0x6d, 0xde, 0x5b, 0xa8, 0xb0, 0x96, 0x38, 0xf5, 0x0f, 0x0a, - 0x58, 0xbb, 0x72, 0x23, 0xd4, 0x87, 0xa0, 0x80, 0xb0, 0xa0, 0xbd, 0x64, 0x60, 0x92, 0x61, 0xdc, - 0x9e, 0xae, 0x51, 0xf2, 0xae, 0x4d, 0xee, 0xb8, 0x45, 0x5a, 0x24, 0x69, 0x05, 0x99, 0x5c, 0xa3, - 0xff, 0xa4, 0xd5, 0x4a, 0x11, 0xaa, 0x0b, 0xd6, 0x5d, 0xc4, 0xc5, 0x68, 0xd6, 0x8e, 0xa9, 0x47, - 0x64, 0x97, 0xae, 0x96, 0xfe, 0x9a, 0xeb, 0x93, 0x38, 0xcc, 0x1f, 0xe2, 0x41, 0x6d, 0xfd, 0x70, - 0x86, 0x63, 0xcd, 0x91, 0xf5, 0xf7, 0x0a, 0x98, 0xee, 0xce, 0x1d, 0x3c, 0x61, 0x4f, 0x41, 0x49, - 0x8c, 0x46, 0x2a, 0xfb, 0xd5, 0x23, 0x35, 0xbe, 0x8b, 0xe3, 0x79, 0x1a, 0xd3, 0xf4, 0xb7, 0x0a, - 0xf8, 0x6e, 0x46, 0x7f, 0x07, 0xe7, 0xf9, 0xf7, 0xca, 0x93, 0xfc, 0xf3, 0x82, 0xb3, 0xc8, 0x53, - 0x2c, 0x7b, 0x88, 0x4d, 0x78, 0x76, 0xa9, 0x65, 0xce, 0x2f, 0xb5, 0xcc, 0xc5, 0xa5, 0x96, 0x79, - 0x19, 0x6b, 0xca, 0x59, 0xac, 0x29, 0xe7, 0xb1, 0xa6, 0x5c, 0xc4, 0x9a, 0xf2, 0x31, 0xd6, 0x94, - 0xd7, 0x9f, 0xb4, 0xcc, 0xb3, 0xd2, 0xa8, 0x22, 0x5f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x20, 0x1c, - 0xcf, 0x94, 0xe7, 0x07, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/batch/v2alpha1/generated.proto b/vendor/k8s.io/api/batch/v2alpha1/generated.proto index 4321c3361..0bba13b86 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/generated.proto +++ b/vendor/k8s.io/api/batch/v2alpha1/generated.proto @@ -33,17 +33,17 @@ option go_package = "v2alpha1"; // CronJob represents the configuration of a single cron job. message CronJob { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of a cron job, including the schedule. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional CronJobSpec spec = 2; // Current status of a cron job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional CronJobStatus status = 3; } @@ -51,7 +51,7 @@ message CronJob { // CronJobList is a collection of cron jobs. message CronJobList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -110,12 +110,12 @@ message CronJobStatus { // JobTemplate describes a template for creating copies of a predefined pod. message JobTemplate { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Defines jobs that will be created from this template. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional JobTemplateSpec template = 2; } @@ -123,12 +123,12 @@ message JobTemplate { // JobTemplateSpec describes the data a Job should have when created from a template message JobTemplateSpec { // Standard object's metadata of the jobs created from this template. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional k8s.io.api.batch.v1.JobSpec spec = 2; } diff --git a/vendor/k8s.io/api/batch/v2alpha1/types.go b/vendor/k8s.io/api/batch/v2alpha1/types.go index cccff94ff..465e614ae 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/types.go +++ b/vendor/k8s.io/api/batch/v2alpha1/types.go @@ -28,12 +28,12 @@ import ( type JobTemplate struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Defines jobs that will be created from this template. 
- // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` } @@ -41,12 +41,12 @@ type JobTemplate struct { // JobTemplateSpec describes the data a Job should have when created from a template type JobTemplateSpec struct { // Standard object's metadata of the jobs created from this template. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec batchv1.JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -58,17 +58,17 @@ type JobTemplateSpec struct { type CronJob struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of a cron job, including the schedule. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec CronJobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Current status of a cron job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status CronJobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -80,7 +80,7 @@ type CronJobList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go index f448a92cf..bc80eca48 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go @@ -29,9 +29,9 @@ package v2alpha1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CronJob = map[string]string{ "": "CronJob represents the configuration of a single cron job.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of a cron job, including the schedule. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (CronJob) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (CronJob) SwaggerDoc() map[string]string { var map_CronJobList = map[string]string{ "": "CronJobList is a collection of cron jobs.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is the list of CronJobs.", } @@ -75,8 +75,8 @@ func (CronJobStatus) SwaggerDoc() map[string]string { var map_JobTemplate = map[string]string{ "": "JobTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (JobTemplate) SwaggerDoc() map[string]string { @@ -85,8 +85,8 @@ func (JobTemplate) SwaggerDoc() map[string]string { var map_JobTemplateSpec = map[string]string{ "": "JobTemplateSpec describes the data a Job should have when created from a template", - "metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the job. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (JobTemplateSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go index 20d87e7e7..1b03f6745 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go @@ -57,7 +57,7 @@ func (in *CronJob) DeepCopyObject() runtime.Object { func (in *CronJobList) DeepCopyInto(out *CronJobList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CronJob, len(*in)) diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go index 19bf225fa..2e61b568e 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go @@ -17,32 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto - - It has these top-level messages: - CertificateSigningRequest - CertificateSigningRequestCondition - CertificateSigningRequestList - CertificateSigningRequestSpec - CertificateSigningRequestStatus - ExtraValue -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal @@ -58,49 +47,244 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CertificateSigningRequest) Reset() { *m = CertificateSigningRequest{} } func (*CertificateSigningRequest) ProtoMessage() {} func (*CertificateSigningRequest) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} + return fileDescriptor_09d156762b8218ef, []int{0} +} +func (m *CertificateSigningRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CertificateSigningRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CertificateSigningRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateSigningRequest.Merge(m, src) +} +func (m *CertificateSigningRequest) XXX_Size() int { + return m.Size() +} +func (m *CertificateSigningRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateSigningRequest.DiscardUnknown(m) } +var xxx_messageInfo_CertificateSigningRequest proto.InternalMessageInfo + func (m *CertificateSigningRequestCondition) Reset() { *m = CertificateSigningRequestCondition{} } func (*CertificateSigningRequestCondition) ProtoMessage() {} func (*CertificateSigningRequestCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} + return fileDescriptor_09d156762b8218ef, []int{1} +} +func (m *CertificateSigningRequestCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CertificateSigningRequestCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CertificateSigningRequestCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateSigningRequestCondition.Merge(m, src) +} +func (m *CertificateSigningRequestCondition) XXX_Size() int { + return m.Size() +} +func (m *CertificateSigningRequestCondition) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateSigningRequestCondition.DiscardUnknown(m) } +var xxx_messageInfo_CertificateSigningRequestCondition proto.InternalMessageInfo + func (m *CertificateSigningRequestList) Reset() { *m = CertificateSigningRequestList{} } func (*CertificateSigningRequestList) ProtoMessage() {} func (*CertificateSigningRequestList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} + return fileDescriptor_09d156762b8218ef, []int{2} +} +func (m *CertificateSigningRequestList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } +func (m *CertificateSigningRequestList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CertificateSigningRequestList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateSigningRequestList.Merge(m, src) +} +func (m *CertificateSigningRequestList) XXX_Size() int { + return m.Size() +} +func (m *CertificateSigningRequestList) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateSigningRequestList.DiscardUnknown(m) +} + +var xxx_messageInfo_CertificateSigningRequestList proto.InternalMessageInfo func (m *CertificateSigningRequestSpec) Reset() { *m = CertificateSigningRequestSpec{} } func (*CertificateSigningRequestSpec) ProtoMessage() {} func (*CertificateSigningRequestSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, 
[]int{3} + return fileDescriptor_09d156762b8218ef, []int{3} } +func (m *CertificateSigningRequestSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CertificateSigningRequestSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CertificateSigningRequestSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateSigningRequestSpec.Merge(m, src) +} +func (m *CertificateSigningRequestSpec) XXX_Size() int { + return m.Size() +} +func (m *CertificateSigningRequestSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateSigningRequestSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CertificateSigningRequestSpec proto.InternalMessageInfo func (m *CertificateSigningRequestStatus) Reset() { *m = CertificateSigningRequestStatus{} } func (*CertificateSigningRequestStatus) ProtoMessage() {} func (*CertificateSigningRequestStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} + return fileDescriptor_09d156762b8218ef, []int{4} +} +func (m *CertificateSigningRequestStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CertificateSigningRequestStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CertificateSigningRequestStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateSigningRequestStatus.Merge(m, src) +} +func (m *CertificateSigningRequestStatus) XXX_Size() int { + return m.Size() +} +func (m *CertificateSigningRequestStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateSigningRequestStatus.DiscardUnknown(m) } -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_CertificateSigningRequestStatus proto.InternalMessageInfo + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_09d156762b8218ef, []int{5} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo func init() { proto.RegisterType((*CertificateSigningRequest)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequest") proto.RegisterType((*CertificateSigningRequestCondition)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestCondition") proto.RegisterType((*CertificateSigningRequestList)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestList") proto.RegisterType((*CertificateSigningRequestSpec)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestSpec") + proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestSpec.ExtraEntry") 
proto.RegisterType((*CertificateSigningRequestStatus)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestStatus") proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.certificates.v1beta1.ExtraValue") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto", fileDescriptor_09d156762b8218ef) +} + +var fileDescriptor_09d156762b8218ef = []byte{ + // 805 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4b, 0x8f, 0x1b, 0x45, + 0x10, 0xf6, 0xf8, 0xb5, 0x76, 0x7b, 0xd9, 0x44, 0x2d, 0x14, 0x0d, 0x2b, 0x65, 0x66, 0x35, 0x02, + 0xb4, 0x3c, 0xd2, 0xc3, 0x46, 0x08, 0x56, 0x7b, 0x40, 0x30, 0x4b, 0x04, 0x2b, 0x12, 0x21, 0x75, + 0x62, 0x0e, 0x08, 0x89, 0xb4, 0xc7, 0x95, 0x71, 0xc7, 0x99, 0x07, 0xd3, 0x3d, 0x06, 0xdf, 0xf2, + 0x13, 0x38, 0x72, 0x41, 0xe2, 0x97, 0x70, 0x5e, 0x0e, 0x48, 0x39, 0xe6, 0x80, 0x2c, 0xd6, 0xfc, + 0x8b, 0x9c, 0x50, 0xf7, 0xb4, 0x3d, 0xc6, 0x2b, 0xe3, 0x28, 0x7b, 0x9b, 0xfa, 0xaa, 0xbe, 0xaf, + 0x1e, 0x5d, 0x35, 0xe8, 0xcb, 0xf1, 0xb1, 0x20, 0x3c, 0xf5, 0xc7, 0xc5, 0x00, 0xf2, 0x04, 0x24, + 0x08, 0x7f, 0x02, 0xc9, 0x30, 0xcd, 0x7d, 0xe3, 0x60, 0x19, 0xf7, 0x43, 0xc8, 0x25, 0x7f, 0xc4, + 0x43, 0xa6, 0xdd, 0x47, 0x03, 0x90, 0xec, 0xc8, 0x8f, 0x20, 0x81, 0x9c, 0x49, 0x18, 0x92, 0x2c, + 0x4f, 0x65, 0x8a, 0xdd, 0x92, 0x40, 0x58, 0xc6, 0xc9, 0x2a, 0x81, 0x18, 0xc2, 0xfe, 0xad, 0x88, + 0xcb, 0x51, 0x31, 0x20, 0x61, 0x1a, 0xfb, 0x51, 0x1a, 0xa5, 0xbe, 0xe6, 0x0d, 0x8a, 0x47, 0xda, + 0xd2, 0x86, 0xfe, 0x2a, 0xf5, 0xf6, 0x3f, 0xac, 0x0a, 0x88, 0x59, 0x38, 0xe2, 0x09, 0xe4, 0x53, + 0x3f, 0x1b, 0x47, 0x0a, 0x10, 0x7e, 0x0c, 0x92, 0xf9, 0x93, 0x4b, 0x55, 0xec, 0xfb, 0x9b, 0x58, + 0x79, 0x91, 0x48, 0x1e, 0xc3, 0x25, 0xc2, 0x47, 0xdb, 0x08, 0x22, 0x1c, 0x41, 0xcc, 0xd6, 0x79, + 0xde, 0x1f, 0x75, 0xf4, 0xc6, 0x69, 0xd5, 0xe6, 0x7d, 0x1e, 0x25, 0x3c, 0x89, 0x28, 0xfc, 0x50, + 0x80, 0x90, 0xf8, 0x21, 0xea, 0xa8, 0x0a, 0x87, 0x4c, 0x32, 0xdb, 0x3a, 0xb0, 0x0e, 0x7b, 0xb7, + 0x3f, 0x20, 0xd5, 0x7c, 0x96, 0x89, 0x48, 0x36, 0x8e, 0x14, 0x20, 0x88, 0x8a, 0x26, 0x93, 0x23, + 0xf2, 0xf5, 0xe0, 0x31, 0x84, 0xf2, 0x1e, 0x48, 0x16, 0xe0, 0xf3, 0x99, 0x5b, 0x9b, 0xcf, 0x5c, + 0x54, 0x61, 0x74, 0xa9, 0x8a, 0x1f, 0xa2, 0xa6, 0xc8, 0x20, 0xb4, 0xeb, 0x5a, 0xfd, 0x13, 0xb2, + 0x65, 0xfa, 0x64, 0x63, 0xad, 0xf7, 0x33, 0x08, 0x83, 0x5d, 0x93, 0xab, 0xa9, 0x2c, 0xaa, 0x95, + 0xf1, 0x08, 0xb5, 0x85, 0x64, 0xb2, 0x10, 0x76, 0x43, 0xe7, 0xf8, 0xf4, 0x0a, 0x39, 0xb4, 0x4e, + 0xb0, 0x67, 0xb2, 0xb4, 0x4b, 0x9b, 0x1a, 0x7d, 0xef, 0xd7, 0x3a, 0xf2, 0x36, 0x72, 0x4f, 0xd3, + 0x64, 0xc8, 0x25, 0x4f, 0x13, 0x7c, 0x8c, 0x9a, 0x72, 0x9a, 0x81, 0x1e, 0x68, 0x37, 0x78, 0x73, + 0x51, 0xf2, 0x83, 0x69, 0x06, 0x2f, 0x66, 0xee, 0xeb, 0xeb, 0xf1, 0x0a, 0xa7, 0x9a, 0x81, 0xdf, + 0x46, 0xed, 0x1c, 0x98, 0x48, 0x13, 0x3d, 0xae, 0x6e, 0x55, 0x08, 0xd5, 0x28, 0x35, 0x5e, 0xfc, + 0x0e, 0xda, 0x89, 0x41, 0x08, 0x16, 0x81, 0xee, 0xb9, 0x1b, 0x5c, 0x33, 0x81, 0x3b, 0xf7, 0x4a, + 0x98, 0x2e, 0xfc, 0xf8, 0x31, 0xda, 0x7b, 0xc2, 0x84, 0xec, 0x67, 0x43, 0x26, 0xe1, 0x01, 0x8f, + 0xc1, 0x6e, 0xea, 0x29, 0xbd, 0xfb, 0x72, 0xef, 0xac, 0x18, 0xc1, 0x0d, 0xa3, 0xbe, 0x77, 0xf7, + 0x3f, 0x4a, 0x74, 0x4d, 0xd9, 0x9b, 0x59, 0xe8, 0xe6, 0xc6, 0xf9, 0xdc, 0xe5, 0x42, 0xe2, 0xef, + 0x2e, 0xed, 0x1b, 0x79, 0xb9, 0x3a, 0x14, 0x5b, 0x6f, 0xdb, 0x75, 0x53, 0x4b, 0x67, 0x81, 0xac, + 0xec, 0xda, 0xf7, 0xa8, 0xc5, 0x25, 0xc4, 0xc2, 0xae, 0x1f, 0x34, 0x0e, 0x7b, 0xb7, 0x4f, 0x5e, + 0x7d, 0x11, 0x82, 0xd7, 0x4c, 0x9a, 0xd6, 0x99, 0x12, 
0xa4, 0xa5, 0xae, 0xf7, 0x7b, 0xe3, 0x7f, + 0x1a, 0x54, 0x2b, 0x89, 0xdf, 0x42, 0x3b, 0x79, 0x69, 0xea, 0xfe, 0x76, 0x83, 0x9e, 0x7a, 0x15, + 0x13, 0x41, 0x17, 0x3e, 0x4c, 0x50, 0xbb, 0x50, 0xcf, 0x23, 0xec, 0xd6, 0x41, 0xe3, 0xb0, 0x1b, + 0xdc, 0x50, 0x8f, 0xdc, 0xd7, 0xc8, 0x8b, 0x99, 0xdb, 0xf9, 0x0a, 0xa6, 0xda, 0xa0, 0x26, 0x0a, + 0xbf, 0x8f, 0x3a, 0x85, 0x80, 0x3c, 0x61, 0x31, 0x98, 0xd5, 0x58, 0xce, 0xa1, 0x6f, 0x70, 0xba, + 0x8c, 0xc0, 0x37, 0x51, 0xa3, 0xe0, 0x43, 0xb3, 0x1a, 0x3d, 0x13, 0xd8, 0xe8, 0x9f, 0x7d, 0x4e, + 0x15, 0x8e, 0x3d, 0xd4, 0x8e, 0xf2, 0xb4, 0xc8, 0x84, 0xdd, 0xd4, 0xc9, 0x91, 0x4a, 0xfe, 0x85, + 0x46, 0xa8, 0xf1, 0xe0, 0x04, 0xb5, 0xe0, 0x27, 0x99, 0x33, 0xbb, 0xad, 0x47, 0x79, 0x76, 0xb5, + 0xbb, 0x25, 0x77, 0x94, 0xd6, 0x9d, 0x44, 0xe6, 0xd3, 0x6a, 0xb2, 0x1a, 0xa3, 0x65, 0x9a, 0x7d, + 0x40, 0xa8, 0x8a, 0xc1, 0xd7, 0x51, 0x63, 0x0c, 0xd3, 0xf2, 0x80, 0xa8, 0xfa, 0xc4, 0x9f, 0xa1, + 0xd6, 0x84, 0x3d, 0x29, 0xc0, 0xfc, 0x47, 0xde, 0xdb, 0x5a, 0x8f, 0x56, 0xfb, 0x46, 0x51, 0x68, + 0xc9, 0x3c, 0xa9, 0x1f, 0x5b, 0xde, 0x9f, 0x16, 0x72, 0xb7, 0x5c, 0x3f, 0xfe, 0x11, 0xa1, 0x70, + 0x71, 0x9b, 0xc2, 0xb6, 0x74, 0xff, 0xa7, 0xaf, 0xde, 0xff, 0xf2, 0xce, 0xab, 0x1f, 0xe5, 0x12, + 0x12, 0x74, 0x25, 0x15, 0x3e, 0x42, 0xbd, 0x15, 0x69, 0xdd, 0xe9, 0x6e, 0x70, 0x6d, 0x3e, 0x73, + 0x7b, 0x2b, 0xe2, 0x74, 0x35, 0xc6, 0xfb, 0xd8, 0x8c, 0x4d, 0x37, 0x8a, 0xdd, 0xc5, 0xfe, 0x5b, + 0xfa, 0x5d, 0xbb, 0xeb, 0xfb, 0x7b, 0xd2, 0xf9, 0xe5, 0x37, 0xb7, 0xf6, 0xf4, 0xaf, 0x83, 0x5a, + 0x70, 0xeb, 0xfc, 0xc2, 0xa9, 0x3d, 0xbb, 0x70, 0x6a, 0xcf, 0x2f, 0x9c, 0xda, 0xd3, 0xb9, 0x63, + 0x9d, 0xcf, 0x1d, 0xeb, 0xd9, 0xdc, 0xb1, 0x9e, 0xcf, 0x1d, 0xeb, 0xef, 0xb9, 0x63, 0xfd, 0xfc, + 0x8f, 0x53, 0xfb, 0x76, 0xc7, 0x74, 0xf7, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x39, 0x0e, 0xb6, + 0xcd, 0x7f, 0x07, 0x00, 0x00, +} + func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -108,41 +292,52 @@ func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) { } func (m *CertificateSigningRequest) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateSigningRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } 
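The regenerated marshalers above switch from the old forward-writing MarshalTo to MarshalToSizedBuffer, which fills a pre-sized buffer from the end towards the front: the payload is copied first, then its length varint, then the field tag, and encodeVarintGenerated now steps the offset back by sovGenerated(v) before writing. The following is a minimal illustrative sketch of that back-to-front pattern, not the generated code itself; the names (sovSketch, encodeVarintSketch, marshalStringField) are made up for illustration.

package main

import (
	"fmt"
	"math/bits"
)

// sovSketch mirrors the new sovGenerated: the byte length of a uvarint.
func sovSketch(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// encodeVarintSketch mirrors the new encodeVarintGenerated: step the offset
// back by the varint width, write the value there, return the new start.
func encodeVarintSketch(b []byte, offset int, v uint64) int {
	offset -= sovSketch(v)
	base := offset
	for v >= 1<<7 {
		b[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	b[offset] = uint8(v)
	return base
}

// marshalStringField writes a length-delimited field (wire type 2) back to
// front: payload bytes, then the length varint, then the field tag byte.
func marshalStringField(b []byte, i int, tag byte, s string) int {
	i -= len(s)
	copy(b[i:], s)
	i = encodeVarintSketch(b, i, uint64(len(s)))
	i--
	b[i] = tag
	return i
}

func main() {
	msg := "approved"
	size := 1 + sovSketch(uint64(len(msg))) + len(msg)
	buf := make([]byte, size)
	// Field number 1, wire type 2 => tag byte 0xa, just as in the diff above.
	i := marshalStringField(buf, len(buf), 0xa, msg)
	fmt.Printf("encoded %d bytes: % x\n", size-i, buf[i:])
}

Writing backwards lets nested messages be emitted without a second size pass, which is why the regenerated code marshals Status before Spec before ObjectMeta even though the tags still come out in ascending field order on the wire.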
func (m *CertificateSigningRequestCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -150,37 +345,47 @@ func (m *CertificateSigningRequestCondition) Marshal() (dAtA []byte, err error) } func (m *CertificateSigningRequestCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateSigningRequestCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n4, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 - return i, nil + i-- + dAtA[i] = 0x22 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x1a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CertificateSigningRequestList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -188,37 +393,46 @@ func (m *CertificateSigningRequestList) Marshal() (dAtA []byte, err error) { } func (m *CertificateSigningRequestList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateSigningRequestList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CertificateSigningRequestSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -226,92 +440,86 @@ func (m 
*CertificateSigningRequestSpec) Marshal() (dAtA []byte, err error) { } func (m *CertificateSigningRequestSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateSigningRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Request != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request))) - i += copy(dAtA[i:], m.Request) - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) - i += copy(dAtA[i:], m.Username) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Usages) > 0 { - for _, s := range m.Usages { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } if len(m.Extra) > 0 { keysForExtra := make([]string, 0, len(m.Extra)) for k := range m.Extra { keysForExtra = append(keysForExtra, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for _, k := range keysForExtra { - dAtA[i] = 0x32 - i++ - v := m.Extra[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n6, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 + i-- + dAtA[i] = 0x12 + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Usages) > 0 { + for iNdEx := len(m.Usages) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Usages[iNdEx]) + copy(dAtA[i:], m.Usages[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Usages[iNdEx]))) + i-- + dAtA[i] = 0x2a } } - return i, nil + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x1a + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0x12 + if m.Request != nil { + i -= len(m.Request) + copy(dAtA[i:], m.Request) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } func (m *CertificateSigningRequestStatus) Marshal() (dAtA []byte, err error) { size := m.Size() 
dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -319,35 +527,43 @@ func (m *CertificateSigningRequestStatus) Marshal() (dAtA []byte, err error) { } func (m *CertificateSigningRequestStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateSigningRequestStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if m.Certificate != nil { + i -= len(m.Certificate) + copy(dAtA[i:], m.Certificate) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Certificate))) + i-- + dAtA[i] = 0x12 + } if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - if m.Certificate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Certificate))) - i += copy(dAtA[i:], m.Certificate) - } - return i, nil + return len(dAtA) - i, nil } func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -355,38 +571,42 @@ func (m ExtraValue) Marshal() (dAtA []byte, err error) { } func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m) > 0 { - for _, s := range m { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *CertificateSigningRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -399,6 +619,9 @@ func (m *CertificateSigningRequest) Size() (n int) { } func (m *CertificateSigningRequestCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -413,6 +636,9 @@ func (m *CertificateSigningRequestCondition) Size() (n int) { } func (m *CertificateSigningRequestList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -427,6 +653,9 @@ func (m *CertificateSigningRequestList) Size() (n int) { } func (m *CertificateSigningRequestSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Request != nil { @@ -462,6 +691,9 @@ func (m *CertificateSigningRequestSpec) Size() (n int) { } func (m *CertificateSigningRequestStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Conditions) > 0 { @@ 
-478,6 +710,9 @@ func (m *CertificateSigningRequestStatus) Size() (n int) { } func (m ExtraValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m) > 0 { @@ -490,14 +725,7 @@ func (m ExtraValue) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -507,7 +735,7 @@ func (this *CertificateSigningRequest) String() string { return "nil" } s := strings.Join([]string{`&CertificateSigningRequest{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CertificateSigningRequestSpec", "CertificateSigningRequestSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CertificateSigningRequestStatus", "CertificateSigningRequestStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -522,7 +750,7 @@ func (this *CertificateSigningRequestCondition) String() string { `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -531,9 +759,14 @@ func (this *CertificateSigningRequestList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]CertificateSigningRequest{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CertificateSigningRequest", "CertificateSigningRequest", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&CertificateSigningRequestList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CertificateSigningRequest", "CertificateSigningRequest", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -567,8 +800,13 @@ func (this *CertificateSigningRequestStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]CertificateSigningRequestCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "CertificateSigningRequestCondition", "CertificateSigningRequestCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&CertificateSigningRequestStatus{`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "CertificateSigningRequestCondition", "CertificateSigningRequestCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `Certificate:` + 
valueToStringGenerated(this.Certificate) + `,`, `}`, }, "") @@ -597,7 +835,7 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -625,7 +863,7 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -634,6 +872,9 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -655,7 +896,7 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -664,6 +905,9 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -685,7 +929,7 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -694,6 +938,9 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -710,6 +957,9 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -737,7 +987,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -765,7 +1015,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -775,6 +1025,9 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -794,7 +1047,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -804,6 +1057,9 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -823,7 +1079,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -833,6 +1089,9 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 
{ + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -852,7 +1111,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -861,6 +1120,9 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -877,6 +1139,9 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -904,7 +1169,7 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -932,7 +1197,7 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -941,6 +1206,9 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -962,7 +1230,7 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -971,6 +1239,9 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -988,6 +1259,9 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1015,7 +1289,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1043,7 +1317,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1052,6 +1326,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1074,7 +1351,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1084,6 +1361,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1103,7 +1383,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b 
:= dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1113,6 +1393,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1132,7 +1415,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1142,6 +1425,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1161,7 +1447,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1171,6 +1457,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1190,7 +1479,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1199,6 +1488,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1219,7 +1511,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1236,7 +1528,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1246,6 +1538,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -1262,7 +1557,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1271,7 +1566,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { @@ -1308,6 +1603,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1335,7 +1633,7 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= 
uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1363,7 +1661,7 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1372,6 +1670,9 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1394,7 +1695,7 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1403,6 +1704,9 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1420,6 +1724,9 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1447,7 +1754,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1475,7 +1782,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1485,6 +1792,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1499,6 +1809,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1565,10 +1878,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1597,6 +1913,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1615,62 +1934,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 804 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4d, 0x8f, 0xdb, 0x44, - 0x18, 0x8e, 0xf3, 0xb5, 0xc9, 0x64, 0xd9, 0x56, 0x23, 0x54, 0x99, 0x95, 0x6a, 0xaf, 0x2c, 0x40, - 0xcb, 0x47, 0xc7, 0x6c, 0x85, 0x60, 0xb5, 0x07, 0x04, 0x5e, 0x2a, 0x58, 0xd1, 0x0a, 0x69, 0xda, - 0x70, 0x40, 0x48, 0x74, 0xe2, 0xbc, 0x75, 0xa6, 0xa9, 0x3f, 0xf0, 0x8c, 0x03, 0xb9, 0xf5, 0x27, - 0x70, 0xe4, 0x82, 0xc4, 0x2f, 0xe1, 0xbc, 0x1c, 0x90, 0x7a, 0xec, 0x01, 0x45, 0x6c, 0xf8, 0x17, - 
0x3d, 0xa1, 0x19, 0x4f, 0xe2, 0x90, 0x55, 0x48, 0xd5, 0xbd, 0x79, 0x9e, 0xf7, 0x79, 0x9e, 0xf7, - 0x63, 0xde, 0x31, 0xfa, 0x72, 0x7c, 0x2c, 0x08, 0x4f, 0xfd, 0x71, 0x31, 0x80, 0x3c, 0x01, 0x09, - 0xc2, 0x9f, 0x40, 0x32, 0x4c, 0x73, 0xdf, 0x04, 0x58, 0xc6, 0xfd, 0x10, 0x72, 0xc9, 0x1f, 0xf1, - 0x90, 0xe9, 0xf0, 0xd1, 0x00, 0x24, 0x3b, 0xf2, 0x23, 0x48, 0x20, 0x67, 0x12, 0x86, 0x24, 0xcb, - 0x53, 0x99, 0x62, 0xb7, 0x14, 0x10, 0x96, 0x71, 0xb2, 0x2a, 0x20, 0x46, 0xb0, 0x7f, 0x2b, 0xe2, - 0x72, 0x54, 0x0c, 0x48, 0x98, 0xc6, 0x7e, 0x94, 0x46, 0xa9, 0xaf, 0x75, 0x83, 0xe2, 0x91, 0x3e, - 0xe9, 0x83, 0xfe, 0x2a, 0xfd, 0xf6, 0x3f, 0xac, 0x0a, 0x88, 0x59, 0x38, 0xe2, 0x09, 0xe4, 0x53, - 0x3f, 0x1b, 0x47, 0x0a, 0x10, 0x7e, 0x0c, 0x92, 0xf9, 0x93, 0x4b, 0x55, 0xec, 0xfb, 0x9b, 0x54, - 0x79, 0x91, 0x48, 0x1e, 0xc3, 0x25, 0xc1, 0x47, 0xdb, 0x04, 0x22, 0x1c, 0x41, 0xcc, 0xd6, 0x75, - 0xde, 0x1f, 0x75, 0xf4, 0xc6, 0x69, 0xd5, 0xe6, 0x7d, 0x1e, 0x25, 0x3c, 0x89, 0x28, 0xfc, 0x50, - 0x80, 0x90, 0xf8, 0x21, 0xea, 0xa8, 0x0a, 0x87, 0x4c, 0x32, 0xdb, 0x3a, 0xb0, 0x0e, 0x7b, 0xb7, - 0x3f, 0x20, 0xd5, 0x7c, 0x96, 0x89, 0x48, 0x36, 0x8e, 0x14, 0x20, 0x88, 0x62, 0x93, 0xc9, 0x11, - 0xf9, 0x7a, 0xf0, 0x18, 0x42, 0x79, 0x0f, 0x24, 0x0b, 0xf0, 0xf9, 0xcc, 0xad, 0xcd, 0x67, 0x2e, - 0xaa, 0x30, 0xba, 0x74, 0xc5, 0x0f, 0x51, 0x53, 0x64, 0x10, 0xda, 0x75, 0xed, 0xfe, 0x09, 0xd9, - 0x32, 0x7d, 0xb2, 0xb1, 0xd6, 0xfb, 0x19, 0x84, 0xc1, 0xae, 0xc9, 0xd5, 0x54, 0x27, 0xaa, 0x9d, - 0xf1, 0x08, 0xb5, 0x85, 0x64, 0xb2, 0x10, 0x76, 0x43, 0xe7, 0xf8, 0xf4, 0x0a, 0x39, 0xb4, 0x4f, - 0xb0, 0x67, 0xb2, 0xb4, 0xcb, 0x33, 0x35, 0xfe, 0xde, 0xaf, 0x75, 0xe4, 0x6d, 0xd4, 0x9e, 0xa6, - 0xc9, 0x90, 0x4b, 0x9e, 0x26, 0xf8, 0x18, 0x35, 0xe5, 0x34, 0x03, 0x3d, 0xd0, 0x6e, 0xf0, 0xe6, - 0xa2, 0xe4, 0x07, 0xd3, 0x0c, 0x5e, 0xcc, 0xdc, 0xd7, 0xd7, 0xf9, 0x0a, 0xa7, 0x5a, 0x81, 0xdf, - 0x46, 0xed, 0x1c, 0x98, 0x48, 0x13, 0x3d, 0xae, 0x6e, 0x55, 0x08, 0xd5, 0x28, 0x35, 0x51, 0xfc, - 0x0e, 0xda, 0x89, 0x41, 0x08, 0x16, 0x81, 0xee, 0xb9, 0x1b, 0x5c, 0x33, 0xc4, 0x9d, 0x7b, 0x25, - 0x4c, 0x17, 0x71, 0xfc, 0x18, 0xed, 0x3d, 0x61, 0x42, 0xf6, 0xb3, 0x21, 0x93, 0xf0, 0x80, 0xc7, - 0x60, 0x37, 0xf5, 0x94, 0xde, 0x7d, 0xb9, 0x7b, 0x56, 0x8a, 0xe0, 0x86, 0x71, 0xdf, 0xbb, 0xfb, - 0x1f, 0x27, 0xba, 0xe6, 0xec, 0xcd, 0x2c, 0x74, 0x73, 0xe3, 0x7c, 0xee, 0x72, 0x21, 0xf1, 0x77, - 0x97, 0xf6, 0x8d, 0xbc, 0x5c, 0x1d, 0x4a, 0xad, 0xb7, 0xed, 0xba, 0xa9, 0xa5, 0xb3, 0x40, 0x56, - 0x76, 0xed, 0x7b, 0xd4, 0xe2, 0x12, 0x62, 0x61, 0xd7, 0x0f, 0x1a, 0x87, 0xbd, 0xdb, 0x27, 0xaf, - 0xbe, 0x08, 0xc1, 0x6b, 0x26, 0x4d, 0xeb, 0x4c, 0x19, 0xd2, 0xd2, 0xd7, 0xfb, 0xbd, 0xf1, 0x3f, - 0x0d, 0xaa, 0x95, 0xc4, 0x6f, 0xa1, 0x9d, 0xbc, 0x3c, 0xea, 0xfe, 0x76, 0x83, 0x9e, 0xba, 0x15, - 0xc3, 0xa0, 0x8b, 0x18, 0x7e, 0x1f, 0x75, 0x0a, 0x01, 0x79, 0xc2, 0x62, 0x30, 0x57, 0xbd, 0xec, - 0xab, 0x6f, 0x70, 0xba, 0x64, 0xe0, 0x9b, 0xa8, 0x51, 0xf0, 0xa1, 0xb9, 0xea, 0x9e, 0x21, 0x36, - 0xfa, 0x67, 0x9f, 0x53, 0x85, 0x63, 0x0f, 0xb5, 0xa3, 0x3c, 0x2d, 0x32, 0x61, 0x37, 0x0f, 0x1a, - 0x87, 0xdd, 0x00, 0xa9, 0x8d, 0xf9, 0x42, 0x23, 0xd4, 0x44, 0x30, 0x41, 0xed, 0x42, 0xed, 0x83, - 0xb0, 0x5b, 0x9a, 0x73, 0x43, 0x71, 0xfa, 0x1a, 0x79, 0x31, 0x73, 0x3b, 0x5f, 0xc1, 0x54, 0x1f, - 0xa8, 0x61, 0xe1, 0x04, 0xb5, 0xe0, 0x27, 0x99, 0x33, 0xbb, 0xad, 0x47, 0x79, 0x76, 0xb5, 0x77, - 0x4b, 0xee, 0x28, 0xaf, 0x3b, 0x89, 0xcc, 0xa7, 0xd5, 0x64, 0x35, 0x46, 0xcb, 0x34, 0xfb, 0x80, - 0x50, 0xc5, 0xc1, 0xd7, 0x51, 0x63, 0x0c, 0xd3, 0xf2, 0x01, 0x51, 0xf5, 0x89, 0x3f, 0x43, 0xad, - 0x09, 0x7b, 0x52, 0x80, 
0xf9, 0x8f, 0xbc, 0xb7, 0xb5, 0x1e, 0xed, 0xf6, 0x8d, 0x92, 0xd0, 0x52, - 0x79, 0x52, 0x3f, 0xb6, 0xbc, 0x3f, 0x2d, 0xe4, 0x6e, 0x79, 0xfd, 0xf8, 0x47, 0x84, 0xc2, 0xc5, - 0xdb, 0x14, 0xb6, 0xa5, 0xfb, 0x3f, 0x7d, 0xf5, 0xfe, 0x97, 0xef, 0xbc, 0xfa, 0x51, 0x2e, 0x21, - 0x41, 0x57, 0x52, 0xe1, 0x23, 0xd4, 0x5b, 0xb1, 0xd6, 0x9d, 0xee, 0x06, 0xd7, 0xe6, 0x33, 0xb7, - 0xb7, 0x62, 0x4e, 0x57, 0x39, 0xde, 0xc7, 0x66, 0x6c, 0xba, 0x51, 0xec, 0x2e, 0xf6, 0xdf, 0xd2, - 0x77, 0xdc, 0x5d, 0xdf, 0xdf, 0x93, 0xce, 0x2f, 0xbf, 0xb9, 0xb5, 0xa7, 0x7f, 0x1d, 0xd4, 0x82, - 0x5b, 0xe7, 0x17, 0x4e, 0xed, 0xd9, 0x85, 0x53, 0x7b, 0x7e, 0xe1, 0xd4, 0x9e, 0xce, 0x1d, 0xeb, - 0x7c, 0xee, 0x58, 0xcf, 0xe6, 0x8e, 0xf5, 0x7c, 0xee, 0x58, 0x7f, 0xcf, 0x1d, 0xeb, 0xe7, 0x7f, - 0x9c, 0xda, 0xb7, 0x3b, 0xa6, 0xbb, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xb7, 0x6b, 0x5b, 0xf9, - 0x7f, 0x07, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go index 1b103f155..b3e0aeb50 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go @@ -73,7 +73,7 @@ func (in *CertificateSigningRequestCondition) DeepCopy() *CertificateSigningRequ func (in *CertificateSigningRequestList) DeepCopyInto(out *CertificateSigningRequestList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CertificateSigningRequest, len(*in)) diff --git a/vendor/k8s.io/api/coordination/v1/generated.pb.go b/vendor/k8s.io/api/coordination/v1/generated.pb.go index 349c68574..7e78be191 100644 --- a/vendor/k8s.io/api/coordination/v1/generated.pb.go +++ b/vendor/k8s.io/api/coordination/v1/generated.pb.go @@ -17,29 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1/generated.proto - - It has these top-level messages: - Lease - LeaseList - LeaseSpec -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -52,27 +44,142 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *Lease) Reset() { *m = Lease{} } -func (*Lease) ProtoMessage() {} -func (*Lease) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *Lease) Reset() { *m = Lease{} } +func (*Lease) ProtoMessage() {} +func (*Lease) Descriptor() ([]byte, []int) { + return fileDescriptor_929e1148ad9baca3, []int{0} +} +func (m *Lease) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Lease) XXX_Merge(src proto.Message) { + xxx_messageInfo_Lease.Merge(m, src) +} +func (m *Lease) XXX_Size() int { + return m.Size() +} +func (m *Lease) XXX_DiscardUnknown() { + xxx_messageInfo_Lease.DiscardUnknown(m) +} + +var xxx_messageInfo_Lease proto.InternalMessageInfo + +func (m *LeaseList) Reset() { *m = LeaseList{} } +func (*LeaseList) ProtoMessage() {} +func (*LeaseList) Descriptor() ([]byte, []int) { + return fileDescriptor_929e1148ad9baca3, []int{1} +} +func (m *LeaseList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseList) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseList.Merge(m, src) +} +func (m *LeaseList) XXX_Size() int { + return m.Size() +} +func (m *LeaseList) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseList.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseList proto.InternalMessageInfo -func (m *LeaseList) Reset() { *m = LeaseList{} } -func (*LeaseList) ProtoMessage() {} -func (*LeaseList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *LeaseSpec) Reset() { *m = LeaseSpec{} } +func (*LeaseSpec) ProtoMessage() {} +func (*LeaseSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_929e1148ad9baca3, []int{2} +} +func (m *LeaseSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseSpec.Merge(m, src) +} +func (m *LeaseSpec) XXX_Size() int { + return m.Size() +} +func (m *LeaseSpec) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseSpec.DiscardUnknown(m) +} -func (m *LeaseSpec) Reset() { *m = LeaseSpec{} } -func (*LeaseSpec) ProtoMessage() {} -func (*LeaseSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_LeaseSpec proto.InternalMessageInfo func init() { proto.RegisterType((*Lease)(nil), "k8s.io.api.coordination.v1.Lease") proto.RegisterType((*LeaseList)(nil), "k8s.io.api.coordination.v1.LeaseList") proto.RegisterType((*LeaseSpec)(nil), "k8s.io.api.coordination.v1.LeaseSpec") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1/generated.proto", fileDescriptor_929e1148ad9baca3) +} + +var fileDescriptor_929e1148ad9baca3 = []byte{ + // 535 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x90, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0x86, 0xe3, 0x36, 0x91, 0x9a, 0x0d, 0x2d, 0x91, 0x95, 0x83, 0x95, 0x83, 
0x5d, 0x22, 0x21, + 0xe5, 0xc2, 0x2e, 0xa9, 0x10, 0x42, 0x9c, 0xc0, 0x20, 0xa0, 0x52, 0x2a, 0x24, 0xb7, 0x27, 0xd4, + 0x03, 0x1b, 0x7b, 0x70, 0x96, 0xd4, 0x5e, 0xb3, 0xbb, 0x0e, 0xea, 0x8d, 0x47, 0xe0, 0xca, 0x63, + 0xc0, 0x53, 0xe4, 0xd8, 0x63, 0x4f, 0x16, 0x31, 0x2f, 0x82, 0x76, 0x93, 0x36, 0x21, 0x49, 0xd5, + 0x8a, 0xdb, 0xee, 0xcc, 0xfc, 0xdf, 0xfc, 0xf3, 0xa3, 0x57, 0xa3, 0x67, 0x12, 0x33, 0x4e, 0x46, + 0xf9, 0x00, 0x44, 0x0a, 0x0a, 0x24, 0x19, 0x43, 0x1a, 0x71, 0x41, 0xe6, 0x0d, 0x9a, 0x31, 0x12, + 0x72, 0x2e, 0x22, 0x96, 0x52, 0xc5, 0x78, 0x4a, 0xc6, 0x3d, 0x12, 0x43, 0x0a, 0x82, 0x2a, 0x88, + 0x70, 0x26, 0xb8, 0xe2, 0x76, 0x7b, 0x36, 0x8b, 0x69, 0xc6, 0xf0, 0xf2, 0x2c, 0x1e, 0xf7, 0xda, + 0x8f, 0x62, 0xa6, 0x86, 0xf9, 0x00, 0x87, 0x3c, 0x21, 0x31, 0x8f, 0x39, 0x31, 0x92, 0x41, 0xfe, + 0xc9, 0xfc, 0xcc, 0xc7, 0xbc, 0x66, 0xa8, 0xf6, 0x93, 0xc5, 0xda, 0x84, 0x86, 0x43, 0x96, 0x82, + 0x38, 0x27, 0xd9, 0x28, 0xd6, 0x05, 0x49, 0x12, 0x50, 0x74, 0x83, 0x81, 0x36, 0xb9, 0x49, 0x25, + 0xf2, 0x54, 0xb1, 0x04, 0xd6, 0x04, 0x4f, 0x6f, 0x13, 0xc8, 0x70, 0x08, 0x09, 0x5d, 0xd5, 0x75, + 0x7e, 0x59, 0xa8, 0xd6, 0x07, 0x2a, 0xc1, 0xfe, 0x88, 0x76, 0xb4, 0x9b, 0x88, 0x2a, 0xea, 0x58, + 0xfb, 0x56, 0xb7, 0x71, 0xf0, 0x18, 0x2f, 0x62, 0xb8, 0x86, 0xe2, 0x6c, 0x14, 0xeb, 0x82, 0xc4, + 0x7a, 0x1a, 0x8f, 0x7b, 0xf8, 0xfd, 0xe0, 0x33, 0x84, 0xea, 0x08, 0x14, 0xf5, 0xed, 0x49, 0xe1, + 0x55, 0xca, 0xc2, 0x43, 0x8b, 0x5a, 0x70, 0x4d, 0xb5, 0xdf, 0xa2, 0xaa, 0xcc, 0x20, 0x74, 0xb6, + 0x0c, 0xfd, 0x21, 0xbe, 0x39, 0x64, 0x6c, 0x2c, 0x1d, 0x67, 0x10, 0xfa, 0xf7, 0xe6, 0xc8, 0xaa, + 0xfe, 0x05, 0x06, 0xd0, 0xf9, 0x69, 0xa1, 0xba, 0x99, 0xe8, 0x33, 0xa9, 0xec, 0xd3, 0x35, 0xe3, + 0xf8, 0x6e, 0xc6, 0xb5, 0xda, 0xd8, 0x6e, 0xce, 0x77, 0xec, 0x5c, 0x55, 0x96, 0x4c, 0xbf, 0x41, + 0x35, 0xa6, 0x20, 0x91, 0xce, 0xd6, 0xfe, 0x76, 0xb7, 0x71, 0xf0, 0xe0, 0x56, 0xd7, 0xfe, 0xee, + 0x9c, 0x56, 0x3b, 0xd4, 0xba, 0x60, 0x26, 0xef, 0xfc, 0xd8, 0x9e, 0x7b, 0xd6, 0x77, 0xd8, 0xcf, + 0xd1, 0xde, 0x90, 0x9f, 0x45, 0x20, 0x0e, 0x23, 0x48, 0x15, 0x53, 0xe7, 0xc6, 0x79, 0xdd, 0xb7, + 0xcb, 0xc2, 0xdb, 0x7b, 0xf7, 0x4f, 0x27, 0x58, 0x99, 0xb4, 0xfb, 0xa8, 0x75, 0xa6, 0x41, 0xaf, + 0x73, 0x61, 0x36, 0x1f, 0x43, 0xc8, 0xd3, 0x48, 0x9a, 0x58, 0x6b, 0xbe, 0x53, 0x16, 0x5e, 0xab, + 0xbf, 0xa1, 0x1f, 0x6c, 0x54, 0xd9, 0x03, 0xd4, 0xa0, 0xe1, 0x97, 0x9c, 0x09, 0x38, 0x61, 0x09, + 0x38, 0xdb, 0x26, 0x40, 0x72, 0xb7, 0x00, 0x8f, 0x58, 0x28, 0xb8, 0x96, 0xf9, 0xf7, 0xcb, 0xc2, + 0x6b, 0xbc, 0x5c, 0x70, 0x82, 0x65, 0xa8, 0x7d, 0x8a, 0xea, 0x02, 0x52, 0xf8, 0x6a, 0x36, 0x54, + 0xff, 0x6f, 0xc3, 0x6e, 0x59, 0x78, 0xf5, 0xe0, 0x8a, 0x12, 0x2c, 0x80, 0xf6, 0x0b, 0xd4, 0x34, + 0x97, 0x9d, 0x08, 0x9a, 0x4a, 0xa6, 0x6f, 0x93, 0x4e, 0xcd, 0x64, 0xd1, 0x2a, 0x0b, 0xaf, 0xd9, + 0x5f, 0xe9, 0x05, 0x6b, 0xd3, 0x7e, 0x77, 0x32, 0x75, 0x2b, 0x17, 0x53, 0xb7, 0x72, 0x39, 0x75, + 0x2b, 0xdf, 0x4a, 0xd7, 0x9a, 0x94, 0xae, 0x75, 0x51, 0xba, 0xd6, 0x65, 0xe9, 0x5a, 0xbf, 0x4b, + 0xd7, 0xfa, 0xfe, 0xc7, 0xad, 0x7c, 0xd8, 0x1a, 0xf7, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0x41, + 0x5e, 0x94, 0x96, 0x5e, 0x04, 0x00, 0x00, +} + func (m *Lease) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -80,33 +187,42 @@ func (m *Lease) Marshal() (dAtA []byte, err error) { } func (m *Lease) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Lease) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *LeaseList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -114,37 +230,46 @@ func (m *LeaseList) Marshal() (dAtA []byte, err error) { } func (m *LeaseList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *LeaseSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -152,59 +277,74 @@ func (m *LeaseSpec) Marshal() (dAtA []byte, err error) { } func (m *LeaseSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.HolderIdentity != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HolderIdentity))) - i += copy(dAtA[i:], *m.HolderIdentity) + if m.LeaseTransitions != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) + i-- + dAtA[i] = 0x28 } - if m.LeaseDurationSeconds != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseDurationSeconds)) + if m.RenewTime != nil { + { + size, err := m.RenewTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } if m.AcquireTime != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AcquireTime.Size())) - n4, err := 
m.AcquireTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.AcquireTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- + dAtA[i] = 0x1a } - if m.RenewTime != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RenewTime.Size())) - n5, err := m.RenewTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 + if m.LeaseDurationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseDurationSeconds)) + i-- + dAtA[i] = 0x10 } - if m.LeaseTransitions != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) + if m.HolderIdentity != nil { + i -= len(*m.HolderIdentity) + copy(dAtA[i:], *m.HolderIdentity) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HolderIdentity))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *Lease) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -215,6 +355,9 @@ func (m *Lease) Size() (n int) { } func (m *LeaseList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -229,6 +372,9 @@ func (m *LeaseList) Size() (n int) { } func (m *LeaseSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.HolderIdentity != nil { @@ -253,14 +399,7 @@ func (m *LeaseSpec) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -270,7 +409,7 @@ func (this *Lease) String() string { return "nil" } s := strings.Join([]string{`&Lease{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LeaseSpec", "LeaseSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -280,9 +419,14 @@ func (this *LeaseList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Lease{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Lease", "Lease", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&LeaseList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Lease", "Lease", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -294,8 +438,8 @@ func (this *LeaseSpec) String() string { s := strings.Join([]string{`&LeaseSpec{`, `HolderIdentity:` + valueToStringGenerated(this.HolderIdentity) + `,`, `LeaseDurationSeconds:` + 
valueToStringGenerated(this.LeaseDurationSeconds) + `,`, - `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1) + `,`, - `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1) + `,`, + `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "v1.MicroTime", 1) + `,`, + `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`, `LeaseTransitions:` + valueToStringGenerated(this.LeaseTransitions) + `,`, `}`, }, "") @@ -324,7 +468,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -352,7 +496,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -361,6 +505,9 @@ func (m *Lease) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -382,7 +529,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -391,6 +538,9 @@ func (m *Lease) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -407,6 +557,9 @@ func (m *Lease) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -434,7 +587,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -462,7 +615,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -471,6 +624,9 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -492,7 +648,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -501,6 +657,9 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -518,6 +677,9 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -545,7 +707,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -573,7 +735,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << 
shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -583,6 +745,9 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -603,7 +768,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -623,7 +788,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -632,11 +797,14 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AcquireTime == nil { - m.AcquireTime = &k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime{} + m.AcquireTime = &v1.MicroTime{} } if err := m.AcquireTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -656,7 +824,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -665,11 +833,14 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.RenewTime == nil { - m.RenewTime = &k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime{} + m.RenewTime = &v1.MicroTime{} } if err := m.RenewTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -689,7 +860,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -704,6 +875,9 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -770,10 +944,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -802,6 +979,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -820,45 +1000,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 535 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x90, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0x86, 0xe3, 0x36, 0x91, 0x9a, 0x0d, 0x2d, 0x91, 0x95, 0x83, 0x95, 0x83, 0x5d, 0x22, 0x21, - 0xe5, 0xc2, 0x2e, 0xa9, 0x10, 0x42, 0x9c, 0xc0, 0x20, 0xa0, 0x52, 0x2a, 0x24, 0xb7, 0x27, 0xd4, - 0x03, 0x1b, 0x7b, 0x70, 0x96, 0xd4, 0x5e, 0xb3, 0xbb, 0x0e, 0xea, 0x8d, 0x47, 0xe0, 0xca, 0x63, - 0xc0, 0x53, 0xe4, 0xd8, 0x63, 0x4f, 0x16, 0x31, 0x2f, 0x82, 0x76, 0x93, 
0x36, 0x21, 0x49, 0xd5, - 0x8a, 0xdb, 0xee, 0xcc, 0xfc, 0xdf, 0xfc, 0xf3, 0xa3, 0x57, 0xa3, 0x67, 0x12, 0x33, 0x4e, 0x46, - 0xf9, 0x00, 0x44, 0x0a, 0x0a, 0x24, 0x19, 0x43, 0x1a, 0x71, 0x41, 0xe6, 0x0d, 0x9a, 0x31, 0x12, - 0x72, 0x2e, 0x22, 0x96, 0x52, 0xc5, 0x78, 0x4a, 0xc6, 0x3d, 0x12, 0x43, 0x0a, 0x82, 0x2a, 0x88, - 0x70, 0x26, 0xb8, 0xe2, 0x76, 0x7b, 0x36, 0x8b, 0x69, 0xc6, 0xf0, 0xf2, 0x2c, 0x1e, 0xf7, 0xda, - 0x8f, 0x62, 0xa6, 0x86, 0xf9, 0x00, 0x87, 0x3c, 0x21, 0x31, 0x8f, 0x39, 0x31, 0x92, 0x41, 0xfe, - 0xc9, 0xfc, 0xcc, 0xc7, 0xbc, 0x66, 0xa8, 0xf6, 0x93, 0xc5, 0xda, 0x84, 0x86, 0x43, 0x96, 0x82, - 0x38, 0x27, 0xd9, 0x28, 0xd6, 0x05, 0x49, 0x12, 0x50, 0x74, 0x83, 0x81, 0x36, 0xb9, 0x49, 0x25, - 0xf2, 0x54, 0xb1, 0x04, 0xd6, 0x04, 0x4f, 0x6f, 0x13, 0xc8, 0x70, 0x08, 0x09, 0x5d, 0xd5, 0x75, - 0x7e, 0x59, 0xa8, 0xd6, 0x07, 0x2a, 0xc1, 0xfe, 0x88, 0x76, 0xb4, 0x9b, 0x88, 0x2a, 0xea, 0x58, - 0xfb, 0x56, 0xb7, 0x71, 0xf0, 0x18, 0x2f, 0x62, 0xb8, 0x86, 0xe2, 0x6c, 0x14, 0xeb, 0x82, 0xc4, - 0x7a, 0x1a, 0x8f, 0x7b, 0xf8, 0xfd, 0xe0, 0x33, 0x84, 0xea, 0x08, 0x14, 0xf5, 0xed, 0x49, 0xe1, - 0x55, 0xca, 0xc2, 0x43, 0x8b, 0x5a, 0x70, 0x4d, 0xb5, 0xdf, 0xa2, 0xaa, 0xcc, 0x20, 0x74, 0xb6, - 0x0c, 0xfd, 0x21, 0xbe, 0x39, 0x64, 0x6c, 0x2c, 0x1d, 0x67, 0x10, 0xfa, 0xf7, 0xe6, 0xc8, 0xaa, - 0xfe, 0x05, 0x06, 0xd0, 0xf9, 0x69, 0xa1, 0xba, 0x99, 0xe8, 0x33, 0xa9, 0xec, 0xd3, 0x35, 0xe3, - 0xf8, 0x6e, 0xc6, 0xb5, 0xda, 0xd8, 0x6e, 0xce, 0x77, 0xec, 0x5c, 0x55, 0x96, 0x4c, 0xbf, 0x41, - 0x35, 0xa6, 0x20, 0x91, 0xce, 0xd6, 0xfe, 0x76, 0xb7, 0x71, 0xf0, 0xe0, 0x56, 0xd7, 0xfe, 0xee, - 0x9c, 0x56, 0x3b, 0xd4, 0xba, 0x60, 0x26, 0xef, 0xfc, 0xd8, 0x9e, 0x7b, 0xd6, 0x77, 0xd8, 0xcf, - 0xd1, 0xde, 0x90, 0x9f, 0x45, 0x20, 0x0e, 0x23, 0x48, 0x15, 0x53, 0xe7, 0xc6, 0x79, 0xdd, 0xb7, - 0xcb, 0xc2, 0xdb, 0x7b, 0xf7, 0x4f, 0x27, 0x58, 0x99, 0xb4, 0xfb, 0xa8, 0x75, 0xa6, 0x41, 0xaf, - 0x73, 0x61, 0x36, 0x1f, 0x43, 0xc8, 0xd3, 0x48, 0x9a, 0x58, 0x6b, 0xbe, 0x53, 0x16, 0x5e, 0xab, - 0xbf, 0xa1, 0x1f, 0x6c, 0x54, 0xd9, 0x03, 0xd4, 0xa0, 0xe1, 0x97, 0x9c, 0x09, 0x38, 0x61, 0x09, - 0x38, 0xdb, 0x26, 0x40, 0x72, 0xb7, 0x00, 0x8f, 0x58, 0x28, 0xb8, 0x96, 0xf9, 0xf7, 0xcb, 0xc2, - 0x6b, 0xbc, 0x5c, 0x70, 0x82, 0x65, 0xa8, 0x7d, 0x8a, 0xea, 0x02, 0x52, 0xf8, 0x6a, 0x36, 0x54, - 0xff, 0x6f, 0xc3, 0x6e, 0x59, 0x78, 0xf5, 0xe0, 0x8a, 0x12, 0x2c, 0x80, 0xf6, 0x0b, 0xd4, 0x34, - 0x97, 0x9d, 0x08, 0x9a, 0x4a, 0xa6, 0x6f, 0x93, 0x4e, 0xcd, 0x64, 0xd1, 0x2a, 0x0b, 0xaf, 0xd9, - 0x5f, 0xe9, 0x05, 0x6b, 0xd3, 0x7e, 0x77, 0x32, 0x75, 0x2b, 0x17, 0x53, 0xb7, 0x72, 0x39, 0x75, - 0x2b, 0xdf, 0x4a, 0xd7, 0x9a, 0x94, 0xae, 0x75, 0x51, 0xba, 0xd6, 0x65, 0xe9, 0x5a, 0xbf, 0x4b, - 0xd7, 0xfa, 0xfe, 0xc7, 0xad, 0x7c, 0xd8, 0x1a, 0xf7, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0x41, - 0x5e, 0x94, 0x96, 0x5e, 0x04, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/coordination/v1/generated.proto b/vendor/k8s.io/api/coordination/v1/generated.proto index 99692e958..4206746d8 100644 --- a/vendor/k8s.io/api/coordination/v1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1/generated.proto @@ -30,12 +30,12 @@ option go_package = "v1"; // Lease defines a lease concept. message Lease { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the Lease. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional LeaseSpec spec = 2; } @@ -43,7 +43,7 @@ message Lease { // LeaseList is a list of Lease objects. message LeaseList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/coordination/v1/types.go b/vendor/k8s.io/api/coordination/v1/types.go index 8f9f24d04..7a5605ace 100644 --- a/vendor/k8s.io/api/coordination/v1/types.go +++ b/vendor/k8s.io/api/coordination/v1/types.go @@ -26,12 +26,12 @@ import ( // Lease defines a lease concept. type Lease struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the Lease. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec LeaseSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -65,7 +65,7 @@ type LeaseSpec struct { type LeaseList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go index bd02ad5da..0f1440430 100644 --- a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go @@ -29,8 +29,8 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Lease = map[string]string{ "": "Lease defines a lease concept.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Lease) SwaggerDoc() map[string]string { @@ -39,7 +39,7 @@ func (Lease) SwaggerDoc() map[string]string { var map_LeaseList = map[string]string{ "": "LeaseList is a list of Lease objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } diff --git a/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go index 0f534055f..2dd7eddbc 100644 --- a/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go @@ -55,7 +55,7 @@ func (in *Lease) DeepCopyObject() runtime.Object { func (in *LeaseList) DeepCopyInto(out *LeaseList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Lease, len(*in)) diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go index aa57e9dd6..2463d6258 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go @@ -17,29 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto - - It has these top-level messages: - Lease - LeaseList - LeaseSpec -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -52,27 +44,142 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *Lease) Reset() { *m = Lease{} } -func (*Lease) ProtoMessage() {} -func (*Lease) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *Lease) Reset() { *m = Lease{} } +func (*Lease) ProtoMessage() {} +func (*Lease) Descriptor() ([]byte, []int) { + return fileDescriptor_daca6bcd2ff63a80, []int{0} +} +func (m *Lease) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Lease) XXX_Merge(src proto.Message) { + xxx_messageInfo_Lease.Merge(m, src) +} +func (m *Lease) XXX_Size() int { + return m.Size() +} +func (m *Lease) XXX_DiscardUnknown() { + xxx_messageInfo_Lease.DiscardUnknown(m) +} + +var xxx_messageInfo_Lease proto.InternalMessageInfo + +func (m *LeaseList) Reset() { *m = LeaseList{} } +func (*LeaseList) ProtoMessage() {} +func (*LeaseList) Descriptor() ([]byte, []int) { + return fileDescriptor_daca6bcd2ff63a80, []int{1} +} +func (m *LeaseList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseList) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseList.Merge(m, src) +} +func (m *LeaseList) XXX_Size() int { + return m.Size() +} +func (m *LeaseList) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseList.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseList proto.InternalMessageInfo -func (m *LeaseList) Reset() { *m = LeaseList{} } -func (*LeaseList) ProtoMessage() {} -func (*LeaseList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *LeaseSpec) Reset() { *m = LeaseSpec{} } +func (*LeaseSpec) ProtoMessage() {} +func (*LeaseSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_daca6bcd2ff63a80, []int{2} +} +func (m *LeaseSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseSpec.Merge(m, src) +} +func (m *LeaseSpec) XXX_Size() int { + return m.Size() +} +func (m *LeaseSpec) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseSpec.DiscardUnknown(m) +} -func (m *LeaseSpec) Reset() { *m = LeaseSpec{} } -func (*LeaseSpec) ProtoMessage() {} -func (*LeaseSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_LeaseSpec proto.InternalMessageInfo func init() { proto.RegisterType((*Lease)(nil), "k8s.io.api.coordination.v1beta1.Lease") proto.RegisterType((*LeaseList)(nil), "k8s.io.api.coordination.v1beta1.LeaseList") proto.RegisterType((*LeaseSpec)(nil), "k8s.io.api.coordination.v1beta1.LeaseSpec") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto", fileDescriptor_daca6bcd2ff63a80) +} + +var fileDescriptor_daca6bcd2ff63a80 = []byte{ + // 540 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x91, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0x86, 0xe3, 0xb6, 0x11, 0xcd, 0x86, 0x96, 0xc8, 0xca, 
0xc1, 0xca, 0xc1, 0xae, 0x72, 0x40, + 0x15, 0x52, 0x77, 0x49, 0x85, 0x10, 0xe2, 0x04, 0x16, 0x87, 0x56, 0xb8, 0x42, 0x72, 0x7b, 0x42, + 0x3d, 0xb0, 0xb6, 0x07, 0x67, 0x49, 0xed, 0x35, 0xbb, 0xeb, 0xa0, 0xde, 0x78, 0x04, 0xae, 0xbc, + 0x08, 0xbc, 0x42, 0x8e, 0x3d, 0xf6, 0x64, 0x11, 0xf3, 0x22, 0xc8, 0x1b, 0xb7, 0x09, 0x49, 0x51, + 0x23, 0x6e, 0xde, 0x99, 0xf9, 0xbf, 0xf9, 0xe7, 0x37, 0x3a, 0x1a, 0xbd, 0x90, 0x98, 0x71, 0x32, + 0xca, 0x03, 0x10, 0x29, 0x28, 0x90, 0x64, 0x0c, 0x69, 0xc4, 0x05, 0xa9, 0x1b, 0x34, 0x63, 0x24, + 0xe4, 0x5c, 0x44, 0x2c, 0xa5, 0x8a, 0xf1, 0x94, 0x8c, 0x07, 0x01, 0x28, 0x3a, 0x20, 0x31, 0xa4, + 0x20, 0xa8, 0x82, 0x08, 0x67, 0x82, 0x2b, 0x6e, 0x3a, 0x33, 0x01, 0xa6, 0x19, 0xc3, 0x8b, 0x02, + 0x5c, 0x0b, 0x7a, 0x07, 0x31, 0x53, 0xc3, 0x3c, 0xc0, 0x21, 0x4f, 0x48, 0xcc, 0x63, 0x4e, 0xb4, + 0x2e, 0xc8, 0x3f, 0xea, 0x97, 0x7e, 0xe8, 0xaf, 0x19, 0xaf, 0xf7, 0x6c, 0x6e, 0x20, 0xa1, 0xe1, + 0x90, 0xa5, 0x20, 0x2e, 0x49, 0x36, 0x8a, 0xab, 0x82, 0x24, 0x09, 0x28, 0x4a, 0xc6, 0x2b, 0x2e, + 0x7a, 0xe4, 0x5f, 0x2a, 0x91, 0xa7, 0x8a, 0x25, 0xb0, 0x22, 0x78, 0x7e, 0x9f, 0x40, 0x86, 0x43, + 0x48, 0xe8, 0xb2, 0xae, 0xff, 0xd3, 0x40, 0x4d, 0x0f, 0xa8, 0x04, 0xf3, 0x03, 0xda, 0xae, 0xdc, + 0x44, 0x54, 0x51, 0xcb, 0xd8, 0x33, 0xf6, 0xdb, 0x87, 0x4f, 0xf1, 0x3c, 0x8b, 0x5b, 0x28, 0xce, + 0x46, 0x71, 0x55, 0x90, 0xb8, 0x9a, 0xc6, 0xe3, 0x01, 0x7e, 0x17, 0x7c, 0x82, 0x50, 0x9d, 0x80, + 0xa2, 0xae, 0x39, 0x29, 0x9c, 0x46, 0x59, 0x38, 0x68, 0x5e, 0xf3, 0x6f, 0xa9, 0xa6, 0x87, 0xb6, + 0x64, 0x06, 0xa1, 0xb5, 0xa1, 0xe9, 0x4f, 0xf0, 0x3d, 0x49, 0x63, 0xed, 0xeb, 0x34, 0x83, 0xd0, + 0x7d, 0x58, 0x73, 0xb7, 0xaa, 0x97, 0xaf, 0x29, 0xfd, 0x1f, 0x06, 0x6a, 0xe9, 0x09, 0x8f, 0x49, + 0x65, 0x9e, 0xaf, 0xb8, 0xc7, 0xeb, 0xb9, 0xaf, 0xd4, 0xda, 0x7b, 0xa7, 0xde, 0xb1, 0x7d, 0x53, + 0x59, 0x70, 0xfe, 0x16, 0x35, 0x99, 0x82, 0x44, 0x5a, 0x1b, 0x7b, 0x9b, 0xfb, 0xed, 0xc3, 0xc7, + 0xeb, 0x59, 0x77, 0x77, 0x6a, 0x64, 0xf3, 0xb8, 0x12, 0xfb, 0x33, 0x46, 0xff, 0xfb, 0x66, 0x6d, + 0xbc, 0x3a, 0xc6, 0x7c, 0x89, 0x76, 0x87, 0xfc, 0x22, 0x02, 0x71, 0x1c, 0x41, 0xaa, 0x98, 0xba, + 0xd4, 0xf6, 0x5b, 0xae, 0x59, 0x16, 0xce, 0xee, 0xd1, 0x5f, 0x1d, 0x7f, 0x69, 0xd2, 0xf4, 0x50, + 0xf7, 0xa2, 0x02, 0xbd, 0xc9, 0x85, 0x5e, 0x7f, 0x0a, 0x21, 0x4f, 0x23, 0xa9, 0x03, 0x6e, 0xba, + 0x56, 0x59, 0x38, 0x5d, 0xef, 0x8e, 0xbe, 0x7f, 0xa7, 0xca, 0x0c, 0x50, 0x9b, 0x86, 0x9f, 0x73, + 0x26, 0xe0, 0x8c, 0x25, 0x60, 0x6d, 0xea, 0x14, 0xc9, 0x7a, 0x29, 0x9e, 0xb0, 0x50, 0xf0, 0x4a, + 0xe6, 0x3e, 0x2a, 0x0b, 0xa7, 0xfd, 0x7a, 0xce, 0xf1, 0x17, 0xa1, 0xe6, 0x39, 0x6a, 0x09, 0x48, + 0xe1, 0x8b, 0xde, 0xb0, 0xf5, 0x7f, 0x1b, 0x76, 0xca, 0xc2, 0x69, 0xf9, 0x37, 0x14, 0x7f, 0x0e, + 0x34, 0x5f, 0xa1, 0x8e, 0xbe, 0xec, 0x4c, 0xd0, 0x54, 0xb2, 0xea, 0x36, 0x69, 0x35, 0x75, 0x16, + 0xdd, 0xb2, 0x70, 0x3a, 0xde, 0x52, 0xcf, 0x5f, 0x99, 0x76, 0x0f, 0x26, 0x53, 0xbb, 0x71, 0x35, + 0xb5, 0x1b, 0xd7, 0x53, 0xbb, 0xf1, 0xb5, 0xb4, 0x8d, 0x49, 0x69, 0x1b, 0x57, 0xa5, 0x6d, 0x5c, + 0x97, 0xb6, 0xf1, 0xab, 0xb4, 0x8d, 0x6f, 0xbf, 0xed, 0xc6, 0xfb, 0x07, 0xf5, 0x6f, 0xfe, 0x13, + 0x00, 0x00, 0xff, 0xff, 0x51, 0x34, 0x6a, 0x0f, 0x77, 0x04, 0x00, 0x00, +} + func (m *Lease) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -80,33 +187,42 @@ func (m *Lease) Marshal() (dAtA []byte, err error) { } func (m *Lease) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *LeaseList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -114,37 +230,46 @@ func (m *LeaseList) Marshal() (dAtA []byte, err error) { } func (m *LeaseList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *LeaseSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -152,59 +277,74 @@ func (m *LeaseSpec) Marshal() (dAtA []byte, err error) { } func (m *LeaseSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.HolderIdentity != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HolderIdentity))) - i += copy(dAtA[i:], *m.HolderIdentity) + if m.LeaseTransitions != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) + i-- + dAtA[i] = 0x28 } - if m.LeaseDurationSeconds != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseDurationSeconds)) + if m.RenewTime != nil { + { + size, err := m.RenewTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } if m.AcquireTime != nil { - dAtA[i] = 0x1a - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.AcquireTime.Size())) - n4, err := m.AcquireTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.AcquireTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- + dAtA[i] = 0x1a } - if m.RenewTime != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RenewTime.Size())) - n5, err := m.RenewTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 + if m.LeaseDurationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseDurationSeconds)) + i-- + dAtA[i] = 0x10 } - if m.LeaseTransitions != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) + if m.HolderIdentity != nil { + i -= len(*m.HolderIdentity) + copy(dAtA[i:], *m.HolderIdentity) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HolderIdentity))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *Lease) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -215,6 +355,9 @@ func (m *Lease) Size() (n int) { } func (m *LeaseList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -229,6 +372,9 @@ func (m *LeaseList) Size() (n int) { } func (m *LeaseSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.HolderIdentity != nil { @@ -253,14 +399,7 @@ func (m *LeaseSpec) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -270,7 +409,7 @@ func (this *Lease) String() string { return "nil" } s := strings.Join([]string{`&Lease{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LeaseSpec", "LeaseSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -280,9 +419,14 @@ func (this *LeaseList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Lease{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Lease", "Lease", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&LeaseList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Lease", "Lease", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -294,8 +438,8 @@ func (this *LeaseSpec) String() string { s := strings.Join([]string{`&LeaseSpec{`, `HolderIdentity:` + 
valueToStringGenerated(this.HolderIdentity) + `,`, `LeaseDurationSeconds:` + valueToStringGenerated(this.LeaseDurationSeconds) + `,`, - `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1) + `,`, - `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1) + `,`, + `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "v1.MicroTime", 1) + `,`, + `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`, `LeaseTransitions:` + valueToStringGenerated(this.LeaseTransitions) + `,`, `}`, }, "") @@ -324,7 +468,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -352,7 +496,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -361,6 +505,9 @@ func (m *Lease) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -382,7 +529,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -391,6 +538,9 @@ func (m *Lease) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -407,6 +557,9 @@ func (m *Lease) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -434,7 +587,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -462,7 +615,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -471,6 +624,9 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -492,7 +648,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -501,6 +657,9 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -518,6 +677,9 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -545,7 +707,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -573,7 +735,7 @@ func (m *LeaseSpec) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -583,6 +745,9 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -603,7 +768,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -623,7 +788,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -632,11 +797,14 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AcquireTime == nil { - m.AcquireTime = &k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime{} + m.AcquireTime = &v1.MicroTime{} } if err := m.AcquireTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -656,7 +824,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -665,11 +833,14 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.RenewTime == nil { - m.RenewTime = &k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime{} + m.RenewTime = &v1.MicroTime{} } if err := m.RenewTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -689,7 +860,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -704,6 +875,9 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -770,10 +944,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -802,6 +979,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -820,45 +1000,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 540 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x91, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0x86, 0xe3, 0xb6, 0x11, 0xcd, 0x86, 0x96, 0xc8, 0xca, 0xc1, 0xca, 0xc1, 0xae, 0x72, 0x40, - 0x15, 0x52, 0x77, 0x49, 0x85, 0x10, 0xe2, 0x04, 0x16, 0x87, 0x56, 0xb8, 0x42, 0x72, 0x7b, 0x42, - 0x3d, 0xb0, 0xb6, 0x07, 0x67, 0x49, 0xed, 0x35, 0xbb, 0xeb, 0xa0, 0xde, 0x78, 0x04, 
0xae, 0xbc, - 0x08, 0xbc, 0x42, 0x8e, 0x3d, 0xf6, 0x64, 0x11, 0xf3, 0x22, 0xc8, 0x1b, 0xb7, 0x09, 0x49, 0x51, - 0x23, 0x6e, 0xde, 0x99, 0xf9, 0xbf, 0xf9, 0xe7, 0x37, 0x3a, 0x1a, 0xbd, 0x90, 0x98, 0x71, 0x32, - 0xca, 0x03, 0x10, 0x29, 0x28, 0x90, 0x64, 0x0c, 0x69, 0xc4, 0x05, 0xa9, 0x1b, 0x34, 0x63, 0x24, - 0xe4, 0x5c, 0x44, 0x2c, 0xa5, 0x8a, 0xf1, 0x94, 0x8c, 0x07, 0x01, 0x28, 0x3a, 0x20, 0x31, 0xa4, - 0x20, 0xa8, 0x82, 0x08, 0x67, 0x82, 0x2b, 0x6e, 0x3a, 0x33, 0x01, 0xa6, 0x19, 0xc3, 0x8b, 0x02, - 0x5c, 0x0b, 0x7a, 0x07, 0x31, 0x53, 0xc3, 0x3c, 0xc0, 0x21, 0x4f, 0x48, 0xcc, 0x63, 0x4e, 0xb4, - 0x2e, 0xc8, 0x3f, 0xea, 0x97, 0x7e, 0xe8, 0xaf, 0x19, 0xaf, 0xf7, 0x6c, 0x6e, 0x20, 0xa1, 0xe1, - 0x90, 0xa5, 0x20, 0x2e, 0x49, 0x36, 0x8a, 0xab, 0x82, 0x24, 0x09, 0x28, 0x4a, 0xc6, 0x2b, 0x2e, - 0x7a, 0xe4, 0x5f, 0x2a, 0x91, 0xa7, 0x8a, 0x25, 0xb0, 0x22, 0x78, 0x7e, 0x9f, 0x40, 0x86, 0x43, - 0x48, 0xe8, 0xb2, 0xae, 0xff, 0xd3, 0x40, 0x4d, 0x0f, 0xa8, 0x04, 0xf3, 0x03, 0xda, 0xae, 0xdc, - 0x44, 0x54, 0x51, 0xcb, 0xd8, 0x33, 0xf6, 0xdb, 0x87, 0x4f, 0xf1, 0x3c, 0x8b, 0x5b, 0x28, 0xce, - 0x46, 0x71, 0x55, 0x90, 0xb8, 0x9a, 0xc6, 0xe3, 0x01, 0x7e, 0x17, 0x7c, 0x82, 0x50, 0x9d, 0x80, - 0xa2, 0xae, 0x39, 0x29, 0x9c, 0x46, 0x59, 0x38, 0x68, 0x5e, 0xf3, 0x6f, 0xa9, 0xa6, 0x87, 0xb6, - 0x64, 0x06, 0xa1, 0xb5, 0xa1, 0xe9, 0x4f, 0xf0, 0x3d, 0x49, 0x63, 0xed, 0xeb, 0x34, 0x83, 0xd0, - 0x7d, 0x58, 0x73, 0xb7, 0xaa, 0x97, 0xaf, 0x29, 0xfd, 0x1f, 0x06, 0x6a, 0xe9, 0x09, 0x8f, 0x49, - 0x65, 0x9e, 0xaf, 0xb8, 0xc7, 0xeb, 0xb9, 0xaf, 0xd4, 0xda, 0x7b, 0xa7, 0xde, 0xb1, 0x7d, 0x53, - 0x59, 0x70, 0xfe, 0x16, 0x35, 0x99, 0x82, 0x44, 0x5a, 0x1b, 0x7b, 0x9b, 0xfb, 0xed, 0xc3, 0xc7, - 0xeb, 0x59, 0x77, 0x77, 0x6a, 0x64, 0xf3, 0xb8, 0x12, 0xfb, 0x33, 0x46, 0xff, 0xfb, 0x66, 0x6d, - 0xbc, 0x3a, 0xc6, 0x7c, 0x89, 0x76, 0x87, 0xfc, 0x22, 0x02, 0x71, 0x1c, 0x41, 0xaa, 0x98, 0xba, - 0xd4, 0xf6, 0x5b, 0xae, 0x59, 0x16, 0xce, 0xee, 0xd1, 0x5f, 0x1d, 0x7f, 0x69, 0xd2, 0xf4, 0x50, - 0xf7, 0xa2, 0x02, 0xbd, 0xc9, 0x85, 0x5e, 0x7f, 0x0a, 0x21, 0x4f, 0x23, 0xa9, 0x03, 0x6e, 0xba, - 0x56, 0x59, 0x38, 0x5d, 0xef, 0x8e, 0xbe, 0x7f, 0xa7, 0xca, 0x0c, 0x50, 0x9b, 0x86, 0x9f, 0x73, - 0x26, 0xe0, 0x8c, 0x25, 0x60, 0x6d, 0xea, 0x14, 0xc9, 0x7a, 0x29, 0x9e, 0xb0, 0x50, 0xf0, 0x4a, - 0xe6, 0x3e, 0x2a, 0x0b, 0xa7, 0xfd, 0x7a, 0xce, 0xf1, 0x17, 0xa1, 0xe6, 0x39, 0x6a, 0x09, 0x48, - 0xe1, 0x8b, 0xde, 0xb0, 0xf5, 0x7f, 0x1b, 0x76, 0xca, 0xc2, 0x69, 0xf9, 0x37, 0x14, 0x7f, 0x0e, - 0x34, 0x5f, 0xa1, 0x8e, 0xbe, 0xec, 0x4c, 0xd0, 0x54, 0xb2, 0xea, 0x36, 0x69, 0x35, 0x75, 0x16, - 0xdd, 0xb2, 0x70, 0x3a, 0xde, 0x52, 0xcf, 0x5f, 0x99, 0x76, 0x0f, 0x26, 0x53, 0xbb, 0x71, 0x35, - 0xb5, 0x1b, 0xd7, 0x53, 0xbb, 0xf1, 0xb5, 0xb4, 0x8d, 0x49, 0x69, 0x1b, 0x57, 0xa5, 0x6d, 0x5c, - 0x97, 0xb6, 0xf1, 0xab, 0xb4, 0x8d, 0x6f, 0xbf, 0xed, 0xc6, 0xfb, 0x07, 0xf5, 0x6f, 0xfe, 0x13, - 0x00, 0x00, 0xff, 0xff, 0x51, 0x34, 0x6a, 0x0f, 0x77, 0x04, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.proto b/vendor/k8s.io/api/coordination/v1beta1/generated.proto index 918e0de1c..cfc2711c6 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.proto @@ -30,12 +30,12 @@ option go_package = "v1beta1"; // Lease defines a lease concept. 
message Lease { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the Lease. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional LeaseSpec spec = 2; } @@ -43,7 +43,7 @@ message Lease { // LeaseList is a list of Lease objects. message LeaseList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/coordination/v1beta1/types.go b/vendor/k8s.io/api/coordination/v1beta1/types.go index 846f72802..da88f675c 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/types.go +++ b/vendor/k8s.io/api/coordination/v1beta1/types.go @@ -26,12 +26,12 @@ import ( // Lease defines a lease concept. type Lease struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the Lease. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec LeaseSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -65,7 +65,7 @@ type LeaseSpec struct { type LeaseList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go index 4532d322a..f557d265d 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go @@ -29,8 +29,8 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Lease = map[string]string{ "": "Lease defines a lease concept.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the Lease. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Lease) SwaggerDoc() map[string]string { @@ -39,7 +39,7 @@ func (Lease) SwaggerDoc() map[string]string { var map_LeaseList = map[string]string{ "": "LeaseList is a list of Lease objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } diff --git a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go index a628ac19b..de6962137 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go @@ -55,7 +55,7 @@ func (in *Lease) DeepCopyObject() runtime.Object { func (in *LeaseList) DeepCopyInto(out *LeaseList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Lease, len(*in)) diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go index 058e03eb9..8f788035e 100644 --- a/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -17,229 +17,26 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto - - It has these top-level messages: - AWSElasticBlockStoreVolumeSource - Affinity - AttachedVolume - AvoidPods - AzureDiskVolumeSource - AzureFilePersistentVolumeSource - AzureFileVolumeSource - Binding - CSIPersistentVolumeSource - CSIVolumeSource - Capabilities - CephFSPersistentVolumeSource - CephFSVolumeSource - CinderPersistentVolumeSource - CinderVolumeSource - ClientIPConfig - ComponentCondition - ComponentStatus - ComponentStatusList - ConfigMap - ConfigMapEnvSource - ConfigMapKeySelector - ConfigMapList - ConfigMapNodeConfigSource - ConfigMapProjection - ConfigMapVolumeSource - Container - ContainerImage - ContainerPort - ContainerState - ContainerStateRunning - ContainerStateTerminated - ContainerStateWaiting - ContainerStatus - DaemonEndpoint - DownwardAPIProjection - DownwardAPIVolumeFile - DownwardAPIVolumeSource - EmptyDirVolumeSource - EndpointAddress - EndpointPort - EndpointSubset - Endpoints - EndpointsList - EnvFromSource - EnvVar - EnvVarSource - Event - EventList - EventSeries - EventSource - ExecAction - FCVolumeSource - FlexPersistentVolumeSource - FlexVolumeSource - FlockerVolumeSource - GCEPersistentDiskVolumeSource - GitRepoVolumeSource - GlusterfsPersistentVolumeSource - GlusterfsVolumeSource - HTTPGetAction - HTTPHeader - Handler - HostAlias - HostPathVolumeSource - ISCSIPersistentVolumeSource - ISCSIVolumeSource - KeyToPath - Lifecycle - LimitRange - LimitRangeItem - LimitRangeList - LimitRangeSpec - List - LoadBalancerIngress - LoadBalancerStatus - LocalObjectReference - LocalVolumeSource - NFSVolumeSource - Namespace - NamespaceList - NamespaceSpec - NamespaceStatus - Node - NodeAddress - NodeAffinity - NodeCondition - NodeConfigSource - NodeConfigStatus - 
NodeDaemonEndpoints - NodeList - NodeProxyOptions - NodeResources - NodeSelector - NodeSelectorRequirement - NodeSelectorTerm - NodeSpec - NodeStatus - NodeSystemInfo - ObjectFieldSelector - ObjectReference - PersistentVolume - PersistentVolumeClaim - PersistentVolumeClaimCondition - PersistentVolumeClaimList - PersistentVolumeClaimSpec - PersistentVolumeClaimStatus - PersistentVolumeClaimVolumeSource - PersistentVolumeList - PersistentVolumeSource - PersistentVolumeSpec - PersistentVolumeStatus - PhotonPersistentDiskVolumeSource - Pod - PodAffinity - PodAffinityTerm - PodAntiAffinity - PodAttachOptions - PodCondition - PodDNSConfig - PodDNSConfigOption - PodExecOptions - PodList - PodLogOptions - PodPortForwardOptions - PodProxyOptions - PodReadinessGate - PodSecurityContext - PodSignature - PodSpec - PodStatus - PodStatusResult - PodTemplate - PodTemplateList - PodTemplateSpec - PortworxVolumeSource - Preconditions - PreferAvoidPodsEntry - PreferredSchedulingTerm - Probe - ProjectedVolumeSource - QuobyteVolumeSource - RBDPersistentVolumeSource - RBDVolumeSource - RangeAllocation - ReplicationController - ReplicationControllerCondition - ReplicationControllerList - ReplicationControllerSpec - ReplicationControllerStatus - ResourceFieldSelector - ResourceQuota - ResourceQuotaList - ResourceQuotaSpec - ResourceQuotaStatus - ResourceRequirements - SELinuxOptions - ScaleIOPersistentVolumeSource - ScaleIOVolumeSource - ScopeSelector - ScopedResourceSelectorRequirement - Secret - SecretEnvSource - SecretKeySelector - SecretList - SecretProjection - SecretReference - SecretVolumeSource - SecurityContext - SerializedReference - Service - ServiceAccount - ServiceAccountList - ServiceAccountTokenProjection - ServiceList - ServicePort - ServiceProxyOptions - ServiceSpec - ServiceStatus - SessionAffinityConfig - StorageOSPersistentVolumeSource - StorageOSVolumeSource - Sysctl - TCPSocketAction - Taint - Toleration - TopologySelectorLabelRequirement - TopologySelectorTerm - TypedLocalObjectReference - Volume - VolumeDevice - VolumeMount - VolumeNodeAffinity - VolumeProjection - VolumeSource - VsphereVirtualDiskVolumeSource - WeightedPodAffinityTerm -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import k8s_io_apimachinery_pkg_runtime "k8s.io/apimachinery/pkg/runtime" + io "io" -import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import strings "strings" -import reflect "reflect" - -import io "io" + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal @@ -255,17278 +52,29053 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *AWSElasticBlockStoreVolumeSource) Reset() { *m = AWSElasticBlockStoreVolumeSource{} } func (*AWSElasticBlockStoreVolumeSource) ProtoMessage() {} func (*AWSElasticBlockStoreVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} + return fileDescriptor_83c10c24ec417dc9, []int{0} } - -func (m *Affinity) Reset() { *m = Affinity{} } -func (*Affinity) ProtoMessage() {} -func (*Affinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *AttachedVolume) Reset() { *m = AttachedVolume{} } -func (*AttachedVolume) ProtoMessage() {} -func (*AttachedVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *AvoidPods) Reset() { *m = AvoidPods{} } -func (*AvoidPods) ProtoMessage() {} -func (*AvoidPods) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *AzureDiskVolumeSource) Reset() { *m = AzureDiskVolumeSource{} } -func (*AzureDiskVolumeSource) ProtoMessage() {} -func (*AzureDiskVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *AzureFilePersistentVolumeSource) Reset() { *m = AzureFilePersistentVolumeSource{} } -func (*AzureFilePersistentVolumeSource) ProtoMessage() {} -func (*AzureFilePersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} +func (m *AWSElasticBlockStoreVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AWSElasticBlockStoreVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AWSElasticBlockStoreVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_AWSElasticBlockStoreVolumeSource.Merge(m, src) +} +func (m *AWSElasticBlockStoreVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *AWSElasticBlockStoreVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_AWSElasticBlockStoreVolumeSource.DiscardUnknown(m) } -func (m *AzureFileVolumeSource) Reset() { *m = AzureFileVolumeSource{} } -func (*AzureFileVolumeSource) ProtoMessage() {} -func (*AzureFileVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *Binding) Reset() { *m = Binding{} } -func (*Binding) ProtoMessage() {} -func (*Binding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_AWSElasticBlockStoreVolumeSource proto.InternalMessageInfo -func (m *CSIPersistentVolumeSource) Reset() { *m = CSIPersistentVolumeSource{} } -func (*CSIPersistentVolumeSource) ProtoMessage() {} -func (*CSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{8} +func (m *Affinity) Reset() { *m = Affinity{} } +func (*Affinity) ProtoMessage() {} +func (*Affinity) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{1} +} +func (m *Affinity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Affinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Affinity) XXX_Merge(src proto.Message) { + xxx_messageInfo_Affinity.Merge(m, src) +} +func (m *Affinity) XXX_Size() int { + return m.Size() +} +func (m 
*Affinity) XXX_DiscardUnknown() { + xxx_messageInfo_Affinity.DiscardUnknown(m) } -func (m *CSIVolumeSource) Reset() { *m = CSIVolumeSource{} } -func (*CSIVolumeSource) ProtoMessage() {} -func (*CSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } - -func (m *Capabilities) Reset() { *m = Capabilities{} } -func (*Capabilities) ProtoMessage() {} -func (*Capabilities) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +var xxx_messageInfo_Affinity proto.InternalMessageInfo -func (m *CephFSPersistentVolumeSource) Reset() { *m = CephFSPersistentVolumeSource{} } -func (*CephFSPersistentVolumeSource) ProtoMessage() {} -func (*CephFSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{11} +func (m *AttachedVolume) Reset() { *m = AttachedVolume{} } +func (*AttachedVolume) ProtoMessage() {} +func (*AttachedVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{2} +} +func (m *AttachedVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AttachedVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AttachedVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttachedVolume.Merge(m, src) +} +func (m *AttachedVolume) XXX_Size() int { + return m.Size() +} +func (m *AttachedVolume) XXX_DiscardUnknown() { + xxx_messageInfo_AttachedVolume.DiscardUnknown(m) } -func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} } -func (*CephFSVolumeSource) ProtoMessage() {} -func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +var xxx_messageInfo_AttachedVolume proto.InternalMessageInfo -func (m *CinderPersistentVolumeSource) Reset() { *m = CinderPersistentVolumeSource{} } -func (*CinderPersistentVolumeSource) ProtoMessage() {} -func (*CinderPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{13} +func (m *AvoidPods) Reset() { *m = AvoidPods{} } +func (*AvoidPods) ProtoMessage() {} +func (*AvoidPods) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{3} +} +func (m *AvoidPods) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AvoidPods) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AvoidPods) XXX_Merge(src proto.Message) { + xxx_messageInfo_AvoidPods.Merge(m, src) +} +func (m *AvoidPods) XXX_Size() int { + return m.Size() +} +func (m *AvoidPods) XXX_DiscardUnknown() { + xxx_messageInfo_AvoidPods.DiscardUnknown(m) } -func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} } -func (*CinderVolumeSource) ProtoMessage() {} -func (*CinderVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +var xxx_messageInfo_AvoidPods proto.InternalMessageInfo -func (m *ClientIPConfig) Reset() { *m = ClientIPConfig{} } -func (*ClientIPConfig) ProtoMessage() {} -func (*ClientIPConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (m *AzureDiskVolumeSource) Reset() { *m = AzureDiskVolumeSource{} } +func (*AzureDiskVolumeSource) ProtoMessage() {} +func (*AzureDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{4} +} +func (m 
*AzureDiskVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AzureDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AzureDiskVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_AzureDiskVolumeSource.Merge(m, src) +} +func (m *AzureDiskVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *AzureDiskVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_AzureDiskVolumeSource.DiscardUnknown(m) +} -func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } -func (*ComponentCondition) ProtoMessage() {} -func (*ComponentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +var xxx_messageInfo_AzureDiskVolumeSource proto.InternalMessageInfo -func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } -func (*ComponentStatus) ProtoMessage() {} -func (*ComponentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (m *AzureFilePersistentVolumeSource) Reset() { *m = AzureFilePersistentVolumeSource{} } +func (*AzureFilePersistentVolumeSource) ProtoMessage() {} +func (*AzureFilePersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{5} +} +func (m *AzureFilePersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AzureFilePersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AzureFilePersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_AzureFilePersistentVolumeSource.Merge(m, src) +} +func (m *AzureFilePersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *AzureFilePersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_AzureFilePersistentVolumeSource.DiscardUnknown(m) +} -func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } -func (*ComponentStatusList) ProtoMessage() {} -func (*ComponentStatusList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +var xxx_messageInfo_AzureFilePersistentVolumeSource proto.InternalMessageInfo -func (m *ConfigMap) Reset() { *m = ConfigMap{} } -func (*ConfigMap) ProtoMessage() {} -func (*ConfigMap) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (m *AzureFileVolumeSource) Reset() { *m = AzureFileVolumeSource{} } +func (*AzureFileVolumeSource) ProtoMessage() {} +func (*AzureFileVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{6} +} +func (m *AzureFileVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AzureFileVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AzureFileVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_AzureFileVolumeSource.Merge(m, src) +} +func (m *AzureFileVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *AzureFileVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_AzureFileVolumeSource.DiscardUnknown(m) +} -func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } -func (*ConfigMapEnvSource) ProtoMessage() {} -func (*ConfigMapEnvSource) Descriptor() ([]byte, 
[]int) { return fileDescriptorGenerated, []int{20} } +var xxx_messageInfo_AzureFileVolumeSource proto.InternalMessageInfo -func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } -func (*ConfigMapKeySelector) ProtoMessage() {} -func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (m *Binding) Reset() { *m = Binding{} } +func (*Binding) ProtoMessage() {} +func (*Binding) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{7} +} +func (m *Binding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Binding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Binding) XXX_Merge(src proto.Message) { + xxx_messageInfo_Binding.Merge(m, src) +} +func (m *Binding) XXX_Size() int { + return m.Size() +} +func (m *Binding) XXX_DiscardUnknown() { + xxx_messageInfo_Binding.DiscardUnknown(m) +} -func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } -func (*ConfigMapList) ProtoMessage() {} -func (*ConfigMapList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +var xxx_messageInfo_Binding proto.InternalMessageInfo -func (m *ConfigMapNodeConfigSource) Reset() { *m = ConfigMapNodeConfigSource{} } -func (*ConfigMapNodeConfigSource) ProtoMessage() {} -func (*ConfigMapNodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{23} +func (m *CSIPersistentVolumeSource) Reset() { *m = CSIPersistentVolumeSource{} } +func (*CSIPersistentVolumeSource) ProtoMessage() {} +func (*CSIPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{8} +} +func (m *CSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSIPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSIPersistentVolumeSource.Merge(m, src) +} +func (m *CSIPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *CSIPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CSIPersistentVolumeSource.DiscardUnknown(m) } -func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } -func (*ConfigMapProjection) ProtoMessage() {} -func (*ConfigMapProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +var xxx_messageInfo_CSIPersistentVolumeSource proto.InternalMessageInfo -func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } -func (*ConfigMapVolumeSource) ProtoMessage() {} -func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (m *CSIVolumeSource) Reset() { *m = CSIVolumeSource{} } +func (*CSIVolumeSource) ProtoMessage() {} +func (*CSIVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{9} +} +func (m *CSIVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSIVolumeSource) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_CSIVolumeSource.Merge(m, src) +} +func (m *CSIVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *CSIVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CSIVolumeSource.DiscardUnknown(m) +} -func (m *Container) Reset() { *m = Container{} } -func (*Container) ProtoMessage() {} -func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +var xxx_messageInfo_CSIVolumeSource proto.InternalMessageInfo -func (m *ContainerImage) Reset() { *m = ContainerImage{} } -func (*ContainerImage) ProtoMessage() {} -func (*ContainerImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (m *Capabilities) Reset() { *m = Capabilities{} } +func (*Capabilities) ProtoMessage() {} +func (*Capabilities) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{10} +} +func (m *Capabilities) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Capabilities) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Capabilities) XXX_Merge(src proto.Message) { + xxx_messageInfo_Capabilities.Merge(m, src) +} +func (m *Capabilities) XXX_Size() int { + return m.Size() +} +func (m *Capabilities) XXX_DiscardUnknown() { + xxx_messageInfo_Capabilities.DiscardUnknown(m) +} -func (m *ContainerPort) Reset() { *m = ContainerPort{} } -func (*ContainerPort) ProtoMessage() {} -func (*ContainerPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +var xxx_messageInfo_Capabilities proto.InternalMessageInfo -func (m *ContainerState) Reset() { *m = ContainerState{} } -func (*ContainerState) ProtoMessage() {} -func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (m *CephFSPersistentVolumeSource) Reset() { *m = CephFSPersistentVolumeSource{} } +func (*CephFSPersistentVolumeSource) ProtoMessage() {} +func (*CephFSPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{11} +} +func (m *CephFSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CephFSPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CephFSPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_CephFSPersistentVolumeSource.Merge(m, src) +} +func (m *CephFSPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *CephFSPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CephFSPersistentVolumeSource.DiscardUnknown(m) +} -func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } -func (*ContainerStateRunning) ProtoMessage() {} -func (*ContainerStateRunning) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +var xxx_messageInfo_CephFSPersistentVolumeSource proto.InternalMessageInfo -func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } -func (*ContainerStateTerminated) ProtoMessage() {} -func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{31} +func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} } +func (*CephFSVolumeSource) ProtoMessage() {} +func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { + return 
fileDescriptor_83c10c24ec417dc9, []int{12} +} +func (m *CephFSVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CephFSVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CephFSVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_CephFSVolumeSource.Merge(m, src) +} +func (m *CephFSVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *CephFSVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CephFSVolumeSource.DiscardUnknown(m) } -func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } -func (*ContainerStateWaiting) ProtoMessage() {} -func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +var xxx_messageInfo_CephFSVolumeSource proto.InternalMessageInfo -func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } -func (*ContainerStatus) ProtoMessage() {} -func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +func (m *CinderPersistentVolumeSource) Reset() { *m = CinderPersistentVolumeSource{} } +func (*CinderPersistentVolumeSource) ProtoMessage() {} +func (*CinderPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{13} +} +func (m *CinderPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CinderPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CinderPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_CinderPersistentVolumeSource.Merge(m, src) +} +func (m *CinderPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *CinderPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CinderPersistentVolumeSource.DiscardUnknown(m) +} -func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } -func (*DaemonEndpoint) ProtoMessage() {} -func (*DaemonEndpoint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } +var xxx_messageInfo_CinderPersistentVolumeSource proto.InternalMessageInfo -func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } -func (*DownwardAPIProjection) ProtoMessage() {} -func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} } +func (*CinderVolumeSource) ProtoMessage() {} +func (*CinderVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{14} +} +func (m *CinderVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CinderVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CinderVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_CinderVolumeSource.Merge(m, src) +} +func (m *CinderVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *CinderVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CinderVolumeSource.DiscardUnknown(m) +} -func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } -func (*DownwardAPIVolumeFile) ProtoMessage() {} -func (*DownwardAPIVolumeFile) 
Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +var xxx_messageInfo_CinderVolumeSource proto.InternalMessageInfo -func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } -func (*DownwardAPIVolumeSource) ProtoMessage() {} -func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{37} +func (m *ClientIPConfig) Reset() { *m = ClientIPConfig{} } +func (*ClientIPConfig) ProtoMessage() {} +func (*ClientIPConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{15} +} +func (m *ClientIPConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClientIPConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClientIPConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientIPConfig.Merge(m, src) +} +func (m *ClientIPConfig) XXX_Size() int { + return m.Size() +} +func (m *ClientIPConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ClientIPConfig.DiscardUnknown(m) } -func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } -func (*EmptyDirVolumeSource) ProtoMessage() {} -func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +var xxx_messageInfo_ClientIPConfig proto.InternalMessageInfo -func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } -func (*EndpointAddress) ProtoMessage() {} -func (*EndpointAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } +func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } +func (*ComponentCondition) ProtoMessage() {} +func (*ComponentCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{16} +} +func (m *ComponentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ComponentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ComponentCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_ComponentCondition.Merge(m, src) +} +func (m *ComponentCondition) XXX_Size() int { + return m.Size() +} +func (m *ComponentCondition) XXX_DiscardUnknown() { + xxx_messageInfo_ComponentCondition.DiscardUnknown(m) +} -func (m *EndpointPort) Reset() { *m = EndpointPort{} } -func (*EndpointPort) ProtoMessage() {} -func (*EndpointPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +var xxx_messageInfo_ComponentCondition proto.InternalMessageInfo -func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } -func (*EndpointSubset) ProtoMessage() {} -func (*EndpointSubset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } +func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } +func (*ComponentStatus) ProtoMessage() {} +func (*ComponentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{17} +} +func (m *ComponentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ComponentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ComponentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ComponentStatus.Merge(m, src) +} +func (m 
*ComponentStatus) XXX_Size() int { + return m.Size() +} +func (m *ComponentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ComponentStatus.DiscardUnknown(m) +} -func (m *Endpoints) Reset() { *m = Endpoints{} } -func (*Endpoints) ProtoMessage() {} -func (*Endpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } +var xxx_messageInfo_ComponentStatus proto.InternalMessageInfo -func (m *EndpointsList) Reset() { *m = EndpointsList{} } -func (*EndpointsList) ProtoMessage() {} -func (*EndpointsList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } +func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } +func (*ComponentStatusList) ProtoMessage() {} +func (*ComponentStatusList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{18} +} +func (m *ComponentStatusList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ComponentStatusList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ComponentStatusList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ComponentStatusList.Merge(m, src) +} +func (m *ComponentStatusList) XXX_Size() int { + return m.Size() +} +func (m *ComponentStatusList) XXX_DiscardUnknown() { + xxx_messageInfo_ComponentStatusList.DiscardUnknown(m) +} -func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } -func (*EnvFromSource) ProtoMessage() {} -func (*EnvFromSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } +var xxx_messageInfo_ComponentStatusList proto.InternalMessageInfo -func (m *EnvVar) Reset() { *m = EnvVar{} } -func (*EnvVar) ProtoMessage() {} -func (*EnvVar) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } +func (m *ConfigMap) Reset() { *m = ConfigMap{} } +func (*ConfigMap) ProtoMessage() {} +func (*ConfigMap) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{19} +} +func (m *ConfigMap) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMap) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMap.Merge(m, src) +} +func (m *ConfigMap) XXX_Size() int { + return m.Size() +} +func (m *ConfigMap) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMap.DiscardUnknown(m) +} -func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } -func (*EnvVarSource) ProtoMessage() {} -func (*EnvVarSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } +var xxx_messageInfo_ConfigMap proto.InternalMessageInfo -func (m *Event) Reset() { *m = Event{} } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } +func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } +func (*ConfigMapEnvSource) ProtoMessage() {} +func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{20} +} +func (m *ConfigMapEnvSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapEnvSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapEnvSource) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapEnvSource.Merge(m, src) +} +func (m *ConfigMapEnvSource) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapEnvSource) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapEnvSource.DiscardUnknown(m) +} -func (m *EventList) Reset() { *m = EventList{} } -func (*EventList) ProtoMessage() {} -func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{48} } +var xxx_messageInfo_ConfigMapEnvSource proto.InternalMessageInfo -func (m *EventSeries) Reset() { *m = EventSeries{} } -func (*EventSeries) ProtoMessage() {} -func (*EventSeries) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } +func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } +func (*ConfigMapKeySelector) ProtoMessage() {} +func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{21} +} +func (m *ConfigMapKeySelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapKeySelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapKeySelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapKeySelector.Merge(m, src) +} +func (m *ConfigMapKeySelector) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapKeySelector) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapKeySelector.DiscardUnknown(m) +} -func (m *EventSource) Reset() { *m = EventSource{} } -func (*EventSource) ProtoMessage() {} -func (*EventSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } +var xxx_messageInfo_ConfigMapKeySelector proto.InternalMessageInfo -func (m *ExecAction) Reset() { *m = ExecAction{} } -func (*ExecAction) ProtoMessage() {} -func (*ExecAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } +func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } +func (*ConfigMapList) ProtoMessage() {} +func (*ConfigMapList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{22} +} +func (m *ConfigMapList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapList.Merge(m, src) +} +func (m *ConfigMapList) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapList) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapList.DiscardUnknown(m) +} -func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } -func (*FCVolumeSource) ProtoMessage() {} -func (*FCVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } +var xxx_messageInfo_ConfigMapList proto.InternalMessageInfo -func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} } -func (*FlexPersistentVolumeSource) ProtoMessage() {} -func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{53} +func (m *ConfigMapNodeConfigSource) Reset() { *m = ConfigMapNodeConfigSource{} } +func (*ConfigMapNodeConfigSource) ProtoMessage() {} +func (*ConfigMapNodeConfigSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{23} +} +func (m *ConfigMapNodeConfigSource) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapNodeConfigSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapNodeConfigSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapNodeConfigSource.Merge(m, src) +} +func (m *ConfigMapNodeConfigSource) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapNodeConfigSource) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapNodeConfigSource.DiscardUnknown(m) } -func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } -func (*FlexVolumeSource) ProtoMessage() {} -func (*FlexVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } - -func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } -func (*FlockerVolumeSource) ProtoMessage() {} -func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } +var xxx_messageInfo_ConfigMapNodeConfigSource proto.InternalMessageInfo -func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } -func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} -func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{56} +func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } +func (*ConfigMapProjection) ProtoMessage() {} +func (*ConfigMapProjection) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{24} +} +func (m *ConfigMapProjection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapProjection) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapProjection.Merge(m, src) +} +func (m *ConfigMapProjection) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapProjection) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapProjection.DiscardUnknown(m) } -func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } -func (*GitRepoVolumeSource) ProtoMessage() {} -func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } +var xxx_messageInfo_ConfigMapProjection proto.InternalMessageInfo -func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} } -func (*GlusterfsPersistentVolumeSource) ProtoMessage() {} -func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{58} +func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } +func (*ConfigMapVolumeSource) ProtoMessage() {} +func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{25} +} +func (m *ConfigMapVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapVolumeSource.Merge(m, src) +} +func (m *ConfigMapVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapVolumeSource) XXX_DiscardUnknown() { + 
xxx_messageInfo_ConfigMapVolumeSource.DiscardUnknown(m) } -func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } -func (*GlusterfsVolumeSource) ProtoMessage() {} -func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } - -func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } -func (*HTTPGetAction) ProtoMessage() {} -func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } +var xxx_messageInfo_ConfigMapVolumeSource proto.InternalMessageInfo -func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } -func (*HTTPHeader) ProtoMessage() {} -func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} } +func (m *Container) Reset() { *m = Container{} } +func (*Container) ProtoMessage() {} +func (*Container) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{26} +} +func (m *Container) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Container) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Container) XXX_Merge(src proto.Message) { + xxx_messageInfo_Container.Merge(m, src) +} +func (m *Container) XXX_Size() int { + return m.Size() +} +func (m *Container) XXX_DiscardUnknown() { + xxx_messageInfo_Container.DiscardUnknown(m) +} -func (m *Handler) Reset() { *m = Handler{} } -func (*Handler) ProtoMessage() {} -func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } +var xxx_messageInfo_Container proto.InternalMessageInfo -func (m *HostAlias) Reset() { *m = HostAlias{} } -func (*HostAlias) ProtoMessage() {} -func (*HostAlias) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{63} } +func (m *ContainerImage) Reset() { *m = ContainerImage{} } +func (*ContainerImage) ProtoMessage() {} +func (*ContainerImage) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{27} +} +func (m *ContainerImage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerImage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerImage) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerImage.Merge(m, src) +} +func (m *ContainerImage) XXX_Size() int { + return m.Size() +} +func (m *ContainerImage) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerImage.DiscardUnknown(m) +} -func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } -func (*HostPathVolumeSource) ProtoMessage() {} -func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } +var xxx_messageInfo_ContainerImage proto.InternalMessageInfo -func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } -func (*ISCSIPersistentVolumeSource) ProtoMessage() {} -func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{65} +func (m *ContainerPort) Reset() { *m = ContainerPort{} } +func (*ContainerPort) ProtoMessage() {} +func (*ContainerPort) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{28} +} +func (m *ContainerPort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerPort.Merge(m, src) +} +func (m *ContainerPort) XXX_Size() int { + return m.Size() +} +func (m *ContainerPort) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerPort.DiscardUnknown(m) } -func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } -func (*ISCSIVolumeSource) ProtoMessage() {} -func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } - -func (m *KeyToPath) Reset() { *m = KeyToPath{} } -func (*KeyToPath) ProtoMessage() {} -func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } +var xxx_messageInfo_ContainerPort proto.InternalMessageInfo -func (m *Lifecycle) Reset() { *m = Lifecycle{} } -func (*Lifecycle) ProtoMessage() {} -func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } +func (m *ContainerState) Reset() { *m = ContainerState{} } +func (*ContainerState) ProtoMessage() {} +func (*ContainerState) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{29} +} +func (m *ContainerState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerState) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerState.Merge(m, src) +} +func (m *ContainerState) XXX_Size() int { + return m.Size() +} +func (m *ContainerState) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerState.DiscardUnknown(m) +} -func (m *LimitRange) Reset() { *m = LimitRange{} } -func (*LimitRange) ProtoMessage() {} -func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } +var xxx_messageInfo_ContainerState proto.InternalMessageInfo -func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } -func (*LimitRangeItem) ProtoMessage() {} -func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } +func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } +func (*ContainerStateRunning) ProtoMessage() {} +func (*ContainerStateRunning) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{30} +} +func (m *ContainerStateRunning) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerStateRunning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerStateRunning) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerStateRunning.Merge(m, src) +} +func (m *ContainerStateRunning) XXX_Size() int { + return m.Size() +} +func (m *ContainerStateRunning) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerStateRunning.DiscardUnknown(m) +} -func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } -func (*LimitRangeList) ProtoMessage() {} -func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } +var xxx_messageInfo_ContainerStateRunning proto.InternalMessageInfo -func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } -func (*LimitRangeSpec) ProtoMessage() {} -func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{72} } +func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } +func (*ContainerStateTerminated) ProtoMessage() {} +func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{31} +} +func (m *ContainerStateTerminated) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerStateTerminated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerStateTerminated) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerStateTerminated.Merge(m, src) +} +func (m *ContainerStateTerminated) XXX_Size() int { + return m.Size() +} +func (m *ContainerStateTerminated) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerStateTerminated.DiscardUnknown(m) +} -func (m *List) Reset() { *m = List{} } -func (*List) ProtoMessage() {} -func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } +var xxx_messageInfo_ContainerStateTerminated proto.InternalMessageInfo -func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } -func (*LoadBalancerIngress) ProtoMessage() {} -func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } +func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } +func (*ContainerStateWaiting) ProtoMessage() {} +func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{32} +} +func (m *ContainerStateWaiting) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerStateWaiting) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerStateWaiting) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerStateWaiting.Merge(m, src) +} +func (m *ContainerStateWaiting) XXX_Size() int { + return m.Size() +} +func (m *ContainerStateWaiting) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerStateWaiting.DiscardUnknown(m) +} -func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } -func (*LoadBalancerStatus) ProtoMessage() {} -func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } +var xxx_messageInfo_ContainerStateWaiting proto.InternalMessageInfo -func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } -func (*LocalObjectReference) ProtoMessage() {} -func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } +func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } +func (*ContainerStatus) ProtoMessage() {} +func (*ContainerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{33} +} +func (m *ContainerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerStatus.Merge(m, src) +} +func (m *ContainerStatus) XXX_Size() int { + return m.Size() +} +func (m *ContainerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerStatus.DiscardUnknown(m) +} -func (m *LocalVolumeSource) 
Reset() { *m = LocalVolumeSource{} } -func (*LocalVolumeSource) ProtoMessage() {} -func (*LocalVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } +var xxx_messageInfo_ContainerStatus proto.InternalMessageInfo -func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } -func (*NFSVolumeSource) ProtoMessage() {} -func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } +func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } +func (*DaemonEndpoint) ProtoMessage() {} +func (*DaemonEndpoint) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{34} +} +func (m *DaemonEndpoint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonEndpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonEndpoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonEndpoint.Merge(m, src) +} +func (m *DaemonEndpoint) XXX_Size() int { + return m.Size() +} +func (m *DaemonEndpoint) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonEndpoint.DiscardUnknown(m) +} -func (m *Namespace) Reset() { *m = Namespace{} } -func (*Namespace) ProtoMessage() {} -func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} } +var xxx_messageInfo_DaemonEndpoint proto.InternalMessageInfo -func (m *NamespaceList) Reset() { *m = NamespaceList{} } -func (*NamespaceList) ProtoMessage() {} -func (*NamespaceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } +func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } +func (*DownwardAPIProjection) ProtoMessage() {} +func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{35} +} +func (m *DownwardAPIProjection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DownwardAPIProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DownwardAPIProjection) XXX_Merge(src proto.Message) { + xxx_messageInfo_DownwardAPIProjection.Merge(m, src) +} +func (m *DownwardAPIProjection) XXX_Size() int { + return m.Size() +} +func (m *DownwardAPIProjection) XXX_DiscardUnknown() { + xxx_messageInfo_DownwardAPIProjection.DiscardUnknown(m) +} -func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } -func (*NamespaceSpec) ProtoMessage() {} -func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{81} } +var xxx_messageInfo_DownwardAPIProjection proto.InternalMessageInfo -func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } -func (*NamespaceStatus) ProtoMessage() {} -func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } +func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } +func (*DownwardAPIVolumeFile) ProtoMessage() {} +func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{36} +} +func (m *DownwardAPIVolumeFile) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DownwardAPIVolumeFile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m 
*DownwardAPIVolumeFile) XXX_Merge(src proto.Message) { + xxx_messageInfo_DownwardAPIVolumeFile.Merge(m, src) +} +func (m *DownwardAPIVolumeFile) XXX_Size() int { + return m.Size() +} +func (m *DownwardAPIVolumeFile) XXX_DiscardUnknown() { + xxx_messageInfo_DownwardAPIVolumeFile.DiscardUnknown(m) +} -func (m *Node) Reset() { *m = Node{} } -func (*Node) ProtoMessage() {} -func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } +var xxx_messageInfo_DownwardAPIVolumeFile proto.InternalMessageInfo -func (m *NodeAddress) Reset() { *m = NodeAddress{} } -func (*NodeAddress) ProtoMessage() {} -func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } +func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } +func (*DownwardAPIVolumeSource) ProtoMessage() {} +func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{37} +} +func (m *DownwardAPIVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DownwardAPIVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DownwardAPIVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_DownwardAPIVolumeSource.Merge(m, src) +} +func (m *DownwardAPIVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *DownwardAPIVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_DownwardAPIVolumeSource.DiscardUnknown(m) +} -func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } -func (*NodeAffinity) ProtoMessage() {} -func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } +var xxx_messageInfo_DownwardAPIVolumeSource proto.InternalMessageInfo -func (m *NodeCondition) Reset() { *m = NodeCondition{} } -func (*NodeCondition) ProtoMessage() {} -func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} } +func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } +func (*EmptyDirVolumeSource) ProtoMessage() {} +func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{38} +} +func (m *EmptyDirVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EmptyDirVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EmptyDirVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_EmptyDirVolumeSource.Merge(m, src) +} +func (m *EmptyDirVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *EmptyDirVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_EmptyDirVolumeSource.DiscardUnknown(m) +} -func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } -func (*NodeConfigSource) ProtoMessage() {} -func (*NodeConfigSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} } +var xxx_messageInfo_EmptyDirVolumeSource proto.InternalMessageInfo -func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } -func (*NodeConfigStatus) ProtoMessage() {} -func (*NodeConfigStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } +func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } +func (*EndpointAddress) ProtoMessage() {} +func (*EndpointAddress) Descriptor() ([]byte, []int) { + return 
fileDescriptor_83c10c24ec417dc9, []int{39} +} +func (m *EndpointAddress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointAddress.Merge(m, src) +} +func (m *EndpointAddress) XXX_Size() int { + return m.Size() +} +func (m *EndpointAddress) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointAddress.DiscardUnknown(m) +} -func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } -func (*NodeDaemonEndpoints) ProtoMessage() {} -func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } +var xxx_messageInfo_EndpointAddress proto.InternalMessageInfo -func (m *NodeList) Reset() { *m = NodeList{} } -func (*NodeList) ProtoMessage() {} -func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{90} } +func (m *EndpointPort) Reset() { *m = EndpointPort{} } +func (*EndpointPort) ProtoMessage() {} +func (*EndpointPort) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{40} +} +func (m *EndpointPort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointPort.Merge(m, src) +} +func (m *EndpointPort) XXX_Size() int { + return m.Size() +} +func (m *EndpointPort) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointPort.DiscardUnknown(m) +} -func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } -func (*NodeProxyOptions) ProtoMessage() {} -func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{91} } +var xxx_messageInfo_EndpointPort proto.InternalMessageInfo -func (m *NodeResources) Reset() { *m = NodeResources{} } -func (*NodeResources) ProtoMessage() {} -func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{92} } +func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } +func (*EndpointSubset) ProtoMessage() {} +func (*EndpointSubset) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{41} +} +func (m *EndpointSubset) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointSubset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointSubset) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointSubset.Merge(m, src) +} +func (m *EndpointSubset) XXX_Size() int { + return m.Size() +} +func (m *EndpointSubset) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointSubset.DiscardUnknown(m) +} -func (m *NodeSelector) Reset() { *m = NodeSelector{} } -func (*NodeSelector) ProtoMessage() {} -func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{93} } +var xxx_messageInfo_EndpointSubset proto.InternalMessageInfo -func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } -func (*NodeSelectorRequirement) ProtoMessage() {} -func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, 
[]int{94} +func (m *Endpoints) Reset() { *m = Endpoints{} } +func (*Endpoints) ProtoMessage() {} +func (*Endpoints) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{42} +} +func (m *Endpoints) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Endpoints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Endpoints) XXX_Merge(src proto.Message) { + xxx_messageInfo_Endpoints.Merge(m, src) +} +func (m *Endpoints) XXX_Size() int { + return m.Size() +} +func (m *Endpoints) XXX_DiscardUnknown() { + xxx_messageInfo_Endpoints.DiscardUnknown(m) } -func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } -func (*NodeSelectorTerm) ProtoMessage() {} -func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } - -func (m *NodeSpec) Reset() { *m = NodeSpec{} } -func (*NodeSpec) ProtoMessage() {} -func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} } +var xxx_messageInfo_Endpoints proto.InternalMessageInfo -func (m *NodeStatus) Reset() { *m = NodeStatus{} } -func (*NodeStatus) ProtoMessage() {} -func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} } +func (m *EndpointsList) Reset() { *m = EndpointsList{} } +func (*EndpointsList) ProtoMessage() {} +func (*EndpointsList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{43} +} +func (m *EndpointsList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointsList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointsList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointsList.Merge(m, src) +} +func (m *EndpointsList) XXX_Size() int { + return m.Size() +} +func (m *EndpointsList) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointsList.DiscardUnknown(m) +} -func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } -func (*NodeSystemInfo) ProtoMessage() {} -func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} } +var xxx_messageInfo_EndpointsList proto.InternalMessageInfo -func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } -func (*ObjectFieldSelector) ProtoMessage() {} -func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{99} } +func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } +func (*EnvFromSource) ProtoMessage() {} +func (*EnvFromSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{44} +} +func (m *EnvFromSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvFromSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EnvFromSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvFromSource.Merge(m, src) +} +func (m *EnvFromSource) XXX_Size() int { + return m.Size() +} +func (m *EnvFromSource) XXX_DiscardUnknown() { + xxx_messageInfo_EnvFromSource.DiscardUnknown(m) +} -func (m *ObjectReference) Reset() { *m = ObjectReference{} } -func (*ObjectReference) ProtoMessage() {} -func (*ObjectReference) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{100} } +var xxx_messageInfo_EnvFromSource proto.InternalMessageInfo -func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } -func (*PersistentVolume) ProtoMessage() {} -func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{101} } +func (m *EnvVar) Reset() { *m = EnvVar{} } +func (*EnvVar) ProtoMessage() {} +func (*EnvVar) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{45} +} +func (m *EnvVar) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvVar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EnvVar) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvVar.Merge(m, src) +} +func (m *EnvVar) XXX_Size() int { + return m.Size() +} +func (m *EnvVar) XXX_DiscardUnknown() { + xxx_messageInfo_EnvVar.DiscardUnknown(m) +} -func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } -func (*PersistentVolumeClaim) ProtoMessage() {} -func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{102} } +var xxx_messageInfo_EnvVar proto.InternalMessageInfo -func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } -func (*PersistentVolumeClaimCondition) ProtoMessage() {} -func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{103} +func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } +func (*EnvVarSource) ProtoMessage() {} +func (*EnvVarSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{46} } - -func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } -func (*PersistentVolumeClaimList) ProtoMessage() {} -func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{104} +func (m *EnvVarSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } - -func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } -func (*PersistentVolumeClaimSpec) ProtoMessage() {} -func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{105} +func (m *EnvVarSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - -func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } -func (*PersistentVolumeClaimStatus) ProtoMessage() {} -func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{106} +func (m *EnvVarSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvVarSource.Merge(m, src) } - -func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } -func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} -func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{107} +func (m *EnvVarSource) XXX_Size() int { + return m.Size() +} +func (m *EnvVarSource) XXX_DiscardUnknown() { + xxx_messageInfo_EnvVarSource.DiscardUnknown(m) } -func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } -func (*PersistentVolumeList) ProtoMessage() {} -func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{108} } 
+var xxx_messageInfo_EnvVarSource proto.InternalMessageInfo -func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } -func (*PersistentVolumeSource) ProtoMessage() {} -func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{109} +func (m *EphemeralContainer) Reset() { *m = EphemeralContainer{} } +func (*EphemeralContainer) ProtoMessage() {} +func (*EphemeralContainer) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{47} +} +func (m *EphemeralContainer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EphemeralContainer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EphemeralContainer) XXX_Merge(src proto.Message) { + xxx_messageInfo_EphemeralContainer.Merge(m, src) +} +func (m *EphemeralContainer) XXX_Size() int { + return m.Size() +} +func (m *EphemeralContainer) XXX_DiscardUnknown() { + xxx_messageInfo_EphemeralContainer.DiscardUnknown(m) } -func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } -func (*PersistentVolumeSpec) ProtoMessage() {} -func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{110} } +var xxx_messageInfo_EphemeralContainer proto.InternalMessageInfo -func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } -func (*PersistentVolumeStatus) ProtoMessage() {} -func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{111} +func (m *EphemeralContainerCommon) Reset() { *m = EphemeralContainerCommon{} } +func (*EphemeralContainerCommon) ProtoMessage() {} +func (*EphemeralContainerCommon) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{48} } - -func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } -func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} -func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{112} +func (m *EphemeralContainerCommon) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EphemeralContainerCommon) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EphemeralContainerCommon) XXX_Merge(src proto.Message) { + xxx_messageInfo_EphemeralContainerCommon.Merge(m, src) +} +func (m *EphemeralContainerCommon) XXX_Size() int { + return m.Size() +} +func (m *EphemeralContainerCommon) XXX_DiscardUnknown() { + xxx_messageInfo_EphemeralContainerCommon.DiscardUnknown(m) } -func (m *Pod) Reset() { *m = Pod{} } -func (*Pod) ProtoMessage() {} -func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} } +var xxx_messageInfo_EphemeralContainerCommon proto.InternalMessageInfo -func (m *PodAffinity) Reset() { *m = PodAffinity{} } -func (*PodAffinity) ProtoMessage() {} -func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{114} } +func (m *EphemeralContainers) Reset() { *m = EphemeralContainers{} } +func (*EphemeralContainers) ProtoMessage() {} +func (*EphemeralContainers) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{49} +} +func (m *EphemeralContainers) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*EphemeralContainers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EphemeralContainers) XXX_Merge(src proto.Message) { + xxx_messageInfo_EphemeralContainers.Merge(m, src) +} +func (m *EphemeralContainers) XXX_Size() int { + return m.Size() +} +func (m *EphemeralContainers) XXX_DiscardUnknown() { + xxx_messageInfo_EphemeralContainers.DiscardUnknown(m) +} -func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } -func (*PodAffinityTerm) ProtoMessage() {} -func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} } +var xxx_messageInfo_EphemeralContainers proto.InternalMessageInfo -func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } -func (*PodAntiAffinity) ProtoMessage() {} -func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{116} } +func (m *Event) Reset() { *m = Event{} } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{50} +} +func (m *Event) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Event.Merge(m, src) +} +func (m *Event) XXX_Size() int { + return m.Size() +} +func (m *Event) XXX_DiscardUnknown() { + xxx_messageInfo_Event.DiscardUnknown(m) +} -func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } -func (*PodAttachOptions) ProtoMessage() {} -func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} } +var xxx_messageInfo_Event proto.InternalMessageInfo -func (m *PodCondition) Reset() { *m = PodCondition{} } -func (*PodCondition) ProtoMessage() {} -func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } +func (m *EventList) Reset() { *m = EventList{} } +func (*EventList) ProtoMessage() {} +func (*EventList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{51} +} +func (m *EventList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventList.Merge(m, src) +} +func (m *EventList) XXX_Size() int { + return m.Size() +} +func (m *EventList) XXX_DiscardUnknown() { + xxx_messageInfo_EventList.DiscardUnknown(m) +} -func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } -func (*PodDNSConfig) ProtoMessage() {} -func (*PodDNSConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } +var xxx_messageInfo_EventList proto.InternalMessageInfo -func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } -func (*PodDNSConfigOption) ProtoMessage() {} -func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} } +func (m *EventSeries) Reset() { *m = EventSeries{} } +func (*EventSeries) ProtoMessage() {} +func (*EventSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{52} +} +func (m *EventSeries) XXX_Unmarshal(b []byte) error { 
+ return m.Unmarshal(b) +} +func (m *EventSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventSeries.Merge(m, src) +} +func (m *EventSeries) XXX_Size() int { + return m.Size() +} +func (m *EventSeries) XXX_DiscardUnknown() { + xxx_messageInfo_EventSeries.DiscardUnknown(m) +} -func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } -func (*PodExecOptions) ProtoMessage() {} -func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } +var xxx_messageInfo_EventSeries proto.InternalMessageInfo -func (m *PodList) Reset() { *m = PodList{} } -func (*PodList) ProtoMessage() {} -func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{122} } +func (m *EventSource) Reset() { *m = EventSource{} } +func (*EventSource) ProtoMessage() {} +func (*EventSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{53} +} +func (m *EventSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventSource.Merge(m, src) +} +func (m *EventSource) XXX_Size() int { + return m.Size() +} +func (m *EventSource) XXX_DiscardUnknown() { + xxx_messageInfo_EventSource.DiscardUnknown(m) +} -func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } -func (*PodLogOptions) ProtoMessage() {} -func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} } +var xxx_messageInfo_EventSource proto.InternalMessageInfo -func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } -func (*PodPortForwardOptions) ProtoMessage() {} -func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} } +func (m *ExecAction) Reset() { *m = ExecAction{} } +func (*ExecAction) ProtoMessage() {} +func (*ExecAction) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{54} +} +func (m *ExecAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExecAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExecAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecAction.Merge(m, src) +} +func (m *ExecAction) XXX_Size() int { + return m.Size() +} +func (m *ExecAction) XXX_DiscardUnknown() { + xxx_messageInfo_ExecAction.DiscardUnknown(m) +} -func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } -func (*PodProxyOptions) ProtoMessage() {} -func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{125} } +var xxx_messageInfo_ExecAction proto.InternalMessageInfo -func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } -func (*PodReadinessGate) ProtoMessage() {} -func (*PodReadinessGate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} } +func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } +func (*FCVolumeSource) ProtoMessage() {} +func (*FCVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, 
[]int{55} +} +func (m *FCVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FCVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FCVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_FCVolumeSource.Merge(m, src) +} +func (m *FCVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *FCVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_FCVolumeSource.DiscardUnknown(m) +} -func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } -func (*PodSecurityContext) ProtoMessage() {} -func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } +var xxx_messageInfo_FCVolumeSource proto.InternalMessageInfo -func (m *PodSignature) Reset() { *m = PodSignature{} } -func (*PodSignature) ProtoMessage() {} -func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } +func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} } +func (*FlexPersistentVolumeSource) ProtoMessage() {} +func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{56} +} +func (m *FlexPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlexPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlexPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlexPersistentVolumeSource.Merge(m, src) +} +func (m *FlexPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *FlexPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_FlexPersistentVolumeSource.DiscardUnknown(m) +} -func (m *PodSpec) Reset() { *m = PodSpec{} } -func (*PodSpec) ProtoMessage() {} -func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{129} } +var xxx_messageInfo_FlexPersistentVolumeSource proto.InternalMessageInfo -func (m *PodStatus) Reset() { *m = PodStatus{} } -func (*PodStatus) ProtoMessage() {} -func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{130} } +func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } +func (*FlexVolumeSource) ProtoMessage() {} +func (*FlexVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{57} +} +func (m *FlexVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlexVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlexVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlexVolumeSource.Merge(m, src) +} +func (m *FlexVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *FlexVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_FlexVolumeSource.DiscardUnknown(m) +} -func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } -func (*PodStatusResult) ProtoMessage() {} -func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{131} } +var xxx_messageInfo_FlexVolumeSource proto.InternalMessageInfo -func (m *PodTemplate) Reset() { *m = PodTemplate{} } -func (*PodTemplate) ProtoMessage() {} -func 
(*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{132} } +func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } +func (*FlockerVolumeSource) ProtoMessage() {} +func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{58} +} +func (m *FlockerVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlockerVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlockerVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlockerVolumeSource.Merge(m, src) +} +func (m *FlockerVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *FlockerVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_FlockerVolumeSource.DiscardUnknown(m) +} -func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } -func (*PodTemplateList) ProtoMessage() {} -func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{133} } +var xxx_messageInfo_FlockerVolumeSource proto.InternalMessageInfo -func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } -func (*PodTemplateSpec) ProtoMessage() {} -func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{134} } +func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } +func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} +func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{59} +} +func (m *GCEPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GCEPersistentDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GCEPersistentDiskVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GCEPersistentDiskVolumeSource.Merge(m, src) +} +func (m *GCEPersistentDiskVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *GCEPersistentDiskVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_GCEPersistentDiskVolumeSource.DiscardUnknown(m) +} -func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } -func (*PortworxVolumeSource) ProtoMessage() {} -func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} } +var xxx_messageInfo_GCEPersistentDiskVolumeSource proto.InternalMessageInfo -func (m *Preconditions) Reset() { *m = Preconditions{} } -func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{136} } +func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } +func (*GitRepoVolumeSource) ProtoMessage() {} +func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{60} +} +func (m *GitRepoVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitRepoVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GitRepoVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitRepoVolumeSource.Merge(m, src) +} +func (m *GitRepoVolumeSource) XXX_Size() int { + return m.Size() 
+} +func (m *GitRepoVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_GitRepoVolumeSource.DiscardUnknown(m) +} -func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } -func (*PreferAvoidPodsEntry) ProtoMessage() {} -func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{137} } +var xxx_messageInfo_GitRepoVolumeSource proto.InternalMessageInfo -func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } -func (*PreferredSchedulingTerm) ProtoMessage() {} -func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{138} +func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} } +func (*GlusterfsPersistentVolumeSource) ProtoMessage() {} +func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{61} +} +func (m *GlusterfsPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GlusterfsPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GlusterfsPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GlusterfsPersistentVolumeSource.Merge(m, src) +} +func (m *GlusterfsPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *GlusterfsPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_GlusterfsPersistentVolumeSource.DiscardUnknown(m) } -func (m *Probe) Reset() { *m = Probe{} } -func (*Probe) ProtoMessage() {} -func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{139} } +var xxx_messageInfo_GlusterfsPersistentVolumeSource proto.InternalMessageInfo -func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } -func (*ProjectedVolumeSource) ProtoMessage() {} -func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{140} } +func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } +func (*GlusterfsVolumeSource) ProtoMessage() {} +func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{62} +} +func (m *GlusterfsVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GlusterfsVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GlusterfsVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GlusterfsVolumeSource.Merge(m, src) +} +func (m *GlusterfsVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *GlusterfsVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_GlusterfsVolumeSource.DiscardUnknown(m) +} -func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } -func (*QuobyteVolumeSource) ProtoMessage() {} -func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{141} } +var xxx_messageInfo_GlusterfsVolumeSource proto.InternalMessageInfo -func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } -func (*RBDPersistentVolumeSource) ProtoMessage() {} -func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{142} +func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } +func (*HTTPGetAction) 
ProtoMessage() {} +func (*HTTPGetAction) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{63} +} +func (m *HTTPGetAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPGetAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPGetAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPGetAction.Merge(m, src) +} +func (m *HTTPGetAction) XXX_Size() int { + return m.Size() +} +func (m *HTTPGetAction) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPGetAction.DiscardUnknown(m) } -func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } -func (*RBDVolumeSource) ProtoMessage() {} -func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{143} } +var xxx_messageInfo_HTTPGetAction proto.InternalMessageInfo -func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } -func (*RangeAllocation) ProtoMessage() {} -func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{144} } +func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } +func (*HTTPHeader) ProtoMessage() {} +func (*HTTPHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{64} +} +func (m *HTTPHeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPHeader.Merge(m, src) +} +func (m *HTTPHeader) XXX_Size() int { + return m.Size() +} +func (m *HTTPHeader) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPHeader.DiscardUnknown(m) +} -func (m *ReplicationController) Reset() { *m = ReplicationController{} } -func (*ReplicationController) ProtoMessage() {} -func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{145} } +var xxx_messageInfo_HTTPHeader proto.InternalMessageInfo -func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } -func (*ReplicationControllerCondition) ProtoMessage() {} -func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{146} +func (m *Handler) Reset() { *m = Handler{} } +func (*Handler) ProtoMessage() {} +func (*Handler) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{65} +} +func (m *Handler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Handler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Handler) XXX_Merge(src proto.Message) { + xxx_messageInfo_Handler.Merge(m, src) +} +func (m *Handler) XXX_Size() int { + return m.Size() +} +func (m *Handler) XXX_DiscardUnknown() { + xxx_messageInfo_Handler.DiscardUnknown(m) } -func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } -func (*ReplicationControllerList) ProtoMessage() {} -func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{147} +var xxx_messageInfo_Handler proto.InternalMessageInfo + +func (m *HostAlias) Reset() { *m = HostAlias{} } +func (*HostAlias) ProtoMessage() {} +func (*HostAlias) 
Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{66} +} +func (m *HostAlias) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostAlias) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostAlias) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostAlias.Merge(m, src) +} +func (m *HostAlias) XXX_Size() int { + return m.Size() +} +func (m *HostAlias) XXX_DiscardUnknown() { + xxx_messageInfo_HostAlias.DiscardUnknown(m) } -func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } -func (*ReplicationControllerSpec) ProtoMessage() {} -func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{148} +var xxx_messageInfo_HostAlias proto.InternalMessageInfo + +func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } +func (*HostPathVolumeSource) ProtoMessage() {} +func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{67} +} +func (m *HostPathVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostPathVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostPathVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostPathVolumeSource.Merge(m, src) +} +func (m *HostPathVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *HostPathVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_HostPathVolumeSource.DiscardUnknown(m) } -func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } -func (*ReplicationControllerStatus) ProtoMessage() {} -func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{149} +var xxx_messageInfo_HostPathVolumeSource proto.InternalMessageInfo + +func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } +func (*ISCSIPersistentVolumeSource) ProtoMessage() {} +func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{68} +} +func (m *ISCSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ISCSIPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ISCSIPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ISCSIPersistentVolumeSource.Merge(m, src) +} +func (m *ISCSIPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ISCSIPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ISCSIPersistentVolumeSource.DiscardUnknown(m) } -func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } -func (*ResourceFieldSelector) ProtoMessage() {} -func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{150} } +var xxx_messageInfo_ISCSIPersistentVolumeSource proto.InternalMessageInfo -func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } -func (*ResourceQuota) ProtoMessage() {} -func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } +func (m *ISCSIVolumeSource) Reset() { *m = 
ISCSIVolumeSource{} } +func (*ISCSIVolumeSource) ProtoMessage() {} +func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{69} +} +func (m *ISCSIVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ISCSIVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ISCSIVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ISCSIVolumeSource.Merge(m, src) +} +func (m *ISCSIVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ISCSIVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ISCSIVolumeSource.DiscardUnknown(m) +} -func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } -func (*ResourceQuotaList) ProtoMessage() {} -func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } +var xxx_messageInfo_ISCSIVolumeSource proto.InternalMessageInfo -func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } -func (*ResourceQuotaSpec) ProtoMessage() {} -func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{153} } +func (m *KeyToPath) Reset() { *m = KeyToPath{} } +func (*KeyToPath) ProtoMessage() {} +func (*KeyToPath) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{70} +} +func (m *KeyToPath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KeyToPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *KeyToPath) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyToPath.Merge(m, src) +} +func (m *KeyToPath) XXX_Size() int { + return m.Size() +} +func (m *KeyToPath) XXX_DiscardUnknown() { + xxx_messageInfo_KeyToPath.DiscardUnknown(m) +} -func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } -func (*ResourceQuotaStatus) ProtoMessage() {} -func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{154} } +var xxx_messageInfo_KeyToPath proto.InternalMessageInfo -func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } -func (*ResourceRequirements) ProtoMessage() {} -func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{155} } +func (m *Lifecycle) Reset() { *m = Lifecycle{} } +func (*Lifecycle) ProtoMessage() {} +func (*Lifecycle) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{71} +} +func (m *Lifecycle) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Lifecycle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Lifecycle) XXX_Merge(src proto.Message) { + xxx_messageInfo_Lifecycle.Merge(m, src) +} +func (m *Lifecycle) XXX_Size() int { + return m.Size() +} +func (m *Lifecycle) XXX_DiscardUnknown() { + xxx_messageInfo_Lifecycle.DiscardUnknown(m) +} -func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } -func (*SELinuxOptions) ProtoMessage() {} -func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{156} } +var xxx_messageInfo_Lifecycle proto.InternalMessageInfo -func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } 
-func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} -func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{157} +func (m *LimitRange) Reset() { *m = LimitRange{} } +func (*LimitRange) ProtoMessage() {} +func (*LimitRange) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{72} +} +func (m *LimitRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitRange.Merge(m, src) +} +func (m *LimitRange) XXX_Size() int { + return m.Size() +} +func (m *LimitRange) XXX_DiscardUnknown() { + xxx_messageInfo_LimitRange.DiscardUnknown(m) } -func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } -func (*ScaleIOVolumeSource) ProtoMessage() {} -func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{158} } +var xxx_messageInfo_LimitRange proto.InternalMessageInfo + +func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } +func (*LimitRangeItem) ProtoMessage() {} +func (*LimitRangeItem) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{73} +} +func (m *LimitRangeItem) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitRangeItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitRangeItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitRangeItem.Merge(m, src) +} +func (m *LimitRangeItem) XXX_Size() int { + return m.Size() +} +func (m *LimitRangeItem) XXX_DiscardUnknown() { + xxx_messageInfo_LimitRangeItem.DiscardUnknown(m) +} -func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } -func (*ScopeSelector) ProtoMessage() {} -func (*ScopeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{159} } +var xxx_messageInfo_LimitRangeItem proto.InternalMessageInfo -func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } -func (*ScopedResourceSelectorRequirement) ProtoMessage() {} -func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{160} +func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } +func (*LimitRangeList) ProtoMessage() {} +func (*LimitRangeList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{74} +} +func (m *LimitRangeList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitRangeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } +func (m *LimitRangeList) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitRangeList.Merge(m, src) +} +func (m *LimitRangeList) XXX_Size() int { + return m.Size() +} +func (m *LimitRangeList) XXX_DiscardUnknown() { + xxx_messageInfo_LimitRangeList.DiscardUnknown(m) +} + +var xxx_messageInfo_LimitRangeList proto.InternalMessageInfo -func (m *Secret) Reset() { *m = Secret{} } -func (*Secret) ProtoMessage() {} -func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} } +func (m *LimitRangeSpec) Reset() { *m = 
LimitRangeSpec{} } +func (*LimitRangeSpec) ProtoMessage() {} +func (*LimitRangeSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{75} +} +func (m *LimitRangeSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitRangeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitRangeSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitRangeSpec.Merge(m, src) +} +func (m *LimitRangeSpec) XXX_Size() int { + return m.Size() +} +func (m *LimitRangeSpec) XXX_DiscardUnknown() { + xxx_messageInfo_LimitRangeSpec.DiscardUnknown(m) +} -func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } -func (*SecretEnvSource) ProtoMessage() {} -func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} } +var xxx_messageInfo_LimitRangeSpec proto.InternalMessageInfo -func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } -func (*SecretKeySelector) ProtoMessage() {} -func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} } +func (m *List) Reset() { *m = List{} } +func (*List) ProtoMessage() {} +func (*List) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{76} +} +func (m *List) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *List) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *List) XXX_Merge(src proto.Message) { + xxx_messageInfo_List.Merge(m, src) +} +func (m *List) XXX_Size() int { + return m.Size() +} +func (m *List) XXX_DiscardUnknown() { + xxx_messageInfo_List.DiscardUnknown(m) +} -func (m *SecretList) Reset() { *m = SecretList{} } -func (*SecretList) ProtoMessage() {} -func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} } +var xxx_messageInfo_List proto.InternalMessageInfo -func (m *SecretProjection) Reset() { *m = SecretProjection{} } -func (*SecretProjection) ProtoMessage() {} -func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{165} } +func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } +func (*LoadBalancerIngress) ProtoMessage() {} +func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{77} +} +func (m *LoadBalancerIngress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LoadBalancerIngress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LoadBalancerIngress) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoadBalancerIngress.Merge(m, src) +} +func (m *LoadBalancerIngress) XXX_Size() int { + return m.Size() +} +func (m *LoadBalancerIngress) XXX_DiscardUnknown() { + xxx_messageInfo_LoadBalancerIngress.DiscardUnknown(m) +} -func (m *SecretReference) Reset() { *m = SecretReference{} } -func (*SecretReference) ProtoMessage() {} -func (*SecretReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{166} } +var xxx_messageInfo_LoadBalancerIngress proto.InternalMessageInfo -func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } -func (*SecretVolumeSource) ProtoMessage() {} -func 
(*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{167} } +func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } +func (*LoadBalancerStatus) ProtoMessage() {} +func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{78} +} +func (m *LoadBalancerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LoadBalancerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LoadBalancerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoadBalancerStatus.Merge(m, src) +} +func (m *LoadBalancerStatus) XXX_Size() int { + return m.Size() +} +func (m *LoadBalancerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_LoadBalancerStatus.DiscardUnknown(m) +} -func (m *SecurityContext) Reset() { *m = SecurityContext{} } -func (*SecurityContext) ProtoMessage() {} -func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{168} } +var xxx_messageInfo_LoadBalancerStatus proto.InternalMessageInfo -func (m *SerializedReference) Reset() { *m = SerializedReference{} } -func (*SerializedReference) ProtoMessage() {} -func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{169} } +func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } +func (*LocalObjectReference) ProtoMessage() {} +func (*LocalObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{79} +} +func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LocalObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LocalObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalObjectReference.Merge(m, src) +} +func (m *LocalObjectReference) XXX_Size() int { + return m.Size() +} +func (m *LocalObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_LocalObjectReference.DiscardUnknown(m) +} -func (m *Service) Reset() { *m = Service{} } -func (*Service) ProtoMessage() {} -func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{170} } +var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo -func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } -func (*ServiceAccount) ProtoMessage() {} -func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{171} } +func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } +func (*LocalVolumeSource) ProtoMessage() {} +func (*LocalVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{80} +} +func (m *LocalVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LocalVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LocalVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalVolumeSource.Merge(m, src) +} +func (m *LocalVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *LocalVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_LocalVolumeSource.DiscardUnknown(m) +} -func (m *ServiceAccountList) Reset() { *m = 
ServiceAccountList{} } -func (*ServiceAccountList) ProtoMessage() {} -func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{172} } +var xxx_messageInfo_LocalVolumeSource proto.InternalMessageInfo -func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } -func (*ServiceAccountTokenProjection) ProtoMessage() {} -func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{173} +func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } +func (*NFSVolumeSource) ProtoMessage() {} +func (*NFSVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{81} +} +func (m *NFSVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NFSVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NFSVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_NFSVolumeSource.Merge(m, src) +} +func (m *NFSVolumeSource) XXX_Size() int { + return m.Size() } +func (m *NFSVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_NFSVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_NFSVolumeSource proto.InternalMessageInfo -func (m *ServiceList) Reset() { *m = ServiceList{} } -func (*ServiceList) ProtoMessage() {} -func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{174} } +func (m *Namespace) Reset() { *m = Namespace{} } +func (*Namespace) ProtoMessage() {} +func (*Namespace) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{82} +} +func (m *Namespace) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Namespace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Namespace) XXX_Merge(src proto.Message) { + xxx_messageInfo_Namespace.Merge(m, src) +} +func (m *Namespace) XXX_Size() int { + return m.Size() +} +func (m *Namespace) XXX_DiscardUnknown() { + xxx_messageInfo_Namespace.DiscardUnknown(m) +} -func (m *ServicePort) Reset() { *m = ServicePort{} } -func (*ServicePort) ProtoMessage() {} -func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{175} } +var xxx_messageInfo_Namespace proto.InternalMessageInfo -func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } -func (*ServiceProxyOptions) ProtoMessage() {} -func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{176} } +func (m *NamespaceCondition) Reset() { *m = NamespaceCondition{} } +func (*NamespaceCondition) ProtoMessage() {} +func (*NamespaceCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{83} +} +func (m *NamespaceCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamespaceCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamespaceCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamespaceCondition.Merge(m, src) +} +func (m *NamespaceCondition) XXX_Size() int { + return m.Size() +} +func (m *NamespaceCondition) XXX_DiscardUnknown() { + xxx_messageInfo_NamespaceCondition.DiscardUnknown(m) +} -func 
(m *ServiceSpec) Reset() { *m = ServiceSpec{} } -func (*ServiceSpec) ProtoMessage() {} -func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{177} } +var xxx_messageInfo_NamespaceCondition proto.InternalMessageInfo -func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } -func (*ServiceStatus) ProtoMessage() {} -func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{178} } +func (m *NamespaceList) Reset() { *m = NamespaceList{} } +func (*NamespaceList) ProtoMessage() {} +func (*NamespaceList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{84} +} +func (m *NamespaceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamespaceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamespaceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamespaceList.Merge(m, src) +} +func (m *NamespaceList) XXX_Size() int { + return m.Size() +} +func (m *NamespaceList) XXX_DiscardUnknown() { + xxx_messageInfo_NamespaceList.DiscardUnknown(m) +} -func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } -func (*SessionAffinityConfig) ProtoMessage() {} -func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{179} } +var xxx_messageInfo_NamespaceList proto.InternalMessageInfo -func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } -func (*StorageOSPersistentVolumeSource) ProtoMessage() {} -func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{180} +func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } +func (*NamespaceSpec) ProtoMessage() {} +func (*NamespaceSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{85} +} +func (m *NamespaceSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamespaceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamespaceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamespaceSpec.Merge(m, src) +} +func (m *NamespaceSpec) XXX_Size() int { + return m.Size() +} +func (m *NamespaceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_NamespaceSpec.DiscardUnknown(m) } -func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } -func (*StorageOSVolumeSource) ProtoMessage() {} -func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{181} } +var xxx_messageInfo_NamespaceSpec proto.InternalMessageInfo -func (m *Sysctl) Reset() { *m = Sysctl{} } -func (*Sysctl) ProtoMessage() {} -func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{182} } +func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } +func (*NamespaceStatus) ProtoMessage() {} +func (*NamespaceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{86} +} +func (m *NamespaceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamespaceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamespaceStatus) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_NamespaceStatus.Merge(m, src) +} +func (m *NamespaceStatus) XXX_Size() int { + return m.Size() +} +func (m *NamespaceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_NamespaceStatus.DiscardUnknown(m) +} -func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } -func (*TCPSocketAction) ProtoMessage() {} -func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{183} } +var xxx_messageInfo_NamespaceStatus proto.InternalMessageInfo -func (m *Taint) Reset() { *m = Taint{} } -func (*Taint) ProtoMessage() {} -func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{184} } +func (m *Node) Reset() { *m = Node{} } +func (*Node) ProtoMessage() {} +func (*Node) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{87} +} +func (m *Node) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Node) XXX_Merge(src proto.Message) { + xxx_messageInfo_Node.Merge(m, src) +} +func (m *Node) XXX_Size() int { + return m.Size() +} +func (m *Node) XXX_DiscardUnknown() { + xxx_messageInfo_Node.DiscardUnknown(m) +} -func (m *Toleration) Reset() { *m = Toleration{} } -func (*Toleration) ProtoMessage() {} -func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{185} } +var xxx_messageInfo_Node proto.InternalMessageInfo -func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } -func (*TopologySelectorLabelRequirement) ProtoMessage() {} -func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{186} +func (m *NodeAddress) Reset() { *m = NodeAddress{} } +func (*NodeAddress) ProtoMessage() {} +func (*NodeAddress) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{88} +} +func (m *NodeAddress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeAddress.Merge(m, src) +} +func (m *NodeAddress) XXX_Size() int { + return m.Size() +} +func (m *NodeAddress) XXX_DiscardUnknown() { + xxx_messageInfo_NodeAddress.DiscardUnknown(m) } -func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } -func (*TopologySelectorTerm) ProtoMessage() {} -func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{187} } +var xxx_messageInfo_NodeAddress proto.InternalMessageInfo -func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } -func (*TypedLocalObjectReference) ProtoMessage() {} -func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{188} +func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } +func (*NodeAffinity) ProtoMessage() {} +func (*NodeAffinity) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{89} +} +func (m *NodeAffinity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if 
err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeAffinity) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeAffinity.Merge(m, src) +} +func (m *NodeAffinity) XXX_Size() int { + return m.Size() +} +func (m *NodeAffinity) XXX_DiscardUnknown() { + xxx_messageInfo_NodeAffinity.DiscardUnknown(m) } -func (m *Volume) Reset() { *m = Volume{} } -func (*Volume) ProtoMessage() {} -func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{189} } +var xxx_messageInfo_NodeAffinity proto.InternalMessageInfo -func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } -func (*VolumeDevice) ProtoMessage() {} -func (*VolumeDevice) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{190} } +func (m *NodeCondition) Reset() { *m = NodeCondition{} } +func (*NodeCondition) ProtoMessage() {} +func (*NodeCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{90} +} +func (m *NodeCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeCondition.Merge(m, src) +} +func (m *NodeCondition) XXX_Size() int { + return m.Size() +} +func (m *NodeCondition) XXX_DiscardUnknown() { + xxx_messageInfo_NodeCondition.DiscardUnknown(m) +} -func (m *VolumeMount) Reset() { *m = VolumeMount{} } -func (*VolumeMount) ProtoMessage() {} -func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{191} } +var xxx_messageInfo_NodeCondition proto.InternalMessageInfo -func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } -func (*VolumeNodeAffinity) ProtoMessage() {} -func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{192} } +func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } +func (*NodeConfigSource) ProtoMessage() {} +func (*NodeConfigSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{91} +} +func (m *NodeConfigSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeConfigSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeConfigSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeConfigSource.Merge(m, src) +} +func (m *NodeConfigSource) XXX_Size() int { + return m.Size() +} +func (m *NodeConfigSource) XXX_DiscardUnknown() { + xxx_messageInfo_NodeConfigSource.DiscardUnknown(m) +} -func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } -func (*VolumeProjection) ProtoMessage() {} -func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{193} } +var xxx_messageInfo_NodeConfigSource proto.InternalMessageInfo -func (m *VolumeSource) Reset() { *m = VolumeSource{} } -func (*VolumeSource) ProtoMessage() {} -func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{194} } +func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } +func (*NodeConfigStatus) ProtoMessage() {} +func (*NodeConfigStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{92} +} +func (m *NodeConfigStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} 
+func (m *NodeConfigStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeConfigStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeConfigStatus.Merge(m, src) +} +func (m *NodeConfigStatus) XXX_Size() int { + return m.Size() +} +func (m *NodeConfigStatus) XXX_DiscardUnknown() { + xxx_messageInfo_NodeConfigStatus.DiscardUnknown(m) +} -func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } -func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} -func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{195} +var xxx_messageInfo_NodeConfigStatus proto.InternalMessageInfo + +func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } +func (*NodeDaemonEndpoints) ProtoMessage() {} +func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{93} +} +func (m *NodeDaemonEndpoints) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeDaemonEndpoints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeDaemonEndpoints) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeDaemonEndpoints.Merge(m, src) +} +func (m *NodeDaemonEndpoints) XXX_Size() int { + return m.Size() +} +func (m *NodeDaemonEndpoints) XXX_DiscardUnknown() { + xxx_messageInfo_NodeDaemonEndpoints.DiscardUnknown(m) } -func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } -func (*WeightedPodAffinityTerm) ProtoMessage() {} -func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{196} +var xxx_messageInfo_NodeDaemonEndpoints proto.InternalMessageInfo + +func (m *NodeList) Reset() { *m = NodeList{} } +func (*NodeList) ProtoMessage() {} +func (*NodeList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{94} +} +func (m *NodeList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeList.Merge(m, src) +} +func (m *NodeList) XXX_Size() int { + return m.Size() +} +func (m *NodeList) XXX_DiscardUnknown() { + xxx_messageInfo_NodeList.DiscardUnknown(m) } -func init() { - proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.api.core.v1.AWSElasticBlockStoreVolumeSource") - proto.RegisterType((*Affinity)(nil), "k8s.io.api.core.v1.Affinity") - proto.RegisterType((*AttachedVolume)(nil), "k8s.io.api.core.v1.AttachedVolume") - proto.RegisterType((*AvoidPods)(nil), "k8s.io.api.core.v1.AvoidPods") - proto.RegisterType((*AzureDiskVolumeSource)(nil), "k8s.io.api.core.v1.AzureDiskVolumeSource") - proto.RegisterType((*AzureFilePersistentVolumeSource)(nil), "k8s.io.api.core.v1.AzureFilePersistentVolumeSource") - proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.api.core.v1.AzureFileVolumeSource") - proto.RegisterType((*Binding)(nil), "k8s.io.api.core.v1.Binding") - proto.RegisterType((*CSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource") - proto.RegisterType((*CSIVolumeSource)(nil), 
"k8s.io.api.core.v1.CSIVolumeSource") - proto.RegisterType((*Capabilities)(nil), "k8s.io.api.core.v1.Capabilities") - proto.RegisterType((*CephFSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CephFSPersistentVolumeSource") - proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource") - proto.RegisterType((*CinderPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CinderPersistentVolumeSource") - proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.api.core.v1.CinderVolumeSource") - proto.RegisterType((*ClientIPConfig)(nil), "k8s.io.api.core.v1.ClientIPConfig") - proto.RegisterType((*ComponentCondition)(nil), "k8s.io.api.core.v1.ComponentCondition") - proto.RegisterType((*ComponentStatus)(nil), "k8s.io.api.core.v1.ComponentStatus") - proto.RegisterType((*ComponentStatusList)(nil), "k8s.io.api.core.v1.ComponentStatusList") - proto.RegisterType((*ConfigMap)(nil), "k8s.io.api.core.v1.ConfigMap") - proto.RegisterType((*ConfigMapEnvSource)(nil), "k8s.io.api.core.v1.ConfigMapEnvSource") - proto.RegisterType((*ConfigMapKeySelector)(nil), "k8s.io.api.core.v1.ConfigMapKeySelector") - proto.RegisterType((*ConfigMapList)(nil), "k8s.io.api.core.v1.ConfigMapList") - proto.RegisterType((*ConfigMapNodeConfigSource)(nil), "k8s.io.api.core.v1.ConfigMapNodeConfigSource") - proto.RegisterType((*ConfigMapProjection)(nil), "k8s.io.api.core.v1.ConfigMapProjection") - proto.RegisterType((*ConfigMapVolumeSource)(nil), "k8s.io.api.core.v1.ConfigMapVolumeSource") - proto.RegisterType((*Container)(nil), "k8s.io.api.core.v1.Container") - proto.RegisterType((*ContainerImage)(nil), "k8s.io.api.core.v1.ContainerImage") - proto.RegisterType((*ContainerPort)(nil), "k8s.io.api.core.v1.ContainerPort") - proto.RegisterType((*ContainerState)(nil), "k8s.io.api.core.v1.ContainerState") - proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.api.core.v1.ContainerStateRunning") - proto.RegisterType((*ContainerStateTerminated)(nil), "k8s.io.api.core.v1.ContainerStateTerminated") - proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.api.core.v1.ContainerStateWaiting") - proto.RegisterType((*ContainerStatus)(nil), "k8s.io.api.core.v1.ContainerStatus") - proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.api.core.v1.DaemonEndpoint") - proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.api.core.v1.DownwardAPIProjection") - proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeFile") - proto.RegisterType((*DownwardAPIVolumeSource)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeSource") - proto.RegisterType((*EmptyDirVolumeSource)(nil), "k8s.io.api.core.v1.EmptyDirVolumeSource") - proto.RegisterType((*EndpointAddress)(nil), "k8s.io.api.core.v1.EndpointAddress") - proto.RegisterType((*EndpointPort)(nil), "k8s.io.api.core.v1.EndpointPort") - proto.RegisterType((*EndpointSubset)(nil), "k8s.io.api.core.v1.EndpointSubset") - proto.RegisterType((*Endpoints)(nil), "k8s.io.api.core.v1.Endpoints") - proto.RegisterType((*EndpointsList)(nil), "k8s.io.api.core.v1.EndpointsList") - proto.RegisterType((*EnvFromSource)(nil), "k8s.io.api.core.v1.EnvFromSource") - proto.RegisterType((*EnvVar)(nil), "k8s.io.api.core.v1.EnvVar") - proto.RegisterType((*EnvVarSource)(nil), "k8s.io.api.core.v1.EnvVarSource") - proto.RegisterType((*Event)(nil), "k8s.io.api.core.v1.Event") - proto.RegisterType((*EventList)(nil), "k8s.io.api.core.v1.EventList") - proto.RegisterType((*EventSeries)(nil), "k8s.io.api.core.v1.EventSeries") - proto.RegisterType((*EventSource)(nil), 
"k8s.io.api.core.v1.EventSource") - proto.RegisterType((*ExecAction)(nil), "k8s.io.api.core.v1.ExecAction") - proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.api.core.v1.FCVolumeSource") - proto.RegisterType((*FlexPersistentVolumeSource)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource") - proto.RegisterType((*FlexVolumeSource)(nil), "k8s.io.api.core.v1.FlexVolumeSource") - proto.RegisterType((*FlockerVolumeSource)(nil), "k8s.io.api.core.v1.FlockerVolumeSource") - proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.GCEPersistentDiskVolumeSource") - proto.RegisterType((*GitRepoVolumeSource)(nil), "k8s.io.api.core.v1.GitRepoVolumeSource") - proto.RegisterType((*GlusterfsPersistentVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsPersistentVolumeSource") - proto.RegisterType((*GlusterfsVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsVolumeSource") - proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.api.core.v1.HTTPGetAction") - proto.RegisterType((*HTTPHeader)(nil), "k8s.io.api.core.v1.HTTPHeader") - proto.RegisterType((*Handler)(nil), "k8s.io.api.core.v1.Handler") - proto.RegisterType((*HostAlias)(nil), "k8s.io.api.core.v1.HostAlias") - proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.api.core.v1.HostPathVolumeSource") - proto.RegisterType((*ISCSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIPersistentVolumeSource") - proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIVolumeSource") - proto.RegisterType((*KeyToPath)(nil), "k8s.io.api.core.v1.KeyToPath") - proto.RegisterType((*Lifecycle)(nil), "k8s.io.api.core.v1.Lifecycle") - proto.RegisterType((*LimitRange)(nil), "k8s.io.api.core.v1.LimitRange") - proto.RegisterType((*LimitRangeItem)(nil), "k8s.io.api.core.v1.LimitRangeItem") - proto.RegisterType((*LimitRangeList)(nil), "k8s.io.api.core.v1.LimitRangeList") - proto.RegisterType((*LimitRangeSpec)(nil), "k8s.io.api.core.v1.LimitRangeSpec") - proto.RegisterType((*List)(nil), "k8s.io.api.core.v1.List") - proto.RegisterType((*LoadBalancerIngress)(nil), "k8s.io.api.core.v1.LoadBalancerIngress") - proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.api.core.v1.LoadBalancerStatus") - proto.RegisterType((*LocalObjectReference)(nil), "k8s.io.api.core.v1.LocalObjectReference") - proto.RegisterType((*LocalVolumeSource)(nil), "k8s.io.api.core.v1.LocalVolumeSource") - proto.RegisterType((*NFSVolumeSource)(nil), "k8s.io.api.core.v1.NFSVolumeSource") - proto.RegisterType((*Namespace)(nil), "k8s.io.api.core.v1.Namespace") - proto.RegisterType((*NamespaceList)(nil), "k8s.io.api.core.v1.NamespaceList") - proto.RegisterType((*NamespaceSpec)(nil), "k8s.io.api.core.v1.NamespaceSpec") - proto.RegisterType((*NamespaceStatus)(nil), "k8s.io.api.core.v1.NamespaceStatus") - proto.RegisterType((*Node)(nil), "k8s.io.api.core.v1.Node") - proto.RegisterType((*NodeAddress)(nil), "k8s.io.api.core.v1.NodeAddress") - proto.RegisterType((*NodeAffinity)(nil), "k8s.io.api.core.v1.NodeAffinity") - proto.RegisterType((*NodeCondition)(nil), "k8s.io.api.core.v1.NodeCondition") - proto.RegisterType((*NodeConfigSource)(nil), "k8s.io.api.core.v1.NodeConfigSource") - proto.RegisterType((*NodeConfigStatus)(nil), "k8s.io.api.core.v1.NodeConfigStatus") - proto.RegisterType((*NodeDaemonEndpoints)(nil), "k8s.io.api.core.v1.NodeDaemonEndpoints") - proto.RegisterType((*NodeList)(nil), "k8s.io.api.core.v1.NodeList") - proto.RegisterType((*NodeProxyOptions)(nil), "k8s.io.api.core.v1.NodeProxyOptions") - proto.RegisterType((*NodeResources)(nil), 
"k8s.io.api.core.v1.NodeResources") - proto.RegisterType((*NodeSelector)(nil), "k8s.io.api.core.v1.NodeSelector") - proto.RegisterType((*NodeSelectorRequirement)(nil), "k8s.io.api.core.v1.NodeSelectorRequirement") - proto.RegisterType((*NodeSelectorTerm)(nil), "k8s.io.api.core.v1.NodeSelectorTerm") - proto.RegisterType((*NodeSpec)(nil), "k8s.io.api.core.v1.NodeSpec") - proto.RegisterType((*NodeStatus)(nil), "k8s.io.api.core.v1.NodeStatus") - proto.RegisterType((*NodeSystemInfo)(nil), "k8s.io.api.core.v1.NodeSystemInfo") - proto.RegisterType((*ObjectFieldSelector)(nil), "k8s.io.api.core.v1.ObjectFieldSelector") - proto.RegisterType((*ObjectReference)(nil), "k8s.io.api.core.v1.ObjectReference") - proto.RegisterType((*PersistentVolume)(nil), "k8s.io.api.core.v1.PersistentVolume") - proto.RegisterType((*PersistentVolumeClaim)(nil), "k8s.io.api.core.v1.PersistentVolumeClaim") - proto.RegisterType((*PersistentVolumeClaimCondition)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimCondition") - proto.RegisterType((*PersistentVolumeClaimList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimList") - proto.RegisterType((*PersistentVolumeClaimSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimSpec") - proto.RegisterType((*PersistentVolumeClaimStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus") - proto.RegisterType((*PersistentVolumeClaimVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimVolumeSource") - proto.RegisterType((*PersistentVolumeList)(nil), "k8s.io.api.core.v1.PersistentVolumeList") - proto.RegisterType((*PersistentVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeSource") - proto.RegisterType((*PersistentVolumeSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeSpec") - proto.RegisterType((*PersistentVolumeStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeStatus") - proto.RegisterType((*PhotonPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.PhotonPersistentDiskVolumeSource") - proto.RegisterType((*Pod)(nil), "k8s.io.api.core.v1.Pod") - proto.RegisterType((*PodAffinity)(nil), "k8s.io.api.core.v1.PodAffinity") - proto.RegisterType((*PodAffinityTerm)(nil), "k8s.io.api.core.v1.PodAffinityTerm") - proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.api.core.v1.PodAntiAffinity") - proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.api.core.v1.PodAttachOptions") - proto.RegisterType((*PodCondition)(nil), "k8s.io.api.core.v1.PodCondition") - proto.RegisterType((*PodDNSConfig)(nil), "k8s.io.api.core.v1.PodDNSConfig") - proto.RegisterType((*PodDNSConfigOption)(nil), "k8s.io.api.core.v1.PodDNSConfigOption") - proto.RegisterType((*PodExecOptions)(nil), "k8s.io.api.core.v1.PodExecOptions") - proto.RegisterType((*PodList)(nil), "k8s.io.api.core.v1.PodList") - proto.RegisterType((*PodLogOptions)(nil), "k8s.io.api.core.v1.PodLogOptions") - proto.RegisterType((*PodPortForwardOptions)(nil), "k8s.io.api.core.v1.PodPortForwardOptions") - proto.RegisterType((*PodProxyOptions)(nil), "k8s.io.api.core.v1.PodProxyOptions") - proto.RegisterType((*PodReadinessGate)(nil), "k8s.io.api.core.v1.PodReadinessGate") - proto.RegisterType((*PodSecurityContext)(nil), "k8s.io.api.core.v1.PodSecurityContext") - proto.RegisterType((*PodSignature)(nil), "k8s.io.api.core.v1.PodSignature") - proto.RegisterType((*PodSpec)(nil), "k8s.io.api.core.v1.PodSpec") - proto.RegisterType((*PodStatus)(nil), "k8s.io.api.core.v1.PodStatus") - proto.RegisterType((*PodStatusResult)(nil), "k8s.io.api.core.v1.PodStatusResult") - proto.RegisterType((*PodTemplate)(nil), "k8s.io.api.core.v1.PodTemplate") - 
proto.RegisterType((*PodTemplateList)(nil), "k8s.io.api.core.v1.PodTemplateList") - proto.RegisterType((*PodTemplateSpec)(nil), "k8s.io.api.core.v1.PodTemplateSpec") - proto.RegisterType((*PortworxVolumeSource)(nil), "k8s.io.api.core.v1.PortworxVolumeSource") - proto.RegisterType((*Preconditions)(nil), "k8s.io.api.core.v1.Preconditions") - proto.RegisterType((*PreferAvoidPodsEntry)(nil), "k8s.io.api.core.v1.PreferAvoidPodsEntry") - proto.RegisterType((*PreferredSchedulingTerm)(nil), "k8s.io.api.core.v1.PreferredSchedulingTerm") - proto.RegisterType((*Probe)(nil), "k8s.io.api.core.v1.Probe") - proto.RegisterType((*ProjectedVolumeSource)(nil), "k8s.io.api.core.v1.ProjectedVolumeSource") - proto.RegisterType((*QuobyteVolumeSource)(nil), "k8s.io.api.core.v1.QuobyteVolumeSource") - proto.RegisterType((*RBDPersistentVolumeSource)(nil), "k8s.io.api.core.v1.RBDPersistentVolumeSource") - proto.RegisterType((*RBDVolumeSource)(nil), "k8s.io.api.core.v1.RBDVolumeSource") - proto.RegisterType((*RangeAllocation)(nil), "k8s.io.api.core.v1.RangeAllocation") - proto.RegisterType((*ReplicationController)(nil), "k8s.io.api.core.v1.ReplicationController") - proto.RegisterType((*ReplicationControllerCondition)(nil), "k8s.io.api.core.v1.ReplicationControllerCondition") - proto.RegisterType((*ReplicationControllerList)(nil), "k8s.io.api.core.v1.ReplicationControllerList") - proto.RegisterType((*ReplicationControllerSpec)(nil), "k8s.io.api.core.v1.ReplicationControllerSpec") - proto.RegisterType((*ReplicationControllerStatus)(nil), "k8s.io.api.core.v1.ReplicationControllerStatus") - proto.RegisterType((*ResourceFieldSelector)(nil), "k8s.io.api.core.v1.ResourceFieldSelector") - proto.RegisterType((*ResourceQuota)(nil), "k8s.io.api.core.v1.ResourceQuota") - proto.RegisterType((*ResourceQuotaList)(nil), "k8s.io.api.core.v1.ResourceQuotaList") - proto.RegisterType((*ResourceQuotaSpec)(nil), "k8s.io.api.core.v1.ResourceQuotaSpec") - proto.RegisterType((*ResourceQuotaStatus)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus") - proto.RegisterType((*ResourceRequirements)(nil), "k8s.io.api.core.v1.ResourceRequirements") - proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.api.core.v1.SELinuxOptions") - proto.RegisterType((*ScaleIOPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOPersistentVolumeSource") - proto.RegisterType((*ScaleIOVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOVolumeSource") - proto.RegisterType((*ScopeSelector)(nil), "k8s.io.api.core.v1.ScopeSelector") - proto.RegisterType((*ScopedResourceSelectorRequirement)(nil), "k8s.io.api.core.v1.ScopedResourceSelectorRequirement") - proto.RegisterType((*Secret)(nil), "k8s.io.api.core.v1.Secret") - proto.RegisterType((*SecretEnvSource)(nil), "k8s.io.api.core.v1.SecretEnvSource") - proto.RegisterType((*SecretKeySelector)(nil), "k8s.io.api.core.v1.SecretKeySelector") - proto.RegisterType((*SecretList)(nil), "k8s.io.api.core.v1.SecretList") - proto.RegisterType((*SecretProjection)(nil), "k8s.io.api.core.v1.SecretProjection") - proto.RegisterType((*SecretReference)(nil), "k8s.io.api.core.v1.SecretReference") - proto.RegisterType((*SecretVolumeSource)(nil), "k8s.io.api.core.v1.SecretVolumeSource") - proto.RegisterType((*SecurityContext)(nil), "k8s.io.api.core.v1.SecurityContext") - proto.RegisterType((*SerializedReference)(nil), "k8s.io.api.core.v1.SerializedReference") - proto.RegisterType((*Service)(nil), "k8s.io.api.core.v1.Service") - proto.RegisterType((*ServiceAccount)(nil), "k8s.io.api.core.v1.ServiceAccount") - proto.RegisterType((*ServiceAccountList)(nil), 
"k8s.io.api.core.v1.ServiceAccountList") - proto.RegisterType((*ServiceAccountTokenProjection)(nil), "k8s.io.api.core.v1.ServiceAccountTokenProjection") - proto.RegisterType((*ServiceList)(nil), "k8s.io.api.core.v1.ServiceList") - proto.RegisterType((*ServicePort)(nil), "k8s.io.api.core.v1.ServicePort") - proto.RegisterType((*ServiceProxyOptions)(nil), "k8s.io.api.core.v1.ServiceProxyOptions") - proto.RegisterType((*ServiceSpec)(nil), "k8s.io.api.core.v1.ServiceSpec") - proto.RegisterType((*ServiceStatus)(nil), "k8s.io.api.core.v1.ServiceStatus") - proto.RegisterType((*SessionAffinityConfig)(nil), "k8s.io.api.core.v1.SessionAffinityConfig") - proto.RegisterType((*StorageOSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSPersistentVolumeSource") - proto.RegisterType((*StorageOSVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSVolumeSource") - proto.RegisterType((*Sysctl)(nil), "k8s.io.api.core.v1.Sysctl") - proto.RegisterType((*TCPSocketAction)(nil), "k8s.io.api.core.v1.TCPSocketAction") - proto.RegisterType((*Taint)(nil), "k8s.io.api.core.v1.Taint") - proto.RegisterType((*Toleration)(nil), "k8s.io.api.core.v1.Toleration") - proto.RegisterType((*TopologySelectorLabelRequirement)(nil), "k8s.io.api.core.v1.TopologySelectorLabelRequirement") - proto.RegisterType((*TopologySelectorTerm)(nil), "k8s.io.api.core.v1.TopologySelectorTerm") - proto.RegisterType((*TypedLocalObjectReference)(nil), "k8s.io.api.core.v1.TypedLocalObjectReference") - proto.RegisterType((*Volume)(nil), "k8s.io.api.core.v1.Volume") - proto.RegisterType((*VolumeDevice)(nil), "k8s.io.api.core.v1.VolumeDevice") - proto.RegisterType((*VolumeMount)(nil), "k8s.io.api.core.v1.VolumeMount") - proto.RegisterType((*VolumeNodeAffinity)(nil), "k8s.io.api.core.v1.VolumeNodeAffinity") - proto.RegisterType((*VolumeProjection)(nil), "k8s.io.api.core.v1.VolumeProjection") - proto.RegisterType((*VolumeSource)(nil), "k8s.io.api.core.v1.VolumeSource") - proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "k8s.io.api.core.v1.VsphereVirtualDiskVolumeSource") - proto.RegisterType((*WeightedPodAffinityTerm)(nil), "k8s.io.api.core.v1.WeightedPodAffinityTerm") +var xxx_messageInfo_NodeList proto.InternalMessageInfo + +func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } +func (*NodeProxyOptions) ProtoMessage() {} +func (*NodeProxyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{95} } -func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +func (m *NodeProxyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeProxyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *NodeProxyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeProxyOptions.Merge(m, src) +} +func (m *NodeProxyOptions) XXX_Size() int { + return m.Size() +} +func (m *NodeProxyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_NodeProxyOptions.DiscardUnknown(m) } -func (m *AWSElasticBlockStoreVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) - i += copy(dAtA[i:], m.VolumeID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x18 - 
i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Partition)) - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 +var xxx_messageInfo_NodeProxyOptions proto.InternalMessageInfo + +func (m *NodeResources) Reset() { *m = NodeResources{} } +func (*NodeResources) ProtoMessage() {} +func (*NodeResources) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{96} +} +func (m *NodeResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i++ - return i, nil + return b[:n], nil +} +func (m *NodeResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeResources.Merge(m, src) +} +func (m *NodeResources) XXX_Size() int { + return m.Size() +} +func (m *NodeResources) XXX_DiscardUnknown() { + xxx_messageInfo_NodeResources.DiscardUnknown(m) } -func (m *Affinity) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_NodeResources proto.InternalMessageInfo + +func (m *NodeSelector) Reset() { *m = NodeSelector{} } +func (*NodeSelector) ProtoMessage() {} +func (*NodeSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{97} +} +func (m *NodeSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *NodeSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeSelector.Merge(m, src) +} +func (m *NodeSelector) XXX_Size() int { + return m.Size() +} +func (m *NodeSelector) XXX_DiscardUnknown() { + xxx_messageInfo_NodeSelector.DiscardUnknown(m) } -func (m *Affinity) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.NodeAffinity != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NodeAffinity.Size())) - n1, err := m.NodeAffinity.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if m.PodAffinity != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodAffinity.Size())) - n2, err := m.PodAffinity.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - if m.PodAntiAffinity != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodAntiAffinity.Size())) - n3, err := m.PodAntiAffinity.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 +var xxx_messageInfo_NodeSelector proto.InternalMessageInfo + +func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } +func (*NodeSelectorRequirement) ProtoMessage() {} +func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{98} +} +func (m *NodeSelectorRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *NodeSelectorRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeSelectorRequirement.Merge(m, src) +} +func (m *NodeSelectorRequirement) XXX_Size() int { + return m.Size() +} +func (m 
*NodeSelectorRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_NodeSelectorRequirement.DiscardUnknown(m) } -func (m *AttachedVolume) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_NodeSelectorRequirement proto.InternalMessageInfo + +func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } +func (*NodeSelectorTerm) ProtoMessage() {} +func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{99} +} +func (m *NodeSelectorTerm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeSelectorTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *AttachedVolume) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath))) - i += copy(dAtA[i:], m.DevicePath) - return i, nil +func (m *NodeSelectorTerm) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeSelectorTerm.Merge(m, src) +} +func (m *NodeSelectorTerm) XXX_Size() int { + return m.Size() +} +func (m *NodeSelectorTerm) XXX_DiscardUnknown() { + xxx_messageInfo_NodeSelectorTerm.DiscardUnknown(m) } -func (m *AvoidPods) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_NodeSelectorTerm proto.InternalMessageInfo + +func (m *NodeSpec) Reset() { *m = NodeSpec{} } +func (*NodeSpec) ProtoMessage() {} +func (*NodeSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{100} +} +func (m *NodeSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *NodeSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeSpec.Merge(m, src) +} +func (m *NodeSpec) XXX_Size() int { + return m.Size() +} +func (m *NodeSpec) XXX_DiscardUnknown() { + xxx_messageInfo_NodeSpec.DiscardUnknown(m) } -func (m *AvoidPods) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.PreferAvoidPods) > 0 { - for _, msg := range m.PreferAvoidPods { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } +var xxx_messageInfo_NodeSpec proto.InternalMessageInfo + +func (m *NodeStatus) Reset() { *m = NodeStatus{} } +func (*NodeStatus) ProtoMessage() {} +func (*NodeStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{101} +} +func (m *NodeStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *NodeStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeStatus.Merge(m, src) +} +func (m *NodeStatus) XXX_Size() int { + return m.Size() +} +func (m *NodeStatus) XXX_DiscardUnknown() { + 
xxx_messageInfo_NodeStatus.DiscardUnknown(m) } -func (m *AzureDiskVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_NodeStatus proto.InternalMessageInfo + +func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } +func (*NodeSystemInfo) ProtoMessage() {} +func (*NodeSystemInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{102} +} +func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeSystemInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *NodeSystemInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeSystemInfo.Merge(m, src) +} +func (m *NodeSystemInfo) XXX_Size() int { + return m.Size() +} +func (m *NodeSystemInfo) XXX_DiscardUnknown() { + xxx_messageInfo_NodeSystemInfo.DiscardUnknown(m) } -func (m *AzureDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DiskName))) - i += copy(dAtA[i:], m.DiskName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DataDiskURI))) - i += copy(dAtA[i:], m.DataDiskURI) - if m.CachingMode != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CachingMode))) - i += copy(dAtA[i:], *m.CachingMode) - } - if m.FSType != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) - i += copy(dAtA[i:], *m.FSType) - } - if m.ReadOnly != nil { - dAtA[i] = 0x28 - i++ - if *m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.Kind != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Kind))) - i += copy(dAtA[i:], *m.Kind) +var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo + +func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } +func (*ObjectFieldSelector) ProtoMessage() {} +func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{103} +} +func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectFieldSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *ObjectFieldSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectFieldSelector.Merge(m, src) +} +func (m *ObjectFieldSelector) XXX_Size() int { + return m.Size() +} +func (m *ObjectFieldSelector) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectFieldSelector.DiscardUnknown(m) } -func (m *AzureFilePersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo + +func (m *ObjectReference) Reset() { *m = ObjectReference{} } +func (*ObjectReference) ProtoMessage() {} +func (*ObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{104} +} +func (m *ObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { 
return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectReference.Merge(m, src) +} +func (m *ObjectReference) XXX_Size() int { + return m.Size() +} +func (m *ObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectReference.DiscardUnknown(m) } -func (m *AzureFilePersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) - i += copy(dAtA[i:], m.SecretName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShareName))) - i += copy(dAtA[i:], m.ShareName) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecretNamespace != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SecretNamespace))) - i += copy(dAtA[i:], *m.SecretNamespace) +var xxx_messageInfo_ObjectReference proto.InternalMessageInfo + +func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } +func (*PersistentVolume) ProtoMessage() {} +func (*PersistentVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{105} +} +func (m *PersistentVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PersistentVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolume.Merge(m, src) +} +func (m *PersistentVolume) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolume) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolume.DiscardUnknown(m) } -func (m *AzureFileVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo + +func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } +func (*PersistentVolumeClaim) ProtoMessage() {} +func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{106} +} +func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PersistentVolumeClaim) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaim.Merge(m, src) +} +func (m *PersistentVolumeClaim) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaim) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaim.DiscardUnknown(m) } -func (m *AzureFileVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) - i += copy(dAtA[i:], m.SecretName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShareName))) - i += copy(dAtA[i:], m.ShareName) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 +var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo + +func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } +func 
(*PersistentVolumeClaimCondition) ProtoMessage() {} +func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{107} +} +func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaimCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i++ - return i, nil + return b[:n], nil +} +func (m *PersistentVolumeClaimCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaimCondition.Merge(m, src) +} +func (m *PersistentVolumeClaimCondition) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaimCondition) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaimCondition.DiscardUnknown(m) } -func (m *Binding) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo + +func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } +func (*PersistentVolumeClaimList) ProtoMessage() {} +func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{108} +} +func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaimList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PersistentVolumeClaimList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaimList.Merge(m, src) +} +func (m *PersistentVolumeClaimList) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaimList) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaimList.DiscardUnknown(m) } -func (m *Binding) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo + +func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } +func (*PersistentVolumeClaimSpec) ProtoMessage() {} +func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{109} +} +func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaimSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n5, err := m.Target.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *PersistentVolumeClaimSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaimSpec.Merge(m, src) +} +func (m *PersistentVolumeClaimSpec) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaimSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaimSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo + +func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } +func (*PersistentVolumeClaimStatus) 
ProtoMessage() {} +func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{110} +} +func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n5 - return i, nil + return b[:n], nil +} +func (m *PersistentVolumeClaimStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaimStatus.Merge(m, src) +} +func (m *PersistentVolumeClaimStatus) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaimStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaimStatus.DiscardUnknown(m) } -func (m *CSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo + +func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } +func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} +func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{111} +} +func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaimVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PersistentVolumeClaimVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaimVolumeSource.Merge(m, src) +} +func (m *PersistentVolumeClaimVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaimVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaimVolumeSource.DiscardUnknown(m) } -func (m *CSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i += copy(dAtA[i:], m.Driver) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeHandle))) - i += copy(dAtA[i:], m.VolumeHandle) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - if len(m.VolumeAttributes) > 0 { - keysForVolumeAttributes := make([]string, 0, len(m.VolumeAttributes)) - for k := range m.VolumeAttributes { - keysForVolumeAttributes = append(keysForVolumeAttributes, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) - for _, k := range keysForVolumeAttributes { - dAtA[i] = 0x2a - i++ - v := m.VolumeAttributes[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if m.ControllerPublishSecretRef != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ControllerPublishSecretRef.Size())) - n6, err := 
m.ControllerPublishSecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - } - if m.NodeStageSecretRef != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NodeStageSecretRef.Size())) - n7, err := m.NodeStageSecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - if m.NodePublishSecretRef != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NodePublishSecretRef.Size())) - n8, err := m.NodePublishSecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 +var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo + +func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } +func (*PersistentVolumeList) ProtoMessage() {} +func (*PersistentVolumeList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{112} +} +func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PersistentVolumeList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeList.Merge(m, src) +} +func (m *PersistentVolumeList) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeList) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeList.DiscardUnknown(m) } -func (m *CSIVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo + +func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } +func (*PersistentVolumeSource) ProtoMessage() {} +func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{113} +} +func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeSource.Merge(m, src) +} +func (m *PersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeSource.DiscardUnknown(m) } -func (m *CSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i += copy(dAtA[i:], m.Driver) - if m.ReadOnly != nil { - dAtA[i] = 0x10 - i++ - if *m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.FSType != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) - i += copy(dAtA[i:], *m.FSType) - } - if len(m.VolumeAttributes) > 0 { - keysForVolumeAttributes := make([]string, 0, len(m.VolumeAttributes)) - for k := range m.VolumeAttributes { - keysForVolumeAttributes = append(keysForVolumeAttributes, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) - for _, k := range keysForVolumeAttributes { - dAtA[i] = 0x22 - i++ - v := m.VolumeAttributes[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) 
+ sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if m.NodePublishSecretRef != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NodePublishSecretRef.Size())) - n9, err := m.NodePublishSecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - } - return i, nil -} +var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo -func (m *Capabilities) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } +func (*PersistentVolumeSpec) ProtoMessage() {} +func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{114} +} +func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *Capabilities) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Add) > 0 { - for _, s := range m.Add { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Drop) > 0 { - for _, s := range m.Drop { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil +func (m *PersistentVolumeSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeSpec.Merge(m, src) +} +func (m *PersistentVolumeSpec) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeSpec.DiscardUnknown(m) } -func (m *CephFSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo + +func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } +func (*PersistentVolumeStatus) ProtoMessage() {} +func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{115} +} +func (m *PersistentVolumeStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PersistentVolumeStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeStatus.Merge(m, src) +} +func (m *PersistentVolumeStatus) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeStatus.DiscardUnknown(m) } -func (m *CephFSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Monitors) > 0 { - for _, s := range m.Monitors { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = 
uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretFile))) - i += copy(dAtA[i:], m.SecretFile) - if m.SecretRef != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n10, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - } - dAtA[i] = 0x30 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 +var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo + +func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } +func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} +func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{116} +} +func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PhotonPersistentDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i++ - return i, nil + return b[:n], nil +} +func (m *PhotonPersistentDiskVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PhotonPersistentDiskVolumeSource.Merge(m, src) +} +func (m *PhotonPersistentDiskVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *PhotonPersistentDiskVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_PhotonPersistentDiskVolumeSource.DiscardUnknown(m) } -func (m *CephFSVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo + +func (m *Pod) Reset() { *m = Pod{} } +func (*Pod) ProtoMessage() {} +func (*Pod) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{117} +} +func (m *Pod) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Pod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *Pod) XXX_Merge(src proto.Message) { + xxx_messageInfo_Pod.Merge(m, src) +} +func (m *Pod) XXX_Size() int { + return m.Size() +} +func (m *Pod) XXX_DiscardUnknown() { + xxx_messageInfo_Pod.DiscardUnknown(m) } -func (m *CephFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Monitors) > 0 { - for _, s := range m.Monitors { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretFile))) - i += copy(dAtA[i:], m.SecretFile) - if m.SecretRef != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n11, err := 
m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - } - dAtA[i] = 0x30 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 +var xxx_messageInfo_Pod proto.InternalMessageInfo + +func (m *PodAffinity) Reset() { *m = PodAffinity{} } +func (*PodAffinity) ProtoMessage() {} +func (*PodAffinity) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{118} +} +func (m *PodAffinity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i++ - return i, nil + return b[:n], nil +} +func (m *PodAffinity) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodAffinity.Merge(m, src) +} +func (m *PodAffinity) XXX_Size() int { + return m.Size() +} +func (m *PodAffinity) XXX_DiscardUnknown() { + xxx_messageInfo_PodAffinity.DiscardUnknown(m) } -func (m *CinderPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodAffinity proto.InternalMessageInfo + +func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } +func (*PodAffinityTerm) ProtoMessage() {} +func (*PodAffinityTerm) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{119} +} +func (m *PodAffinityTerm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodAffinityTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodAffinityTerm) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodAffinityTerm.Merge(m, src) +} +func (m *PodAffinityTerm) XXX_Size() int { + return m.Size() +} +func (m *PodAffinityTerm) XXX_DiscardUnknown() { + xxx_messageInfo_PodAffinityTerm.DiscardUnknown(m) } -func (m *CinderPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) - i += copy(dAtA[i:], m.VolumeID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecretRef != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n12, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 +var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo + +func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } +func (*PodAntiAffinity) ProtoMessage() {} +func (*PodAntiAffinity) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{120} +} +func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodAntiAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodAntiAffinity) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodAntiAffinity.Merge(m, src) +} +func (m *PodAntiAffinity) XXX_Size() int { + return m.Size() +} +func (m *PodAntiAffinity) XXX_DiscardUnknown() { + xxx_messageInfo_PodAntiAffinity.DiscardUnknown(m) } -func (m *CinderVolumeSource) Marshal() 
(dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo + +func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } +func (*PodAttachOptions) ProtoMessage() {} +func (*PodAttachOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{121} +} +func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodAttachOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodAttachOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodAttachOptions.Merge(m, src) +} +func (m *PodAttachOptions) XXX_Size() int { + return m.Size() +} +func (m *PodAttachOptions) XXX_DiscardUnknown() { + xxx_messageInfo_PodAttachOptions.DiscardUnknown(m) } -func (m *CinderVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) - i += copy(dAtA[i:], m.VolumeID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecretRef != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n13, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 +var xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo + +func (m *PodCondition) Reset() { *m = PodCondition{} } +func (*PodCondition) ProtoMessage() {} +func (*PodCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{122} +} +func (m *PodCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodCondition.Merge(m, src) +} +func (m *PodCondition) XXX_Size() int { + return m.Size() +} +func (m *PodCondition) XXX_DiscardUnknown() { + xxx_messageInfo_PodCondition.DiscardUnknown(m) } -func (m *ClientIPConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodCondition proto.InternalMessageInfo + +func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } +func (*PodDNSConfig) ProtoMessage() {} +func (*PodDNSConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{123} +} +func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDNSConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodDNSConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDNSConfig.Merge(m, src) +} +func (m *PodDNSConfig) XXX_Size() int { + return m.Size() +} +func (m *PodDNSConfig) XXX_DiscardUnknown() { + xxx_messageInfo_PodDNSConfig.DiscardUnknown(m) } -func (m *ClientIPConfig) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.TimeoutSeconds 
!= nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) +var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo + +func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } +func (*PodDNSConfigOption) ProtoMessage() {} +func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{124} +} +func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDNSConfigOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodDNSConfigOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDNSConfigOption.Merge(m, src) +} +func (m *PodDNSConfigOption) XXX_Size() int { + return m.Size() +} +func (m *PodDNSConfigOption) XXX_DiscardUnknown() { + xxx_messageInfo_PodDNSConfigOption.DiscardUnknown(m) } -func (m *ComponentCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo + +func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } +func (*PodExecOptions) ProtoMessage() {} +func (*PodExecOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{125} +} +func (m *PodExecOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodExecOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *ComponentCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) - i += copy(dAtA[i:], m.Error) - return i, nil +func (m *PodExecOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodExecOptions.Merge(m, src) +} +func (m *PodExecOptions) XXX_Size() int { + return m.Size() +} +func (m *PodExecOptions) XXX_DiscardUnknown() { + xxx_messageInfo_PodExecOptions.DiscardUnknown(m) } -func (m *ComponentStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo + +func (m *PodIP) Reset() { *m = PodIP{} } +func (*PodIP) ProtoMessage() {} +func (*PodIP) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{126} +} +func (m *PodIP) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodIP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodIP) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodIP.Merge(m, src) +} +func (m *PodIP) XXX_Size() int { + return m.Size() +} +func (m *PodIP) XXX_DiscardUnknown() { + xxx_messageInfo_PodIP.DiscardUnknown(m) } -func (m *ComponentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = 
i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n14, err := m.ObjectMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodIP proto.InternalMessageInfo + +func (m *PodList) Reset() { *m = PodList{} } +func (*PodList) ProtoMessage() {} +func (*PodList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{127} +} +func (m *PodList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n14 - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodList.Merge(m, src) +} +func (m *PodList) XXX_Size() int { + return m.Size() +} +func (m *PodList) XXX_DiscardUnknown() { + xxx_messageInfo_PodList.DiscardUnknown(m) } -func (m *ComponentStatusList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodList proto.InternalMessageInfo + +func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } +func (*PodLogOptions) ProtoMessage() {} +func (*PodLogOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{128} +} +func (m *PodLogOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodLogOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodLogOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodLogOptions.Merge(m, src) +} +func (m *PodLogOptions) XXX_Size() int { + return m.Size() +} +func (m *PodLogOptions) XXX_DiscardUnknown() { + xxx_messageInfo_PodLogOptions.DiscardUnknown(m) } -func (m *ComponentStatusList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n15, err := m.ListMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodLogOptions proto.InternalMessageInfo + +func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } +func (*PodPortForwardOptions) ProtoMessage() {} +func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{129} +} +func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodPortForwardOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n15 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodPortForwardOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodPortForwardOptions.Merge(m, src) +} +func (m *PodPortForwardOptions) XXX_Size() int { + return m.Size() +} +func (m *PodPortForwardOptions) XXX_DiscardUnknown() { + 
xxx_messageInfo_PodPortForwardOptions.DiscardUnknown(m) } -func (m *ConfigMap) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo + +func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } +func (*PodProxyOptions) ProtoMessage() {} +func (*PodProxyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{130} +} +func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodProxyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodProxyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodProxyOptions.Merge(m, src) +} +func (m *PodProxyOptions) XXX_Size() int { + return m.Size() +} +func (m *PodProxyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_PodProxyOptions.DiscardUnknown(m) } -func (m *ConfigMap) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n16, err := m.ObjectMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo + +func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } +func (*PodReadinessGate) ProtoMessage() {} +func (*PodReadinessGate) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{131} +} +func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodReadinessGate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n16 - if len(m.Data) > 0 { - keysForData := make([]string, 0, len(m.Data)) - for k := range m.Data { - keysForData = append(keysForData, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForData) - for _, k := range keysForData { - dAtA[i] = 0x12 - i++ - v := m.Data[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if len(m.BinaryData) > 0 { - keysForBinaryData := make([]string, 0, len(m.BinaryData)) - for k := range m.BinaryData { - keysForBinaryData = append(keysForBinaryData, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForBinaryData) - for _, k := range keysForBinaryData { - dAtA[i] = 0x1a - i++ - v := m.BinaryData[string(k)] - byteSize := 0 - if v != nil { - byteSize = 1 + len(v) + sovGenerated(uint64(len(v))) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + byteSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - if v != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodReadinessGate) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodReadinessGate.Merge(m, src) +} +func (m *PodReadinessGate) XXX_Size() int { + return m.Size() +} +func (m *PodReadinessGate) 
XXX_DiscardUnknown() { + xxx_messageInfo_PodReadinessGate.DiscardUnknown(m) } -func (m *ConfigMapEnvSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo + +func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } +func (*PodSecurityContext) ProtoMessage() {} +func (*PodSecurityContext) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{132} +} +func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodSecurityContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityContext.Merge(m, src) +} +func (m *PodSecurityContext) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityContext) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityContext.DiscardUnknown(m) } -func (m *ConfigMapEnvSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n17, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo + +func (m *PodSignature) Reset() { *m = PodSignature{} } +func (*PodSignature) ProtoMessage() {} +func (*PodSignature) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{133} +} +func (m *PodSignature) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSignature) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n17 - if m.Optional != nil { - dAtA[i] = 0x10 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodSignature) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSignature.Merge(m, src) +} +func (m *PodSignature) XXX_Size() int { + return m.Size() +} +func (m *PodSignature) XXX_DiscardUnknown() { + xxx_messageInfo_PodSignature.DiscardUnknown(m) } -func (m *ConfigMapKeySelector) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodSignature proto.InternalMessageInfo + +func (m *PodSpec) Reset() { *m = PodSpec{} } +func (*PodSpec) ProtoMessage() {} +func (*PodSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{134} +} +func (m *PodSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSpec.Merge(m, src) +} +func (m *PodSpec) XXX_Size() int { + return m.Size() +} +func (m *PodSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodSpec.DiscardUnknown(m) } -func (m *ConfigMapKeySelector) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n18, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) 
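The regenerated hunks in this vendored file all follow one mechanical pattern, apparently from rerunning a newer gogo protobuf generator: the old hand-indexed Marshal/MarshalTo pairs, which walk an index i forward through a pre-sized buffer, are removed, and each message type instead gains Descriptor plus XXX_Unmarshal/XXX_Marshal/XXX_Merge/XXX_Size/XXX_DiscardUnknown wrappers registered through a package-level xxx_messageInfo_<Type> variable, with marshalling delegated to MarshalToSizedBuffer, which fills the buffer from the end. The sketch below only illustrates that forward-versus-backward difference; the toyMsg type and its field layout are invented for this note and appear nowhere in the change.

// Illustrative sketch only: toyMsg is not part of the generated code in this
// diff. It mimics the two marshalling styles visible in the hunks.
package main

import "fmt"

type toyMsg struct {
	Name string // encoded as protobuf field 1, wire type 2 (length-delimited)
}

// Size assumes len(Name) < 128 so the length fits in a single varint byte.
func (m *toyMsg) Size() int { return 1 + 1 + len(m.Name) }

// Old style (removed lines): advance an index i forward through dAtA.
func (m *toyMsg) MarshalTo(dAtA []byte) (int, error) {
	i := 0
	dAtA[i] = 0xa // tag: field 1, wire type 2
	i++
	dAtA[i] = byte(len(m.Name))
	i++
	i += copy(dAtA[i:], m.Name)
	return i, nil
}

// New style (added lines): start at len(dAtA) and write backwards, returning
// how many bytes were written at the tail of the buffer.
func (m *toyMsg) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	i -= len(m.Name)
	copy(dAtA[i:], m.Name)
	i--
	dAtA[i] = byte(len(m.Name))
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}

func main() {
	m := &toyMsg{Name: "etcd"}

	fwd := make([]byte, m.Size())
	n, _ := m.MarshalTo(fwd)

	bwd := make([]byte, m.Size())
	k, _ := m.MarshalToSizedBuffer(bwd)

	// Both produce the same wire bytes: [10 4 101 116 99 100].
	fmt.Println(fwd[:n])
	fmt.Println(bwd[len(bwd)-k:])
}

A likely motivation for the back-to-front style is that the byte count returned after writing a nested message can be used directly as its length prefix, rather than computing Size() for it a second time.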
+var xxx_messageInfo_PodSpec proto.InternalMessageInfo + +func (m *PodStatus) Reset() { *m = PodStatus{} } +func (*PodStatus) ProtoMessage() {} +func (*PodStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{135} +} +func (m *PodStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n18 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - if m.Optional != nil { - dAtA[i] = 0x18 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodStatus.Merge(m, src) +} +func (m *PodStatus) XXX_Size() int { + return m.Size() +} +func (m *PodStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodStatus.DiscardUnknown(m) } -func (m *ConfigMapList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodStatus proto.InternalMessageInfo + +func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } +func (*PodStatusResult) ProtoMessage() {} +func (*PodStatusResult) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{136} +} +func (m *PodStatusResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodStatusResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodStatusResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodStatusResult.Merge(m, src) +} +func (m *PodStatusResult) XXX_Size() int { + return m.Size() +} +func (m *PodStatusResult) XXX_DiscardUnknown() { + xxx_messageInfo_PodStatusResult.DiscardUnknown(m) } -func (m *ConfigMapList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n19, err := m.ListMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo + +func (m *PodTemplate) Reset() { *m = PodTemplate{} } +func (*PodTemplate) ProtoMessage() {} +func (*PodTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{137} +} +func (m *PodTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n19 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodTemplate.Merge(m, src) +} +func (m *PodTemplate) XXX_Size() int { + return m.Size() +} +func (m *PodTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_PodTemplate.DiscardUnknown(m) } -func (m *ConfigMapNodeConfigSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodTemplate 
proto.InternalMessageInfo + +func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } +func (*PodTemplateList) ProtoMessage() {} +func (*PodTemplateList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{138} +} +func (m *PodTemplateList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *ConfigMapNodeConfigSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i += copy(dAtA[i:], m.ResourceVersion) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeletConfigKey))) - i += copy(dAtA[i:], m.KubeletConfigKey) - return i, nil +func (m *PodTemplateList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodTemplateList.Merge(m, src) +} +func (m *PodTemplateList) XXX_Size() int { + return m.Size() +} +func (m *PodTemplateList) XXX_DiscardUnknown() { + xxx_messageInfo_PodTemplateList.DiscardUnknown(m) } -func (m *ConfigMapProjection) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo + +func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } +func (*PodTemplateSpec) ProtoMessage() {} +func (*PodTemplateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{139} +} +func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodTemplateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodTemplateSpec.Merge(m, src) +} +func (m *PodTemplateSpec) XXX_Size() int { + return m.Size() +} +func (m *PodTemplateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodTemplateSpec.DiscardUnknown(m) } -func (m *ConfigMapProjection) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n20, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo + +func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } +func (*PortworxVolumeSource) ProtoMessage() {} +func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{140} +} +func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PortworxVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n20 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.Optional != nil { - dAtA[i] = 0x20 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PortworxVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PortworxVolumeSource.Merge(m, src) +} +func (m *PortworxVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *PortworxVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_PortworxVolumeSource.DiscardUnknown(m) } -func (m *ConfigMapVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo + +func (m *Preconditions) Reset() { *m = Preconditions{} } +func (*Preconditions) ProtoMessage() {} +func (*Preconditions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{141} +} +func (m *Preconditions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Preconditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *Preconditions) XXX_Merge(src proto.Message) { + xxx_messageInfo_Preconditions.Merge(m, src) +} +func (m *Preconditions) XXX_Size() int { + return m.Size() +} +func (m *Preconditions) XXX_DiscardUnknown() { + xxx_messageInfo_Preconditions.DiscardUnknown(m) } -func (m *ConfigMapVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n21, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) +var xxx_messageInfo_Preconditions proto.InternalMessageInfo + +func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } +func (*PreferAvoidPodsEntry) ProtoMessage() {} +func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{142} +} +func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PreferAvoidPodsEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n21 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.DefaultMode != nil { - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) - } - if m.Optional != nil { - dAtA[i] = 0x20 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PreferAvoidPodsEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_PreferAvoidPodsEntry.Merge(m, src) +} +func (m *PreferAvoidPodsEntry) XXX_Size() int { + return m.Size() +} +func (m *PreferAvoidPodsEntry) XXX_DiscardUnknown() { + xxx_messageInfo_PreferAvoidPodsEntry.DiscardUnknown(m) } -func (m *Container) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo + +func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } +func 
(*PreferredSchedulingTerm) ProtoMessage() {} +func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{143} +} +func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PreferredSchedulingTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PreferredSchedulingTerm) XXX_Merge(src proto.Message) { + xxx_messageInfo_PreferredSchedulingTerm.Merge(m, src) +} +func (m *PreferredSchedulingTerm) XXX_Size() int { + return m.Size() +} +func (m *PreferredSchedulingTerm) XXX_DiscardUnknown() { + xxx_messageInfo_PreferredSchedulingTerm.DiscardUnknown(m) } -func (m *Container) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) - i += copy(dAtA[i:], m.Image) - if len(m.Command) > 0 { - for _, s := range m.Command { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Args) > 0 { - for _, s := range m.Args { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.WorkingDir))) - i += copy(dAtA[i:], m.WorkingDir) - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Env) > 0 { - for _, msg := range m.Env { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size())) - n22, err := m.Resources.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo + +func (m *Probe) Reset() { *m = Probe{} } +func (*Probe) ProtoMessage() {} +func (*Probe) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{144} +} +func (m *Probe) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Probe) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n22 - if len(m.VolumeMounts) > 0 { - for _, msg := range m.VolumeMounts { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.LivenessProbe != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LivenessProbe.Size())) - n23, err := m.LivenessProbe.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - } - if m.ReadinessProbe != nil { - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadinessProbe.Size())) - n24, err := m.ReadinessProbe.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n24 - } - if m.Lifecycle != nil { - dAtA[i] = 0x62 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.Lifecycle.Size())) - n25, err := m.Lifecycle.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n25 - } - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePath))) - i += copy(dAtA[i:], m.TerminationMessagePath) - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImagePullPolicy))) - i += copy(dAtA[i:], m.ImagePullPolicy) - if m.SecurityContext != nil { - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecurityContext.Size())) - n26, err := m.SecurityContext.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - } - dAtA[i] = 0x80 - i++ - dAtA[i] = 0x1 - i++ - if m.Stdin { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x88 - i++ - dAtA[i] = 0x1 - i++ - if m.StdinOnce { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x90 - i++ - dAtA[i] = 0x1 - i++ - if m.TTY { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.EnvFrom) > 0 { - for _, msg := range m.EnvFrom { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePolicy))) - i += copy(dAtA[i:], m.TerminationMessagePolicy) - if len(m.VolumeDevices) > 0 { - for _, msg := range m.VolumeDevices { - dAtA[i] = 0xaa - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *Probe) XXX_Merge(src proto.Message) { + xxx_messageInfo_Probe.Merge(m, src) +} +func (m *Probe) XXX_Size() int { + return m.Size() +} +func (m *Probe) XXX_DiscardUnknown() { + xxx_messageInfo_Probe.DiscardUnknown(m) } -func (m *ContainerImage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_Probe proto.InternalMessageInfo + +func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } +func (*ProjectedVolumeSource) ProtoMessage() {} +func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{145} +} +func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProjectedVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *ContainerImage) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Names) > 0 { - for _, s := range m.Names { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SizeBytes)) - return i, nil +func (m *ProjectedVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectedVolumeSource.Merge(m, src) +} +func (m *ProjectedVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ProjectedVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ProjectedVolumeSource.DiscardUnknown(m) } -func (m *ContainerPort) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA 
= make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo + +func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } +func (*QuobyteVolumeSource) ProtoMessage() {} +func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{146} +} +func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QuobyteVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *ContainerPort) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HostPort)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ContainerPort)) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) - i += copy(dAtA[i:], m.Protocol) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) - i += copy(dAtA[i:], m.HostIP) - return i, nil +func (m *QuobyteVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_QuobyteVolumeSource.Merge(m, src) +} +func (m *QuobyteVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *QuobyteVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_QuobyteVolumeSource.DiscardUnknown(m) } -func (m *ContainerState) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo + +func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } +func (*RBDPersistentVolumeSource) ProtoMessage() {} +func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{147} +} +func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RBDPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *ContainerState) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Waiting != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Waiting.Size())) - n27, err := m.Waiting.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n27 - } - if m.Running != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Running.Size())) - n28, err := m.Running.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n28 - } - if m.Terminated != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Terminated.Size())) - n29, err := m.Terminated.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n29 - } - return i, nil +func (m *RBDPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_RBDPersistentVolumeSource.Merge(m, src) +} +func (m *RBDPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *RBDPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_RBDPersistentVolumeSource.DiscardUnknown(m) } -func (m *ContainerStateRunning) Marshal() (dAtA []byte, err error) { - size := 
m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo + +func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } +func (*RBDVolumeSource) ProtoMessage() {} +func (*RBDVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{148} +} +func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RBDVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *RBDVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_RBDVolumeSource.Merge(m, src) +} +func (m *RBDVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *RBDVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_RBDVolumeSource.DiscardUnknown(m) } -func (m *ContainerStateRunning) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.StartedAt.Size())) - n30, err := m.StartedAt.MarshalTo(dAtA[i:]) +var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo + +func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } +func (*RangeAllocation) ProtoMessage() {} +func (*RangeAllocation) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{149} +} +func (m *RangeAllocation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RangeAllocation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n30 - return i, nil + return b[:n], nil +} +func (m *RangeAllocation) XXX_Merge(src proto.Message) { + xxx_messageInfo_RangeAllocation.Merge(m, src) +} +func (m *RangeAllocation) XXX_Size() int { + return m.Size() +} +func (m *RangeAllocation) XXX_DiscardUnknown() { + xxx_messageInfo_RangeAllocation.DiscardUnknown(m) } -func (m *ContainerStateTerminated) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo + +func (m *ReplicationController) Reset() { *m = ReplicationController{} } +func (*ReplicationController) ProtoMessage() {} +func (*ReplicationController) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{150} +} +func (m *ReplicationController) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationController) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ReplicationController) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationController.Merge(m, src) +} +func (m *ReplicationController) XXX_Size() int { + return m.Size() +} +func (m *ReplicationController) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationController.DiscardUnknown(m) } -func (m *ContainerStateTerminated) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ExitCode)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Signal)) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - 
dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.StartedAt.Size())) - n31, err := m.StartedAt.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n31 - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FinishedAt.Size())) - n32, err := m.FinishedAt.MarshalTo(dAtA[i:]) +var xxx_messageInfo_ReplicationController proto.InternalMessageInfo + +func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } +func (*ReplicationControllerCondition) ProtoMessage() {} +func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{151} +} +func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationControllerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n32 - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID))) - i += copy(dAtA[i:], m.ContainerID) - return i, nil + return b[:n], nil +} +func (m *ReplicationControllerCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationControllerCondition.Merge(m, src) +} +func (m *ReplicationControllerCondition) XXX_Size() int { + return m.Size() +} +func (m *ReplicationControllerCondition) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationControllerCondition.DiscardUnknown(m) } -func (m *ContainerStateWaiting) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo + +func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } +func (*ReplicationControllerList) ProtoMessage() {} +func (*ReplicationControllerList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{152} +} +func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationControllerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *ContainerStateWaiting) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil +func (m *ReplicationControllerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationControllerList.Merge(m, src) +} +func (m *ReplicationControllerList) XXX_Size() int { + return m.Size() +} +func (m *ReplicationControllerList) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationControllerList.DiscardUnknown(m) } -func (m *ContainerStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo + +func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } +func (*ReplicationControllerSpec) ProtoMessage() {} +func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { + return 
fileDescriptor_83c10c24ec417dc9, []int{153} +} +func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationControllerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ReplicationControllerSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationControllerSpec.Merge(m, src) +} +func (m *ReplicationControllerSpec) XXX_Size() int { + return m.Size() +} +func (m *ReplicationControllerSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationControllerSpec.DiscardUnknown(m) } -func (m *ContainerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.State.Size())) - n33, err := m.State.MarshalTo(dAtA[i:]) +var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo + +func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } +func (*ReplicationControllerStatus) ProtoMessage() {} +func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{154} +} +func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationControllerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n33 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTerminationState.Size())) - n34, err := m.LastTerminationState.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *ReplicationControllerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationControllerStatus.Merge(m, src) +} +func (m *ReplicationControllerStatus) XXX_Size() int { + return m.Size() +} +func (m *ReplicationControllerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationControllerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo + +func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } +func (*ResourceFieldSelector) ProtoMessage() {} +func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{155} +} +func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceFieldSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n34 - dAtA[i] = 0x20 - i++ - if m.Ready { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + return nil, err } - i++ - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RestartCount)) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) - i += copy(dAtA[i:], m.Image) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageID))) - i += copy(dAtA[i:], m.ImageID) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID))) - i += copy(dAtA[i:], m.ContainerID) - return i, nil + return b[:n], nil +} +func (m *ResourceFieldSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceFieldSelector.Merge(m, src) +} +func (m 
*ResourceFieldSelector) XXX_Size() int { + return m.Size() +} +func (m *ResourceFieldSelector) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceFieldSelector.DiscardUnknown(m) } -func (m *DaemonEndpoint) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo + +func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } +func (*ResourceQuota) ProtoMessage() {} +func (*ResourceQuota) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{156} +} +func (m *ResourceQuota) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceQuota) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *DaemonEndpoint) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) - return i, nil +func (m *ResourceQuota) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceQuota.Merge(m, src) +} +func (m *ResourceQuota) XXX_Size() int { + return m.Size() +} +func (m *ResourceQuota) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceQuota.DiscardUnknown(m) } -func (m *DownwardAPIProjection) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo + +func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } +func (*ResourceQuotaList) ProtoMessage() {} +func (*ResourceQuotaList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{157} +} +func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceQuotaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ResourceQuotaList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceQuotaList.Merge(m, src) +} +func (m *ResourceQuotaList) XXX_Size() int { + return m.Size() +} +func (m *ResourceQuotaList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceQuotaList.DiscardUnknown(m) } -func (m *DownwardAPIProjection) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } +var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo + +func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } +func (*ResourceQuotaSpec) ProtoMessage() {} +func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{158} +} +func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceQuotaSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *ResourceQuotaSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceQuotaSpec.Merge(m, src) +} +func (m *ResourceQuotaSpec) XXX_Size() int { + return m.Size() +} +func (m *ResourceQuotaSpec) 
XXX_DiscardUnknown() { + xxx_messageInfo_ResourceQuotaSpec.DiscardUnknown(m) } -func (m *DownwardAPIVolumeFile) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo + +func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } +func (*ResourceQuotaStatus) ProtoMessage() {} +func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{159} +} +func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceQuotaStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ResourceQuotaStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceQuotaStatus.Merge(m, src) +} +func (m *ResourceQuotaStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceQuotaStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceQuotaStatus.DiscardUnknown(m) } -func (m *DownwardAPIVolumeFile) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - if m.FieldRef != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FieldRef.Size())) - n35, err := m.FieldRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n35 - } - if m.ResourceFieldRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceFieldRef.Size())) - n36, err := m.ResourceFieldRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n36 - } - if m.Mode != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) +var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo + +func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } +func (*ResourceRequirements) ProtoMessage() {} +func (*ResourceRequirements) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{160} +} +func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceRequirements) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *ResourceRequirements) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceRequirements.Merge(m, src) +} +func (m *ResourceRequirements) XXX_Size() int { + return m.Size() +} +func (m *ResourceRequirements) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceRequirements.DiscardUnknown(m) } -func (m *DownwardAPIVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo + +func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } +func (*SELinuxOptions) ProtoMessage() {} +func (*SELinuxOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{161} +} +func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SELinuxOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return 
b[:n], nil } - -func (m *DownwardAPIVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.DefaultMode != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) - } - return i, nil +func (m *SELinuxOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SELinuxOptions.Merge(m, src) +} +func (m *SELinuxOptions) XXX_Size() int { + return m.Size() +} +func (m *SELinuxOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SELinuxOptions.DiscardUnknown(m) } -func (m *EmptyDirVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo + +func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } +func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} +func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{162} +} +func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleIOPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *EmptyDirVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Medium))) - i += copy(dAtA[i:], m.Medium) - if m.SizeLimit != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SizeLimit.Size())) - n37, err := m.SizeLimit.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n37 - } - return i, nil +func (m *ScaleIOPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleIOPersistentVolumeSource.Merge(m, src) +} +func (m *ScaleIOPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ScaleIOPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleIOPersistentVolumeSource.DiscardUnknown(m) } -func (m *EndpointAddress) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo + +func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } +func (*ScaleIOVolumeSource) ProtoMessage() {} +func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{163} +} +func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleIOVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *EndpointAddress) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) - i += copy(dAtA[i:], m.IP) - if m.TargetRef != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetRef.Size())) - n38, err := m.TargetRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err 
- } - i += n38 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) - i += copy(dAtA[i:], m.Hostname) - if m.NodeName != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName))) - i += copy(dAtA[i:], *m.NodeName) - } - return i, nil +func (m *ScaleIOVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleIOVolumeSource.Merge(m, src) +} +func (m *ScaleIOVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ScaleIOVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleIOVolumeSource.DiscardUnknown(m) } -func (m *EndpointPort) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo + +func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } +func (*ScopeSelector) ProtoMessage() {} +func (*ScopeSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{164} +} +func (m *ScopeSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScopeSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *EndpointPort) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) - i += copy(dAtA[i:], m.Protocol) - return i, nil +func (m *ScopeSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScopeSelector.Merge(m, src) +} +func (m *ScopeSelector) XXX_Size() int { + return m.Size() +} +func (m *ScopeSelector) XXX_DiscardUnknown() { + xxx_messageInfo_ScopeSelector.DiscardUnknown(m) } -func (m *EndpointSubset) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo + +func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } +func (*ScopedResourceSelectorRequirement) ProtoMessage() {} +func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{165} +} +func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScopedResourceSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *EndpointSubset) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Addresses) > 0 { - for _, msg := range m.Addresses { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.NotReadyAddresses) > 0 { - for _, msg := range m.NotReadyAddresses { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, 
i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil +func (m *ScopedResourceSelectorRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScopedResourceSelectorRequirement.Merge(m, src) +} +func (m *ScopedResourceSelectorRequirement) XXX_Size() int { + return m.Size() +} +func (m *ScopedResourceSelectorRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_ScopedResourceSelectorRequirement.DiscardUnknown(m) } -func (m *Endpoints) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo + +func (m *Secret) Reset() { *m = Secret{} } +func (*Secret) ProtoMessage() {} +func (*Secret) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{166} +} +func (m *Secret) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Secret) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *Secret) XXX_Merge(src proto.Message) { + xxx_messageInfo_Secret.Merge(m, src) +} +func (m *Secret) XXX_Size() int { + return m.Size() +} +func (m *Secret) XXX_DiscardUnknown() { + xxx_messageInfo_Secret.DiscardUnknown(m) } -func (m *Endpoints) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n39, err := m.ObjectMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_Secret proto.InternalMessageInfo + +func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } +func (*SecretEnvSource) ProtoMessage() {} +func (*SecretEnvSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{167} +} +func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretEnvSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n39 - if len(m.Subsets) > 0 { - for _, msg := range m.Subsets { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *SecretEnvSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretEnvSource.Merge(m, src) +} +func (m *SecretEnvSource) XXX_Size() int { + return m.Size() +} +func (m *SecretEnvSource) XXX_DiscardUnknown() { + xxx_messageInfo_SecretEnvSource.DiscardUnknown(m) } -func (m *EndpointsList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo + +func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } +func (*SecretKeySelector) ProtoMessage() {} +func (*SecretKeySelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{168} +} +func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretKeySelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *SecretKeySelector) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_SecretKeySelector.Merge(m, src) +} +func (m *SecretKeySelector) XXX_Size() int { + return m.Size() +} +func (m *SecretKeySelector) XXX_DiscardUnknown() { + xxx_messageInfo_SecretKeySelector.DiscardUnknown(m) } -func (m *EndpointsList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n40, err := m.ListMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo + +func (m *SecretList) Reset() { *m = SecretList{} } +func (*SecretList) ProtoMessage() {} +func (*SecretList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{169} +} +func (m *SecretList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n40 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *SecretList) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretList.Merge(m, src) +} +func (m *SecretList) XXX_Size() int { + return m.Size() +} +func (m *SecretList) XXX_DiscardUnknown() { + xxx_messageInfo_SecretList.DiscardUnknown(m) } -func (m *EnvFromSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_SecretList proto.InternalMessageInfo + +func (m *SecretProjection) Reset() { *m = SecretProjection{} } +func (*SecretProjection) ProtoMessage() {} +func (*SecretProjection) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{170} +} +func (m *SecretProjection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *SecretProjection) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretProjection.Merge(m, src) +} +func (m *SecretProjection) XXX_Size() int { + return m.Size() +} +func (m *SecretProjection) XXX_DiscardUnknown() { + xxx_messageInfo_SecretProjection.DiscardUnknown(m) } -func (m *EnvFromSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Prefix))) - i += copy(dAtA[i:], m.Prefix) - if m.ConfigMapRef != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapRef.Size())) - n41, err := m.ConfigMapRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n41 - } - if m.SecretRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n42, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n42 +var xxx_messageInfo_SecretProjection proto.InternalMessageInfo + +func (m *SecretReference) Reset() { *m = SecretReference{} } +func (*SecretReference) ProtoMessage() {} +func (*SecretReference) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{171} +} +func (m *SecretReference) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *SecretReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *SecretReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretReference.Merge(m, src) +} +func (m *SecretReference) XXX_Size() int { + return m.Size() +} +func (m *SecretReference) XXX_DiscardUnknown() { + xxx_messageInfo_SecretReference.DiscardUnknown(m) } -func (m *EnvVar) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_SecretReference proto.InternalMessageInfo + +func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } +func (*SecretVolumeSource) ProtoMessage() {} +func (*SecretVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{172} +} +func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *SecretVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretVolumeSource.Merge(m, src) +} +func (m *SecretVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *SecretVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_SecretVolumeSource.DiscardUnknown(m) } -func (m *EnvVar) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - if m.ValueFrom != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ValueFrom.Size())) - n43, err := m.ValueFrom.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n43 +var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo + +func (m *SecurityContext) Reset() { *m = SecurityContext{} } +func (*SecurityContext) ProtoMessage() {} +func (*SecurityContext) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{173} +} +func (m *SecurityContext) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecurityContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *SecurityContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecurityContext.Merge(m, src) +} +func (m *SecurityContext) XXX_Size() int { + return m.Size() +} +func (m *SecurityContext) XXX_DiscardUnknown() { + xxx_messageInfo_SecurityContext.DiscardUnknown(m) } -func (m *EnvVarSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_SecurityContext proto.InternalMessageInfo + +func (m *SerializedReference) Reset() { *m = SerializedReference{} } +func (*SerializedReference) ProtoMessage() {} +func (*SerializedReference) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{174} +} +func (m *SerializedReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SerializedReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *SerializedReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_SerializedReference.Merge(m, src) +} +func (m *SerializedReference) XXX_Size() int { + return m.Size() +} +func (m *SerializedReference) XXX_DiscardUnknown() { + xxx_messageInfo_SerializedReference.DiscardUnknown(m) } -func (m *EnvVarSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.FieldRef != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FieldRef.Size())) - n44, err := m.FieldRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n44 - } - if m.ResourceFieldRef != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceFieldRef.Size())) - n45, err := m.ResourceFieldRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n45 - } - if m.ConfigMapKeyRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapKeyRef.Size())) - n46, err := m.ConfigMapKeyRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n46 - } - if m.SecretKeyRef != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretKeyRef.Size())) - n47, err := m.SecretKeyRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n47 +var xxx_messageInfo_SerializedReference proto.InternalMessageInfo + +func (m *Service) Reset() { *m = Service{} } +func (*Service) ProtoMessage() {} +func (*Service) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{175} +} +func (m *Service) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *Service) XXX_Merge(src proto.Message) { + xxx_messageInfo_Service.Merge(m, src) +} +func (m *Service) XXX_Size() int { + return m.Size() +} +func (m *Service) XXX_DiscardUnknown() { + xxx_messageInfo_Service.DiscardUnknown(m) } -func (m *Event) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_Service proto.InternalMessageInfo + +func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } +func (*ServiceAccount) ProtoMessage() {} +func (*ServiceAccount) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{176} +} +func (m *ServiceAccount) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ServiceAccount) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccount.Merge(m, src) +} +func (m *ServiceAccount) XXX_Size() int { + return m.Size() +} +func (m *ServiceAccount) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccount.DiscardUnknown(m) } -func (m *Event) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n48, err := m.ObjectMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo + +func (m *ServiceAccountList) Reset() { *m = 
ServiceAccountList{} } +func (*ServiceAccountList) ProtoMessage() {} +func (*ServiceAccountList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{177} +} +func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccountList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n48 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.InvolvedObject.Size())) - n49, err := m.InvolvedObject.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *ServiceAccountList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccountList.Merge(m, src) +} +func (m *ServiceAccountList) XXX_Size() int { + return m.Size() +} +func (m *ServiceAccountList) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccountList.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo + +func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } +func (*ServiceAccountTokenProjection) ProtoMessage() {} +func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{178} +} +func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccountTokenProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n49 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) - n50, err := m.Source.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *ServiceAccountTokenProjection) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccountTokenProjection.Merge(m, src) +} +func (m *ServiceAccountTokenProjection) XXX_Size() int { + return m.Size() +} +func (m *ServiceAccountTokenProjection) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccountTokenProjection.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo + +func (m *ServiceList) Reset() { *m = ServiceList{} } +func (*ServiceList) ProtoMessage() {} +func (*ServiceList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{179} +} +func (m *ServiceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n50 - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FirstTimestamp.Size())) - n51, err := m.FirstTimestamp.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *ServiceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceList.Merge(m, src) +} +func (m *ServiceList) XXX_Size() int { + return m.Size() +} +func (m *ServiceList) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceList.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceList proto.InternalMessageInfo + +func (m *ServicePort) Reset() { *m = ServicePort{} } +func (*ServicePort) ProtoMessage() {} +func (*ServicePort) Descriptor() ([]byte, []int) { + return 
fileDescriptor_83c10c24ec417dc9, []int{180} +} +func (m *ServicePort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServicePort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n51 - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTimestamp.Size())) - n52, err := m.LastTimestamp.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *ServicePort) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServicePort.Merge(m, src) +} +func (m *ServicePort) XXX_Size() int { + return m.Size() +} +func (m *ServicePort) XXX_DiscardUnknown() { + xxx_messageInfo_ServicePort.DiscardUnknown(m) +} + +var xxx_messageInfo_ServicePort proto.InternalMessageInfo + +func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } +func (*ServiceProxyOptions) ProtoMessage() {} +func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{181} +} +func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceProxyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n52 - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.EventTime.Size())) - n53, err := m.EventTime.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *ServiceProxyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceProxyOptions.Merge(m, src) +} +func (m *ServiceProxyOptions) XXX_Size() int { + return m.Size() +} +func (m *ServiceProxyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceProxyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo + +func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } +func (*ServiceSpec) ProtoMessage() {} +func (*ServiceSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{182} +} +func (m *ServiceSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n53 - if m.Series != nil { - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Series.Size())) - n54, err := m.Series.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n54 - } - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) - i += copy(dAtA[i:], m.Action) - if m.Related != nil { - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Related.Size())) - n55, err := m.Related.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n55 + return nil, err } - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) - i += copy(dAtA[i:], m.ReportingController) - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) - i += copy(dAtA[i:], m.ReportingInstance) - return i, nil + return b[:n], nil +} +func (m *ServiceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceSpec.Merge(m, src) +} +func (m *ServiceSpec) XXX_Size() int { + return m.Size() +} 
+func (m *ServiceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceSpec.DiscardUnknown(m) } -func (m *EventList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo + +func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } +func (*ServiceStatus) ProtoMessage() {} +func (*ServiceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{183} +} +func (m *ServiceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ServiceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceStatus.Merge(m, src) +} +func (m *ServiceStatus) XXX_Size() int { + return m.Size() +} +func (m *ServiceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceStatus.DiscardUnknown(m) } -func (m *EventList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n56, err := m.ListMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo + +func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } +func (*SessionAffinityConfig) ProtoMessage() {} +func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{184} +} +func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SessionAffinityConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n56 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *SessionAffinityConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_SessionAffinityConfig.Merge(m, src) +} +func (m *SessionAffinityConfig) XXX_Size() int { + return m.Size() +} +func (m *SessionAffinityConfig) XXX_DiscardUnknown() { + xxx_messageInfo_SessionAffinityConfig.DiscardUnknown(m) } -func (m *EventSeries) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo + +func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } +func (*StorageOSPersistentVolumeSource) ProtoMessage() {} +func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{185} +} +func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageOSPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *StorageOSPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageOSPersistentVolumeSource.Merge(m, src) +} +func (m *StorageOSPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m 
*StorageOSPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_StorageOSPersistentVolumeSource.DiscardUnknown(m) } -func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastObservedTime.Size())) - n57, err := m.LastObservedTime.MarshalTo(dAtA[i:]) +var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo + +func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } +func (*StorageOSVolumeSource) ProtoMessage() {} +func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{186} +} +func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageOSVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n57 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) - i += copy(dAtA[i:], m.State) - return i, nil + return b[:n], nil +} +func (m *StorageOSVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageOSVolumeSource.Merge(m, src) +} +func (m *StorageOSVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *StorageOSVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_StorageOSVolumeSource.DiscardUnknown(m) } -func (m *EventSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo + +func (m *Sysctl) Reset() { *m = Sysctl{} } +func (*Sysctl) ProtoMessage() {} +func (*Sysctl) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{187} +} +func (m *Sysctl) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Sysctl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *EventSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Component))) - i += copy(dAtA[i:], m.Component) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) - i += copy(dAtA[i:], m.Host) - return i, nil +func (m *Sysctl) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sysctl.Merge(m, src) +} +func (m *Sysctl) XXX_Size() int { + return m.Size() +} +func (m *Sysctl) XXX_DiscardUnknown() { + xxx_messageInfo_Sysctl.DiscardUnknown(m) } -func (m *ExecAction) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_Sysctl proto.InternalMessageInfo + +func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } +func (*TCPSocketAction) ProtoMessage() {} +func (*TCPSocketAction) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{188} +} +func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TCPSocketAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *TCPSocketAction) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_TCPSocketAction.Merge(m, src) +} +func (m *TCPSocketAction) XXX_Size() int { + return m.Size() +} +func (m *TCPSocketAction) XXX_DiscardUnknown() { + xxx_messageInfo_TCPSocketAction.DiscardUnknown(m) } -func (m *ExecAction) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Command) > 0 { - for _, s := range m.Command { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } +var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo + +func (m *Taint) Reset() { *m = Taint{} } +func (*Taint) ProtoMessage() {} +func (*Taint) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{189} +} +func (m *Taint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Taint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *Taint) XXX_Merge(src proto.Message) { + xxx_messageInfo_Taint.Merge(m, src) +} +func (m *Taint) XXX_Size() int { + return m.Size() +} +func (m *Taint) XXX_DiscardUnknown() { + xxx_messageInfo_Taint.DiscardUnknown(m) } -func (m *FCVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_Taint proto.InternalMessageInfo + +func (m *Toleration) Reset() { *m = Toleration{} } +func (*Toleration) ProtoMessage() {} +func (*Toleration) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{190} +} +func (m *Toleration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Toleration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *FCVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.TargetWWNs) > 0 { - for _, s := range m.TargetWWNs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if m.Lun != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Lun)) - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.WWIDs) > 0 { - for _, s := range m.WWIDs { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil +func (m *Toleration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Toleration.Merge(m, src) } - -func (m *FlexPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (m *Toleration) XXX_Size() int { + return m.Size() } - -func (m *FlexPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i += copy(dAtA[i:], m.Driver) - dAtA[i] = 0x12 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - if m.SecretRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n58, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n58 - } - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.Options) > 0 { - keysForOptions := make([]string, 0, len(m.Options)) - for k := range m.Options { - keysForOptions = append(keysForOptions, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) - for _, k := range keysForOptions { - dAtA[i] = 0x2a - i++ - v := m.Options[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - return i, nil +func (m *Toleration) XXX_DiscardUnknown() { + xxx_messageInfo_Toleration.DiscardUnknown(m) } -func (m *FlexVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} +var xxx_messageInfo_Toleration proto.InternalMessageInfo -func (m *FlexVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i += copy(dAtA[i:], m.Driver) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - if m.SecretRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n59, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n59 - } - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.Options) > 0 { - keysForOptions := make([]string, 0, len(m.Options)) - for k := range m.Options { - keysForOptions = append(keysForOptions, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) - for _, k := range keysForOptions { - dAtA[i] = 0x2a - i++ - v := m.Options[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - return i, nil +func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } +func (*TopologySelectorLabelRequirement) ProtoMessage() {} +func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{191} } - -func (m *FlockerVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TopologySelectorLabelRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m 
*FlockerVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetName))) - i += copy(dAtA[i:], m.DatasetName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetUUID))) - i += copy(dAtA[i:], m.DatasetUUID) - return i, nil +func (m *TopologySelectorLabelRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopologySelectorLabelRequirement.Merge(m, src) } - -func (m *GCEPersistentDiskVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (m *TopologySelectorLabelRequirement) XXX_Size() int { + return m.Size() } - -func (m *GCEPersistentDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PDName))) - i += copy(dAtA[i:], m.PDName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Partition)) - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil +func (m *TopologySelectorLabelRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_TopologySelectorLabelRequirement.DiscardUnknown(m) } -func (m *GitRepoVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo + +func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } +func (*TopologySelectorTerm) ProtoMessage() {} +func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{192} +} +func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TopologySelectorTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *GitRepoVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Repository))) - i += copy(dAtA[i:], m.Repository) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Revision))) - i += copy(dAtA[i:], m.Revision) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Directory))) - i += copy(dAtA[i:], m.Directory) - return i, nil +func (m *TopologySelectorTerm) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopologySelectorTerm.Merge(m, src) +} +func (m *TopologySelectorTerm) XXX_Size() int { + return m.Size() +} +func (m *TopologySelectorTerm) XXX_DiscardUnknown() { + xxx_messageInfo_TopologySelectorTerm.DiscardUnknown(m) } -func (m *GlusterfsPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo + +func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} } +func (*TopologySpreadConstraint) ProtoMessage() {} +func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{193} +} +func (m 
*TopologySpreadConstraint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TopologySpreadConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *GlusterfsPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName))) - i += copy(dAtA[i:], m.EndpointsName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.EndpointsNamespace != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.EndpointsNamespace))) - i += copy(dAtA[i:], *m.EndpointsNamespace) - } - return i, nil +func (m *TopologySpreadConstraint) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopologySpreadConstraint.Merge(m, src) +} +func (m *TopologySpreadConstraint) XXX_Size() int { + return m.Size() +} +func (m *TopologySpreadConstraint) XXX_DiscardUnknown() { + xxx_messageInfo_TopologySpreadConstraint.DiscardUnknown(m) } -func (m *GlusterfsVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo + +func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } +func (*TypedLocalObjectReference) ProtoMessage() {} +func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{194} +} +func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TypedLocalObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *GlusterfsVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName))) - i += copy(dAtA[i:], m.EndpointsName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil +func (m *TypedLocalObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypedLocalObjectReference.Merge(m, src) +} +func (m *TypedLocalObjectReference) XXX_Size() int { + return m.Size() +} +func (m *TypedLocalObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_TypedLocalObjectReference.DiscardUnknown(m) } -func (m *HTTPGetAction) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo + +func (m *Volume) Reset() { *m = Volume{} } +func (*Volume) ProtoMessage() {} +func (*Volume) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{195} +} +func (m *Volume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Volume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return 
b[:n], nil } - -func (m *HTTPGetAction) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n60, err := m.Port.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n60 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) - i += copy(dAtA[i:], m.Host) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scheme))) - i += copy(dAtA[i:], m.Scheme) - if len(m.HTTPHeaders) > 0 { - for _, msg := range m.HTTPHeaders { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil +func (m *Volume) XXX_Merge(src proto.Message) { + xxx_messageInfo_Volume.Merge(m, src) +} +func (m *Volume) XXX_Size() int { + return m.Size() +} +func (m *Volume) XXX_DiscardUnknown() { + xxx_messageInfo_Volume.DiscardUnknown(m) } -func (m *HTTPHeader) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_Volume proto.InternalMessageInfo + +func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } +func (*VolumeDevice) ProtoMessage() {} +func (*VolumeDevice) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{196} +} +func (m *VolumeDevice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *HTTPHeader) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - return i, nil +func (m *VolumeDevice) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeDevice.Merge(m, src) +} +func (m *VolumeDevice) XXX_Size() int { + return m.Size() +} +func (m *VolumeDevice) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeDevice.DiscardUnknown(m) } -func (m *Handler) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo + +func (m *VolumeMount) Reset() { *m = VolumeMount{} } +func (*VolumeMount) ProtoMessage() {} +func (*VolumeMount) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{197} +} +func (m *VolumeMount) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeMount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *Handler) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Exec != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Exec.Size())) - n61, err := m.Exec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n61 - } - if m.HTTPGet != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HTTPGet.Size())) - n62, err := 
m.HTTPGet.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n62 - } - if m.TCPSocket != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TCPSocket.Size())) - n63, err := m.TCPSocket.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n63 - } - return i, nil +func (m *VolumeMount) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeMount.Merge(m, src) +} +func (m *VolumeMount) XXX_Size() int { + return m.Size() +} +func (m *VolumeMount) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeMount.DiscardUnknown(m) } -func (m *HostAlias) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_VolumeMount proto.InternalMessageInfo + +func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } +func (*VolumeNodeAffinity) ProtoMessage() {} +func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{198} +} +func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeNodeAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *HostAlias) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) - i += copy(dAtA[i:], m.IP) - if len(m.Hostnames) > 0 { - for _, s := range m.Hostnames { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil +func (m *VolumeNodeAffinity) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeNodeAffinity.Merge(m, src) +} +func (m *VolumeNodeAffinity) XXX_Size() int { + return m.Size() +} +func (m *VolumeNodeAffinity) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeNodeAffinity.DiscardUnknown(m) } -func (m *HostPathVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo + +func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } +func (*VolumeProjection) ProtoMessage() {} +func (*VolumeProjection) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{199} +} +func (m *VolumeProjection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *HostPathVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - if m.Type != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) - i += copy(dAtA[i:], *m.Type) - } - return i, nil +func (m *VolumeProjection) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeProjection.Merge(m, src) +} +func (m *VolumeProjection) XXX_Size() int { + return m.Size() +} +func (m *VolumeProjection) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeProjection.DiscardUnknown(m) } -func (m *ISCSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := 
m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo + +func (m *VolumeSource) Reset() { *m = VolumeSource{} } +func (*VolumeSource) ProtoMessage() {} +func (*VolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{200} +} +func (m *VolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *ISCSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) - i += copy(dAtA[i:], m.TargetPortal) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) - i += copy(dAtA[i:], m.IQN) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) - i += copy(dAtA[i:], m.ISCSIInterface) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x30 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.Portals) > 0 { - for _, s := range m.Portals { - dAtA[i] = 0x3a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x40 - i++ - if m.DiscoveryCHAPAuth { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecretRef != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n64, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n64 - } - dAtA[i] = 0x58 - i++ - if m.SessionCHAPAuth { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.InitiatorName != nil { - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) - i += copy(dAtA[i:], *m.InitiatorName) - } - return i, nil +func (m *VolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeSource.Merge(m, src) +} +func (m *VolumeSource) XXX_Size() int { + return m.Size() +} +func (m *VolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeSource.DiscardUnknown(m) } -func (m *ISCSIVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_VolumeSource proto.InternalMessageInfo + +func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } +func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} +func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{201} +} +func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VsphereVirtualDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *ISCSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) - i += copy(dAtA[i:], m.TargetPortal) - dAtA[i] 
= 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) - i += copy(dAtA[i:], m.IQN) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) - i += copy(dAtA[i:], m.ISCSIInterface) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x30 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.Portals) > 0 { - for _, s := range m.Portals { - dAtA[i] = 0x3a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x40 - i++ - if m.DiscoveryCHAPAuth { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecretRef != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n65, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n65 - } - dAtA[i] = 0x58 - i++ - if m.SessionCHAPAuth { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.InitiatorName != nil { - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) - i += copy(dAtA[i:], *m.InitiatorName) - } - return i, nil +func (m *VsphereVirtualDiskVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VsphereVirtualDiskVolumeSource.Merge(m, src) +} +func (m *VsphereVirtualDiskVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *VsphereVirtualDiskVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_VsphereVirtualDiskVolumeSource.DiscardUnknown(m) } -func (m *KeyToPath) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo + +func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } +func (*WeightedPodAffinityTerm) ProtoMessage() {} +func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{202} +} +func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WeightedPodAffinityTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *KeyToPath) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - if m.Mode != nil { - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) - } - return i, nil +func (m *WeightedPodAffinityTerm) XXX_Merge(src proto.Message) { + xxx_messageInfo_WeightedPodAffinityTerm.Merge(m, src) +} +func (m *WeightedPodAffinityTerm) XXX_Size() int { + return m.Size() +} +func (m *WeightedPodAffinityTerm) XXX_DiscardUnknown() { + xxx_messageInfo_WeightedPodAffinityTerm.DiscardUnknown(m) } -func (m *Lifecycle) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo + +func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} } +func 
(*WindowsSecurityContextOptions) ProtoMessage() {} +func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{203} +} +func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WindowsSecurityContextOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *WindowsSecurityContextOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_WindowsSecurityContextOptions.Merge(m, src) +} +func (m *WindowsSecurityContextOptions) XXX_Size() int { + return m.Size() +} +func (m *WindowsSecurityContextOptions) XXX_DiscardUnknown() { + xxx_messageInfo_WindowsSecurityContextOptions.DiscardUnknown(m) } -func (m *Lifecycle) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.PostStart != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PostStart.Size())) - n66, err := m.PostStart.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n66 - } - if m.PreStop != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PreStop.Size())) - n67, err := m.PreStop.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n67 - } - return i, nil +var xxx_messageInfo_WindowsSecurityContextOptions proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.api.core.v1.AWSElasticBlockStoreVolumeSource") + proto.RegisterType((*Affinity)(nil), "k8s.io.api.core.v1.Affinity") + proto.RegisterType((*AttachedVolume)(nil), "k8s.io.api.core.v1.AttachedVolume") + proto.RegisterType((*AvoidPods)(nil), "k8s.io.api.core.v1.AvoidPods") + proto.RegisterType((*AzureDiskVolumeSource)(nil), "k8s.io.api.core.v1.AzureDiskVolumeSource") + proto.RegisterType((*AzureFilePersistentVolumeSource)(nil), "k8s.io.api.core.v1.AzureFilePersistentVolumeSource") + proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.api.core.v1.AzureFileVolumeSource") + proto.RegisterType((*Binding)(nil), "k8s.io.api.core.v1.Binding") + proto.RegisterType((*CSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource.VolumeAttributesEntry") + proto.RegisterType((*CSIVolumeSource)(nil), "k8s.io.api.core.v1.CSIVolumeSource") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.CSIVolumeSource.VolumeAttributesEntry") + proto.RegisterType((*Capabilities)(nil), "k8s.io.api.core.v1.Capabilities") + proto.RegisterType((*CephFSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CephFSPersistentVolumeSource") + proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource") + proto.RegisterType((*CinderPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CinderPersistentVolumeSource") + proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.api.core.v1.CinderVolumeSource") + proto.RegisterType((*ClientIPConfig)(nil), "k8s.io.api.core.v1.ClientIPConfig") + proto.RegisterType((*ComponentCondition)(nil), "k8s.io.api.core.v1.ComponentCondition") + proto.RegisterType((*ComponentStatus)(nil), "k8s.io.api.core.v1.ComponentStatus") + proto.RegisterType((*ComponentStatusList)(nil), "k8s.io.api.core.v1.ComponentStatusList") + proto.RegisterType((*ConfigMap)(nil), "k8s.io.api.core.v1.ConfigMap") + 
proto.RegisterMapType((map[string][]byte)(nil), "k8s.io.api.core.v1.ConfigMap.BinaryDataEntry") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.ConfigMap.DataEntry") + proto.RegisterType((*ConfigMapEnvSource)(nil), "k8s.io.api.core.v1.ConfigMapEnvSource") + proto.RegisterType((*ConfigMapKeySelector)(nil), "k8s.io.api.core.v1.ConfigMapKeySelector") + proto.RegisterType((*ConfigMapList)(nil), "k8s.io.api.core.v1.ConfigMapList") + proto.RegisterType((*ConfigMapNodeConfigSource)(nil), "k8s.io.api.core.v1.ConfigMapNodeConfigSource") + proto.RegisterType((*ConfigMapProjection)(nil), "k8s.io.api.core.v1.ConfigMapProjection") + proto.RegisterType((*ConfigMapVolumeSource)(nil), "k8s.io.api.core.v1.ConfigMapVolumeSource") + proto.RegisterType((*Container)(nil), "k8s.io.api.core.v1.Container") + proto.RegisterType((*ContainerImage)(nil), "k8s.io.api.core.v1.ContainerImage") + proto.RegisterType((*ContainerPort)(nil), "k8s.io.api.core.v1.ContainerPort") + proto.RegisterType((*ContainerState)(nil), "k8s.io.api.core.v1.ContainerState") + proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.api.core.v1.ContainerStateRunning") + proto.RegisterType((*ContainerStateTerminated)(nil), "k8s.io.api.core.v1.ContainerStateTerminated") + proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.api.core.v1.ContainerStateWaiting") + proto.RegisterType((*ContainerStatus)(nil), "k8s.io.api.core.v1.ContainerStatus") + proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.api.core.v1.DaemonEndpoint") + proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.api.core.v1.DownwardAPIProjection") + proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeFile") + proto.RegisterType((*DownwardAPIVolumeSource)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeSource") + proto.RegisterType((*EmptyDirVolumeSource)(nil), "k8s.io.api.core.v1.EmptyDirVolumeSource") + proto.RegisterType((*EndpointAddress)(nil), "k8s.io.api.core.v1.EndpointAddress") + proto.RegisterType((*EndpointPort)(nil), "k8s.io.api.core.v1.EndpointPort") + proto.RegisterType((*EndpointSubset)(nil), "k8s.io.api.core.v1.EndpointSubset") + proto.RegisterType((*Endpoints)(nil), "k8s.io.api.core.v1.Endpoints") + proto.RegisterType((*EndpointsList)(nil), "k8s.io.api.core.v1.EndpointsList") + proto.RegisterType((*EnvFromSource)(nil), "k8s.io.api.core.v1.EnvFromSource") + proto.RegisterType((*EnvVar)(nil), "k8s.io.api.core.v1.EnvVar") + proto.RegisterType((*EnvVarSource)(nil), "k8s.io.api.core.v1.EnvVarSource") + proto.RegisterType((*EphemeralContainer)(nil), "k8s.io.api.core.v1.EphemeralContainer") + proto.RegisterType((*EphemeralContainerCommon)(nil), "k8s.io.api.core.v1.EphemeralContainerCommon") + proto.RegisterType((*EphemeralContainers)(nil), "k8s.io.api.core.v1.EphemeralContainers") + proto.RegisterType((*Event)(nil), "k8s.io.api.core.v1.Event") + proto.RegisterType((*EventList)(nil), "k8s.io.api.core.v1.EventList") + proto.RegisterType((*EventSeries)(nil), "k8s.io.api.core.v1.EventSeries") + proto.RegisterType((*EventSource)(nil), "k8s.io.api.core.v1.EventSource") + proto.RegisterType((*ExecAction)(nil), "k8s.io.api.core.v1.ExecAction") + proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.api.core.v1.FCVolumeSource") + proto.RegisterType((*FlexPersistentVolumeSource)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource.OptionsEntry") + proto.RegisterType((*FlexVolumeSource)(nil), 
"k8s.io.api.core.v1.FlexVolumeSource") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.FlexVolumeSource.OptionsEntry") + proto.RegisterType((*FlockerVolumeSource)(nil), "k8s.io.api.core.v1.FlockerVolumeSource") + proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.GCEPersistentDiskVolumeSource") + proto.RegisterType((*GitRepoVolumeSource)(nil), "k8s.io.api.core.v1.GitRepoVolumeSource") + proto.RegisterType((*GlusterfsPersistentVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsPersistentVolumeSource") + proto.RegisterType((*GlusterfsVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsVolumeSource") + proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.api.core.v1.HTTPGetAction") + proto.RegisterType((*HTTPHeader)(nil), "k8s.io.api.core.v1.HTTPHeader") + proto.RegisterType((*Handler)(nil), "k8s.io.api.core.v1.Handler") + proto.RegisterType((*HostAlias)(nil), "k8s.io.api.core.v1.HostAlias") + proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.api.core.v1.HostPathVolumeSource") + proto.RegisterType((*ISCSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIPersistentVolumeSource") + proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIVolumeSource") + proto.RegisterType((*KeyToPath)(nil), "k8s.io.api.core.v1.KeyToPath") + proto.RegisterType((*Lifecycle)(nil), "k8s.io.api.core.v1.Lifecycle") + proto.RegisterType((*LimitRange)(nil), "k8s.io.api.core.v1.LimitRange") + proto.RegisterType((*LimitRangeItem)(nil), "k8s.io.api.core.v1.LimitRangeItem") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.DefaultEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.DefaultRequestEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.MaxEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.MaxLimitRequestRatioEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.MinEntry") + proto.RegisterType((*LimitRangeList)(nil), "k8s.io.api.core.v1.LimitRangeList") + proto.RegisterType((*LimitRangeSpec)(nil), "k8s.io.api.core.v1.LimitRangeSpec") + proto.RegisterType((*List)(nil), "k8s.io.api.core.v1.List") + proto.RegisterType((*LoadBalancerIngress)(nil), "k8s.io.api.core.v1.LoadBalancerIngress") + proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.api.core.v1.LoadBalancerStatus") + proto.RegisterType((*LocalObjectReference)(nil), "k8s.io.api.core.v1.LocalObjectReference") + proto.RegisterType((*LocalVolumeSource)(nil), "k8s.io.api.core.v1.LocalVolumeSource") + proto.RegisterType((*NFSVolumeSource)(nil), "k8s.io.api.core.v1.NFSVolumeSource") + proto.RegisterType((*Namespace)(nil), "k8s.io.api.core.v1.Namespace") + proto.RegisterType((*NamespaceCondition)(nil), "k8s.io.api.core.v1.NamespaceCondition") + proto.RegisterType((*NamespaceList)(nil), "k8s.io.api.core.v1.NamespaceList") + proto.RegisterType((*NamespaceSpec)(nil), "k8s.io.api.core.v1.NamespaceSpec") + proto.RegisterType((*NamespaceStatus)(nil), "k8s.io.api.core.v1.NamespaceStatus") + proto.RegisterType((*Node)(nil), "k8s.io.api.core.v1.Node") + proto.RegisterType((*NodeAddress)(nil), "k8s.io.api.core.v1.NodeAddress") + proto.RegisterType((*NodeAffinity)(nil), "k8s.io.api.core.v1.NodeAffinity") + proto.RegisterType((*NodeCondition)(nil), "k8s.io.api.core.v1.NodeCondition") + proto.RegisterType((*NodeConfigSource)(nil), "k8s.io.api.core.v1.NodeConfigSource") + proto.RegisterType((*NodeConfigStatus)(nil), 
"k8s.io.api.core.v1.NodeConfigStatus") + proto.RegisterType((*NodeDaemonEndpoints)(nil), "k8s.io.api.core.v1.NodeDaemonEndpoints") + proto.RegisterType((*NodeList)(nil), "k8s.io.api.core.v1.NodeList") + proto.RegisterType((*NodeProxyOptions)(nil), "k8s.io.api.core.v1.NodeProxyOptions") + proto.RegisterType((*NodeResources)(nil), "k8s.io.api.core.v1.NodeResources") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.NodeResources.CapacityEntry") + proto.RegisterType((*NodeSelector)(nil), "k8s.io.api.core.v1.NodeSelector") + proto.RegisterType((*NodeSelectorRequirement)(nil), "k8s.io.api.core.v1.NodeSelectorRequirement") + proto.RegisterType((*NodeSelectorTerm)(nil), "k8s.io.api.core.v1.NodeSelectorTerm") + proto.RegisterType((*NodeSpec)(nil), "k8s.io.api.core.v1.NodeSpec") + proto.RegisterType((*NodeStatus)(nil), "k8s.io.api.core.v1.NodeStatus") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.NodeStatus.AllocatableEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.NodeStatus.CapacityEntry") + proto.RegisterType((*NodeSystemInfo)(nil), "k8s.io.api.core.v1.NodeSystemInfo") + proto.RegisterType((*ObjectFieldSelector)(nil), "k8s.io.api.core.v1.ObjectFieldSelector") + proto.RegisterType((*ObjectReference)(nil), "k8s.io.api.core.v1.ObjectReference") + proto.RegisterType((*PersistentVolume)(nil), "k8s.io.api.core.v1.PersistentVolume") + proto.RegisterType((*PersistentVolumeClaim)(nil), "k8s.io.api.core.v1.PersistentVolumeClaim") + proto.RegisterType((*PersistentVolumeClaimCondition)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimCondition") + proto.RegisterType((*PersistentVolumeClaimList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimList") + proto.RegisterType((*PersistentVolumeClaimSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimSpec") + proto.RegisterType((*PersistentVolumeClaimStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus.CapacityEntry") + proto.RegisterType((*PersistentVolumeClaimVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimVolumeSource") + proto.RegisterType((*PersistentVolumeList)(nil), "k8s.io.api.core.v1.PersistentVolumeList") + proto.RegisterType((*PersistentVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeSource") + proto.RegisterType((*PersistentVolumeSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeSpec") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PersistentVolumeSpec.CapacityEntry") + proto.RegisterType((*PersistentVolumeStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeStatus") + proto.RegisterType((*PhotonPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.PhotonPersistentDiskVolumeSource") + proto.RegisterType((*Pod)(nil), "k8s.io.api.core.v1.Pod") + proto.RegisterType((*PodAffinity)(nil), "k8s.io.api.core.v1.PodAffinity") + proto.RegisterType((*PodAffinityTerm)(nil), "k8s.io.api.core.v1.PodAffinityTerm") + proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.api.core.v1.PodAntiAffinity") + proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.api.core.v1.PodAttachOptions") + proto.RegisterType((*PodCondition)(nil), "k8s.io.api.core.v1.PodCondition") + proto.RegisterType((*PodDNSConfig)(nil), "k8s.io.api.core.v1.PodDNSConfig") + proto.RegisterType((*PodDNSConfigOption)(nil), "k8s.io.api.core.v1.PodDNSConfigOption") + proto.RegisterType((*PodExecOptions)(nil), "k8s.io.api.core.v1.PodExecOptions") + proto.RegisterType((*PodIP)(nil), "k8s.io.api.core.v1.PodIP") + 
proto.RegisterType((*PodList)(nil), "k8s.io.api.core.v1.PodList") + proto.RegisterType((*PodLogOptions)(nil), "k8s.io.api.core.v1.PodLogOptions") + proto.RegisterType((*PodPortForwardOptions)(nil), "k8s.io.api.core.v1.PodPortForwardOptions") + proto.RegisterType((*PodProxyOptions)(nil), "k8s.io.api.core.v1.PodProxyOptions") + proto.RegisterType((*PodReadinessGate)(nil), "k8s.io.api.core.v1.PodReadinessGate") + proto.RegisterType((*PodSecurityContext)(nil), "k8s.io.api.core.v1.PodSecurityContext") + proto.RegisterType((*PodSignature)(nil), "k8s.io.api.core.v1.PodSignature") + proto.RegisterType((*PodSpec)(nil), "k8s.io.api.core.v1.PodSpec") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.PodSpec.NodeSelectorEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PodSpec.OverheadEntry") + proto.RegisterType((*PodStatus)(nil), "k8s.io.api.core.v1.PodStatus") + proto.RegisterType((*PodStatusResult)(nil), "k8s.io.api.core.v1.PodStatusResult") + proto.RegisterType((*PodTemplate)(nil), "k8s.io.api.core.v1.PodTemplate") + proto.RegisterType((*PodTemplateList)(nil), "k8s.io.api.core.v1.PodTemplateList") + proto.RegisterType((*PodTemplateSpec)(nil), "k8s.io.api.core.v1.PodTemplateSpec") + proto.RegisterType((*PortworxVolumeSource)(nil), "k8s.io.api.core.v1.PortworxVolumeSource") + proto.RegisterType((*Preconditions)(nil), "k8s.io.api.core.v1.Preconditions") + proto.RegisterType((*PreferAvoidPodsEntry)(nil), "k8s.io.api.core.v1.PreferAvoidPodsEntry") + proto.RegisterType((*PreferredSchedulingTerm)(nil), "k8s.io.api.core.v1.PreferredSchedulingTerm") + proto.RegisterType((*Probe)(nil), "k8s.io.api.core.v1.Probe") + proto.RegisterType((*ProjectedVolumeSource)(nil), "k8s.io.api.core.v1.ProjectedVolumeSource") + proto.RegisterType((*QuobyteVolumeSource)(nil), "k8s.io.api.core.v1.QuobyteVolumeSource") + proto.RegisterType((*RBDPersistentVolumeSource)(nil), "k8s.io.api.core.v1.RBDPersistentVolumeSource") + proto.RegisterType((*RBDVolumeSource)(nil), "k8s.io.api.core.v1.RBDVolumeSource") + proto.RegisterType((*RangeAllocation)(nil), "k8s.io.api.core.v1.RangeAllocation") + proto.RegisterType((*ReplicationController)(nil), "k8s.io.api.core.v1.ReplicationController") + proto.RegisterType((*ReplicationControllerCondition)(nil), "k8s.io.api.core.v1.ReplicationControllerCondition") + proto.RegisterType((*ReplicationControllerList)(nil), "k8s.io.api.core.v1.ReplicationControllerList") + proto.RegisterType((*ReplicationControllerSpec)(nil), "k8s.io.api.core.v1.ReplicationControllerSpec") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.ReplicationControllerSpec.SelectorEntry") + proto.RegisterType((*ReplicationControllerStatus)(nil), "k8s.io.api.core.v1.ReplicationControllerStatus") + proto.RegisterType((*ResourceFieldSelector)(nil), "k8s.io.api.core.v1.ResourceFieldSelector") + proto.RegisterType((*ResourceQuota)(nil), "k8s.io.api.core.v1.ResourceQuota") + proto.RegisterType((*ResourceQuotaList)(nil), "k8s.io.api.core.v1.ResourceQuotaList") + proto.RegisterType((*ResourceQuotaSpec)(nil), "k8s.io.api.core.v1.ResourceQuotaSpec") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceQuotaSpec.HardEntry") + proto.RegisterType((*ResourceQuotaStatus)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus.HardEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus.UsedEntry") + proto.RegisterType((*ResourceRequirements)(nil), 
"k8s.io.api.core.v1.ResourceRequirements") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceRequirements.LimitsEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceRequirements.RequestsEntry") + proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.api.core.v1.SELinuxOptions") + proto.RegisterType((*ScaleIOPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOPersistentVolumeSource") + proto.RegisterType((*ScaleIOVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOVolumeSource") + proto.RegisterType((*ScopeSelector)(nil), "k8s.io.api.core.v1.ScopeSelector") + proto.RegisterType((*ScopedResourceSelectorRequirement)(nil), "k8s.io.api.core.v1.ScopedResourceSelectorRequirement") + proto.RegisterType((*Secret)(nil), "k8s.io.api.core.v1.Secret") + proto.RegisterMapType((map[string][]byte)(nil), "k8s.io.api.core.v1.Secret.DataEntry") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.Secret.StringDataEntry") + proto.RegisterType((*SecretEnvSource)(nil), "k8s.io.api.core.v1.SecretEnvSource") + proto.RegisterType((*SecretKeySelector)(nil), "k8s.io.api.core.v1.SecretKeySelector") + proto.RegisterType((*SecretList)(nil), "k8s.io.api.core.v1.SecretList") + proto.RegisterType((*SecretProjection)(nil), "k8s.io.api.core.v1.SecretProjection") + proto.RegisterType((*SecretReference)(nil), "k8s.io.api.core.v1.SecretReference") + proto.RegisterType((*SecretVolumeSource)(nil), "k8s.io.api.core.v1.SecretVolumeSource") + proto.RegisterType((*SecurityContext)(nil), "k8s.io.api.core.v1.SecurityContext") + proto.RegisterType((*SerializedReference)(nil), "k8s.io.api.core.v1.SerializedReference") + proto.RegisterType((*Service)(nil), "k8s.io.api.core.v1.Service") + proto.RegisterType((*ServiceAccount)(nil), "k8s.io.api.core.v1.ServiceAccount") + proto.RegisterType((*ServiceAccountList)(nil), "k8s.io.api.core.v1.ServiceAccountList") + proto.RegisterType((*ServiceAccountTokenProjection)(nil), "k8s.io.api.core.v1.ServiceAccountTokenProjection") + proto.RegisterType((*ServiceList)(nil), "k8s.io.api.core.v1.ServiceList") + proto.RegisterType((*ServicePort)(nil), "k8s.io.api.core.v1.ServicePort") + proto.RegisterType((*ServiceProxyOptions)(nil), "k8s.io.api.core.v1.ServiceProxyOptions") + proto.RegisterType((*ServiceSpec)(nil), "k8s.io.api.core.v1.ServiceSpec") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.ServiceSpec.SelectorEntry") + proto.RegisterType((*ServiceStatus)(nil), "k8s.io.api.core.v1.ServiceStatus") + proto.RegisterType((*SessionAffinityConfig)(nil), "k8s.io.api.core.v1.SessionAffinityConfig") + proto.RegisterType((*StorageOSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSPersistentVolumeSource") + proto.RegisterType((*StorageOSVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSVolumeSource") + proto.RegisterType((*Sysctl)(nil), "k8s.io.api.core.v1.Sysctl") + proto.RegisterType((*TCPSocketAction)(nil), "k8s.io.api.core.v1.TCPSocketAction") + proto.RegisterType((*Taint)(nil), "k8s.io.api.core.v1.Taint") + proto.RegisterType((*Toleration)(nil), "k8s.io.api.core.v1.Toleration") + proto.RegisterType((*TopologySelectorLabelRequirement)(nil), "k8s.io.api.core.v1.TopologySelectorLabelRequirement") + proto.RegisterType((*TopologySelectorTerm)(nil), "k8s.io.api.core.v1.TopologySelectorTerm") + proto.RegisterType((*TopologySpreadConstraint)(nil), "k8s.io.api.core.v1.TopologySpreadConstraint") + proto.RegisterType((*TypedLocalObjectReference)(nil), "k8s.io.api.core.v1.TypedLocalObjectReference") + 
proto.RegisterType((*Volume)(nil), "k8s.io.api.core.v1.Volume") + proto.RegisterType((*VolumeDevice)(nil), "k8s.io.api.core.v1.VolumeDevice") + proto.RegisterType((*VolumeMount)(nil), "k8s.io.api.core.v1.VolumeMount") + proto.RegisterType((*VolumeNodeAffinity)(nil), "k8s.io.api.core.v1.VolumeNodeAffinity") + proto.RegisterType((*VolumeProjection)(nil), "k8s.io.api.core.v1.VolumeProjection") + proto.RegisterType((*VolumeSource)(nil), "k8s.io.api.core.v1.VolumeSource") + proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "k8s.io.api.core.v1.VsphereVirtualDiskVolumeSource") + proto.RegisterType((*WeightedPodAffinityTerm)(nil), "k8s.io.api.core.v1.WeightedPodAffinityTerm") + proto.RegisterType((*WindowsSecurityContextOptions)(nil), "k8s.io.api.core.v1.WindowsSecurityContextOptions") } -func (m *LimitRange) Marshal() (dAtA []byte, err error) { +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto", fileDescriptor_83c10c24ec417dc9) +} + +var fileDescriptor_83c10c24ec417dc9 = []byte{ + // 13567 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x7b, 0x70, 0x24, 0x49, + 0x5a, 0x18, 0x7e, 0xd5, 0xad, 0x47, 0xf7, 0xa7, 0x77, 0xce, 0x63, 0x35, 0xda, 0x9d, 0xd1, 0x6c, + 0xed, 0xdd, 0xec, 0xec, 0xed, 0xae, 0xe6, 0xf6, 0x75, 0xbb, 0xdc, 0xde, 0x2d, 0x48, 0x6a, 0x69, + 0xa6, 0x77, 0x46, 0x9a, 0xde, 0x6c, 0xcd, 0xcc, 0xdd, 0xb2, 0x77, 0x5c, 0xa9, 0x2b, 0x25, 0xd5, + 0xaa, 0xbb, 0xaa, 0xb7, 0xaa, 0x5a, 0x33, 0xda, 0x1f, 0xc4, 0x0f, 0x1f, 0xcf, 0x33, 0xe0, 0xb8, + 0xb0, 0x09, 0x3f, 0x80, 0xc0, 0x11, 0x18, 0x07, 0x60, 0xb0, 0xc3, 0x18, 0x0c, 0x98, 0xc3, 0x36, + 0x06, 0xdb, 0x81, 0xfd, 0x07, 0xc6, 0x0e, 0xdb, 0x47, 0x04, 0x61, 0x19, 0x06, 0x87, 0x89, 0xfb, + 0xc3, 0x40, 0x18, 0xfc, 0x87, 0x65, 0xc2, 0x38, 0xf2, 0x59, 0x99, 0xd5, 0x55, 0xdd, 0xad, 0x59, + 0x8d, 0x6e, 0xb9, 0xd8, 0xff, 0xba, 0xf3, 0xfb, 0xf2, 0xcb, 0xac, 0x7c, 0x7e, 0xf9, 0x3d, 0xe1, + 0xd5, 0xdd, 0x57, 0xa2, 0x05, 0x2f, 0xb8, 0xb2, 0xdb, 0xd9, 0x24, 0xa1, 0x4f, 0x62, 0x12, 0x5d, + 0xd9, 0x23, 0xbe, 0x1b, 0x84, 0x57, 0x04, 0xc0, 0x69, 0x7b, 0x57, 0x1a, 0x41, 0x48, 0xae, 0xec, + 0x3d, 0x77, 0x65, 0x9b, 0xf8, 0x24, 0x74, 0x62, 0xe2, 0x2e, 0xb4, 0xc3, 0x20, 0x0e, 0x10, 0xe2, + 0x38, 0x0b, 0x4e, 0xdb, 0x5b, 0xa0, 0x38, 0x0b, 0x7b, 0xcf, 0xcd, 0x3d, 0xbb, 0xed, 0xc5, 0x3b, + 0x9d, 0xcd, 0x85, 0x46, 0xd0, 0xba, 0xb2, 0x1d, 0x6c, 0x07, 0x57, 0x18, 0xea, 0x66, 0x67, 0x8b, + 0xfd, 0x63, 0x7f, 0xd8, 0x2f, 0x4e, 0x62, 0xee, 0xc5, 0xa4, 0x99, 0x96, 0xd3, 0xd8, 0xf1, 0x7c, + 0x12, 0xee, 0x5f, 0x69, 0xef, 0x6e, 0xb3, 0x76, 0x43, 0x12, 0x05, 0x9d, 0xb0, 0x41, 0xd2, 0x0d, + 0xf7, 0xac, 0x15, 0x5d, 0x69, 0x91, 0xd8, 0xc9, 0xe8, 0xee, 0xdc, 0x95, 0xbc, 0x5a, 0x61, 0xc7, + 0x8f, 0xbd, 0x56, 0x77, 0x33, 0x1f, 0xef, 0x57, 0x21, 0x6a, 0xec, 0x90, 0x96, 0xd3, 0x55, 0xef, + 0x85, 0xbc, 0x7a, 0x9d, 0xd8, 0x6b, 0x5e, 0xf1, 0xfc, 0x38, 0x8a, 0xc3, 0x74, 0x25, 0xfb, 0x2b, + 0x16, 0x5c, 0x5c, 0xbc, 0x53, 0x5f, 0x69, 0x3a, 0x51, 0xec, 0x35, 0x96, 0x9a, 0x41, 0x63, 0xb7, + 0x1e, 0x07, 0x21, 0xb9, 0x1d, 0x34, 0x3b, 0x2d, 0x52, 0x67, 0x03, 0x81, 0x9e, 0x81, 0xd2, 0x1e, + 0xfb, 0x5f, 0xad, 0xcc, 0x5a, 0x17, 0xad, 0xcb, 0xe5, 0xa5, 0xe9, 0xdf, 0x38, 0x98, 0xff, 0xd0, + 0xfd, 0x83, 0xf9, 0xd2, 0x6d, 0x51, 0x8e, 0x15, 0x06, 0xba, 0x04, 0x23, 0x5b, 0xd1, 0xc6, 0x7e, + 0x9b, 0xcc, 0x16, 0x18, 0xee, 0xa4, 0xc0, 0x1d, 0x59, 0xad, 0xd3, 0x52, 0x2c, 0xa0, 0xe8, 0x0a, + 0x94, 0xdb, 0x4e, 0x18, 0x7b, 0xb1, 0x17, 0xf8, 0xb3, 0xc5, 0x8b, 0xd6, 0xe5, 0xe1, 0xa5, 0x19, + 0x81, 
0x5a, 0xae, 0x49, 0x00, 0x4e, 0x70, 0x68, 0x37, 0x42, 0xe2, 0xb8, 0x37, 0xfd, 0xe6, 0xfe, + 0xec, 0xd0, 0x45, 0xeb, 0x72, 0x29, 0xe9, 0x06, 0x16, 0xe5, 0x58, 0x61, 0xd8, 0x3f, 0x54, 0x80, + 0xd2, 0xe2, 0xd6, 0x96, 0xe7, 0x7b, 0xf1, 0x3e, 0xba, 0x0d, 0xe3, 0x7e, 0xe0, 0x12, 0xf9, 0x9f, + 0x7d, 0xc5, 0xd8, 0xf3, 0x17, 0x17, 0xba, 0x97, 0xd2, 0xc2, 0xba, 0x86, 0xb7, 0x34, 0x7d, 0xff, + 0x60, 0x7e, 0x5c, 0x2f, 0xc1, 0x06, 0x1d, 0x84, 0x61, 0xac, 0x1d, 0xb8, 0x8a, 0x6c, 0x81, 0x91, + 0x9d, 0xcf, 0x22, 0x5b, 0x4b, 0xd0, 0x96, 0xa6, 0xee, 0x1f, 0xcc, 0x8f, 0x69, 0x05, 0x58, 0x27, + 0x82, 0x36, 0x61, 0x8a, 0xfe, 0xf5, 0x63, 0x4f, 0xd1, 0x2d, 0x32, 0xba, 0x4f, 0xe4, 0xd1, 0xd5, + 0x50, 0x97, 0x4e, 0xdd, 0x3f, 0x98, 0x9f, 0x4a, 0x15, 0xe2, 0x34, 0x41, 0xfb, 0x5d, 0x98, 0x5c, + 0x8c, 0x63, 0xa7, 0xb1, 0x43, 0x5c, 0x3e, 0x83, 0xe8, 0x45, 0x18, 0xf2, 0x9d, 0x16, 0x11, 0xf3, + 0x7b, 0x51, 0x0c, 0xec, 0xd0, 0xba, 0xd3, 0x22, 0x87, 0x07, 0xf3, 0xd3, 0xb7, 0x7c, 0xef, 0x9d, + 0x8e, 0x58, 0x15, 0xb4, 0x0c, 0x33, 0x6c, 0xf4, 0x3c, 0x80, 0x4b, 0xf6, 0xbc, 0x06, 0xa9, 0x39, + 0xf1, 0x8e, 0x98, 0x6f, 0x24, 0xea, 0x42, 0x45, 0x41, 0xb0, 0x86, 0x65, 0xdf, 0x83, 0xf2, 0xe2, + 0x5e, 0xe0, 0xb9, 0xb5, 0xc0, 0x8d, 0xd0, 0x2e, 0x4c, 0xb5, 0x43, 0xb2, 0x45, 0x42, 0x55, 0x34, + 0x6b, 0x5d, 0x2c, 0x5e, 0x1e, 0x7b, 0xfe, 0x72, 0xe6, 0xc7, 0x9a, 0xa8, 0x2b, 0x7e, 0x1c, 0xee, + 0x2f, 0x3d, 0x22, 0xda, 0x9b, 0x4a, 0x41, 0x71, 0x9a, 0xb2, 0xfd, 0x2f, 0x0b, 0x70, 0x66, 0xf1, + 0xdd, 0x4e, 0x48, 0x2a, 0x5e, 0xb4, 0x9b, 0x5e, 0xe1, 0xae, 0x17, 0xed, 0xae, 0x27, 0x23, 0xa0, + 0x96, 0x56, 0x45, 0x94, 0x63, 0x85, 0x81, 0x9e, 0x85, 0x51, 0xfa, 0xfb, 0x16, 0xae, 0x8a, 0x4f, + 0x3e, 0x25, 0x90, 0xc7, 0x2a, 0x4e, 0xec, 0x54, 0x38, 0x08, 0x4b, 0x1c, 0xb4, 0x06, 0x63, 0x0d, + 0xb6, 0x21, 0xb7, 0xd7, 0x02, 0x97, 0xb0, 0xc9, 0x2c, 0x2f, 0x3d, 0x4d, 0xd1, 0x97, 0x93, 0xe2, + 0xc3, 0x83, 0xf9, 0x59, 0xde, 0x37, 0x41, 0x42, 0x83, 0x61, 0xbd, 0x3e, 0xb2, 0xd5, 0xfe, 0x1a, + 0x62, 0x94, 0x20, 0x63, 0x6f, 0x5d, 0xd6, 0xb6, 0xca, 0x30, 0xdb, 0x2a, 0xe3, 0xd9, 0xdb, 0x04, + 0x3d, 0x07, 0x43, 0xbb, 0x9e, 0xef, 0xce, 0x8e, 0x30, 0x5a, 0xe7, 0xe9, 0x9c, 0x5f, 0xf7, 0x7c, + 0xf7, 0xf0, 0x60, 0x7e, 0xc6, 0xe8, 0x0e, 0x2d, 0xc4, 0x0c, 0xd5, 0xfe, 0x13, 0x0b, 0xe6, 0x19, + 0x6c, 0xd5, 0x6b, 0x92, 0x1a, 0x09, 0x23, 0x2f, 0x8a, 0x89, 0x1f, 0x1b, 0x03, 0xfa, 0x3c, 0x40, + 0x44, 0x1a, 0x21, 0x89, 0xb5, 0x21, 0x55, 0x0b, 0xa3, 0xae, 0x20, 0x58, 0xc3, 0xa2, 0x07, 0x42, + 0xb4, 0xe3, 0x84, 0x6c, 0x7d, 0x89, 0x81, 0x55, 0x07, 0x42, 0x5d, 0x02, 0x70, 0x82, 0x63, 0x1c, + 0x08, 0xc5, 0x7e, 0x07, 0x02, 0xfa, 0x14, 0x4c, 0x25, 0x8d, 0x45, 0x6d, 0xa7, 0x21, 0x07, 0x90, + 0x6d, 0x99, 0xba, 0x09, 0xc2, 0x69, 0x5c, 0xfb, 0xef, 0x59, 0x62, 0xf1, 0xd0, 0xaf, 0x7e, 0x9f, + 0x7f, 0xab, 0xfd, 0x4b, 0x16, 0x8c, 0x2e, 0x79, 0xbe, 0xeb, 0xf9, 0xdb, 0xe8, 0xf3, 0x50, 0xa2, + 0x77, 0x93, 0xeb, 0xc4, 0x8e, 0x38, 0xf7, 0x3e, 0xa6, 0xed, 0x2d, 0x75, 0x55, 0x2c, 0xb4, 0x77, + 0xb7, 0x69, 0x41, 0xb4, 0x40, 0xb1, 0xe9, 0x6e, 0xbb, 0xb9, 0xf9, 0x36, 0x69, 0xc4, 0x6b, 0x24, + 0x76, 0x92, 0xcf, 0x49, 0xca, 0xb0, 0xa2, 0x8a, 0xae, 0xc3, 0x48, 0xec, 0x84, 0xdb, 0x24, 0x16, + 0x07, 0x60, 0xe6, 0x41, 0xc5, 0x6b, 0x62, 0xba, 0x23, 0x89, 0xdf, 0x20, 0xc9, 0xb5, 0xb0, 0xc1, + 0xaa, 0x62, 0x41, 0xc2, 0xfe, 0x81, 0x51, 0x38, 0xb7, 0x5c, 0xaf, 0xe6, 0xac, 0xab, 0x4b, 0x30, + 0xe2, 0x86, 0xde, 0x1e, 0x09, 0xc5, 0x38, 0x2b, 0x2a, 0x15, 0x56, 0x8a, 0x05, 0x14, 0xbd, 0x02, + 0xe3, 0xfc, 0x42, 0xba, 0xe6, 0xf8, 0x6e, 0x53, 0x0e, 0xf1, 0x69, 0x81, 0x3d, 0x7e, 0x5b, 0x83, + 0x61, 0x03, 0xf3, 0x88, 0x8b, 
0xea, 0x52, 0x6a, 0x33, 0xe6, 0x5d, 0x76, 0x5f, 0xb4, 0x60, 0x9a, + 0x37, 0xb3, 0x18, 0xc7, 0xa1, 0xb7, 0xd9, 0x89, 0x49, 0x34, 0x3b, 0xcc, 0x4e, 0xba, 0xe5, 0xac, + 0xd1, 0xca, 0x1d, 0x81, 0x85, 0xdb, 0x29, 0x2a, 0xfc, 0x10, 0x9c, 0x15, 0xed, 0x4e, 0xa7, 0xc1, + 0xb8, 0xab, 0x59, 0xf4, 0x1d, 0x16, 0xcc, 0x35, 0x02, 0x3f, 0x0e, 0x83, 0x66, 0x93, 0x84, 0xb5, + 0xce, 0x66, 0xd3, 0x8b, 0x76, 0xf8, 0x3a, 0xc5, 0x64, 0x8b, 0x9d, 0x04, 0x39, 0x73, 0xa8, 0x90, + 0xc4, 0x1c, 0x5e, 0xb8, 0x7f, 0x30, 0x3f, 0xb7, 0x9c, 0x4b, 0x0a, 0xf7, 0x68, 0x06, 0xed, 0x02, + 0xa2, 0x57, 0x69, 0x3d, 0x76, 0xb6, 0x49, 0xd2, 0xf8, 0xe8, 0xe0, 0x8d, 0x9f, 0xbd, 0x7f, 0x30, + 0x8f, 0xd6, 0xbb, 0x48, 0xe0, 0x0c, 0xb2, 0xe8, 0x1d, 0x38, 0x4d, 0x4b, 0xbb, 0xbe, 0xb5, 0x34, + 0x78, 0x73, 0xb3, 0xf7, 0x0f, 0xe6, 0x4f, 0xaf, 0x67, 0x10, 0xc1, 0x99, 0xa4, 0xd1, 0xb7, 0x5b, + 0x70, 0x2e, 0xf9, 0xfc, 0x95, 0x7b, 0x6d, 0xc7, 0x77, 0x93, 0x86, 0xcb, 0x83, 0x37, 0x4c, 0xcf, + 0xe4, 0x73, 0xcb, 0x79, 0x94, 0x70, 0x7e, 0x23, 0x73, 0xcb, 0x70, 0x26, 0x73, 0xb5, 0xa0, 0x69, + 0x28, 0xee, 0x12, 0xce, 0x05, 0x95, 0x31, 0xfd, 0x89, 0x4e, 0xc3, 0xf0, 0x9e, 0xd3, 0xec, 0x88, + 0x8d, 0x82, 0xf9, 0x9f, 0x4f, 0x14, 0x5e, 0xb1, 0xec, 0x7f, 0x55, 0x84, 0xa9, 0xe5, 0x7a, 0xf5, + 0x81, 0x76, 0xa1, 0x7e, 0x0d, 0x15, 0x7a, 0x5e, 0x43, 0xc9, 0xa5, 0x56, 0xcc, 0xbd, 0xd4, 0xfe, + 0xff, 0x8c, 0x2d, 0x34, 0xc4, 0xb6, 0xd0, 0x37, 0xe4, 0x6c, 0xa1, 0x63, 0xde, 0x38, 0x7b, 0x39, + 0xab, 0x68, 0x98, 0x4d, 0x66, 0x26, 0xc7, 0x72, 0x23, 0x68, 0x38, 0xcd, 0xf4, 0xd1, 0x77, 0xc4, + 0xa5, 0x74, 0x3c, 0xf3, 0xd8, 0x80, 0xf1, 0x65, 0xa7, 0xed, 0x6c, 0x7a, 0x4d, 0x2f, 0xf6, 0x48, + 0x84, 0x9e, 0x84, 0xa2, 0xe3, 0xba, 0x8c, 0xdb, 0x2a, 0x2f, 0x9d, 0xb9, 0x7f, 0x30, 0x5f, 0x5c, + 0x74, 0xe9, 0xb5, 0x0f, 0x0a, 0x6b, 0x1f, 0x53, 0x0c, 0xf4, 0x51, 0x18, 0x72, 0xc3, 0xa0, 0x3d, + 0x5b, 0x60, 0x98, 0x74, 0xd7, 0x0d, 0x55, 0xc2, 0xa0, 0x9d, 0x42, 0x65, 0x38, 0xf6, 0xaf, 0x16, + 0xe0, 0xb1, 0x65, 0xd2, 0xde, 0x59, 0xad, 0xe7, 0x9c, 0xdf, 0x97, 0xa1, 0xd4, 0x0a, 0x7c, 0x2f, + 0x0e, 0xc2, 0x48, 0x34, 0xcd, 0x56, 0xc4, 0x9a, 0x28, 0xc3, 0x0a, 0x8a, 0x2e, 0xc2, 0x50, 0x3b, + 0x61, 0x2a, 0xc7, 0x25, 0x43, 0xca, 0xd8, 0x49, 0x06, 0xa1, 0x18, 0x9d, 0x88, 0x84, 0x62, 0xc5, + 0x28, 0x8c, 0x5b, 0x11, 0x09, 0x31, 0x83, 0x24, 0x37, 0x33, 0xbd, 0xb3, 0xc5, 0x09, 0x9d, 0xba, + 0x99, 0x29, 0x04, 0x6b, 0x58, 0xa8, 0x06, 0xe5, 0x28, 0x35, 0xb3, 0x03, 0x6d, 0xd3, 0x09, 0x76, + 0x75, 0xab, 0x99, 0x4c, 0x88, 0x18, 0x37, 0xca, 0x48, 0xdf, 0xab, 0xfb, 0xcb, 0x05, 0x40, 0x7c, + 0x08, 0xff, 0x82, 0x0d, 0xdc, 0xad, 0xee, 0x81, 0x1b, 0x7c, 0x4b, 0x1c, 0xd7, 0xe8, 0xfd, 0xa9, + 0x05, 0x8f, 0x2d, 0x7b, 0xbe, 0x4b, 0xc2, 0x9c, 0x05, 0xf8, 0x70, 0xde, 0xb2, 0x47, 0x63, 0x1a, + 0x8c, 0x25, 0x36, 0x74, 0x0c, 0x4b, 0xcc, 0xfe, 0x23, 0x0b, 0x10, 0xff, 0xec, 0xf7, 0xdd, 0xc7, + 0xde, 0xea, 0xfe, 0xd8, 0x63, 0x58, 0x16, 0xf6, 0x0d, 0x98, 0x5c, 0x6e, 0x7a, 0xc4, 0x8f, 0xab, + 0xb5, 0xe5, 0xc0, 0xdf, 0xf2, 0xb6, 0xd1, 0x27, 0x60, 0x32, 0xf6, 0x5a, 0x24, 0xe8, 0xc4, 0x75, + 0xd2, 0x08, 0x7c, 0xf6, 0x92, 0xb4, 0x2e, 0x0f, 0x2f, 0xa1, 0xfb, 0x07, 0xf3, 0x93, 0x1b, 0x06, + 0x04, 0xa7, 0x30, 0xed, 0xdf, 0xa1, 0xe3, 0x17, 0xb4, 0xda, 0x81, 0x4f, 0xfc, 0x78, 0x39, 0xf0, + 0x5d, 0x2e, 0x71, 0xf8, 0x04, 0x0c, 0xc5, 0x74, 0x3c, 0xf8, 0xd8, 0x5d, 0x92, 0x1b, 0x85, 0x8e, + 0xc2, 0xe1, 0xc1, 0xfc, 0xd9, 0xee, 0x1a, 0x6c, 0x9c, 0x58, 0x1d, 0xf4, 0x0d, 0x30, 0x12, 0xc5, + 0x4e, 0xdc, 0x89, 0xc4, 0x68, 0x3e, 0x2e, 0x47, 0xb3, 0xce, 0x4a, 0x0f, 0x0f, 0xe6, 0xa7, 0x54, + 0x35, 0x5e, 0x84, 0x45, 0x05, 0xf4, 0x14, 0x8c, 0xb6, 
0x48, 0x14, 0x39, 0xdb, 0xf2, 0x36, 0x9c, + 0x12, 0x75, 0x47, 0xd7, 0x78, 0x31, 0x96, 0x70, 0xf4, 0x04, 0x0c, 0x93, 0x30, 0x0c, 0x42, 0xb1, + 0x47, 0x27, 0x04, 0xe2, 0xf0, 0x0a, 0x2d, 0xc4, 0x1c, 0x66, 0xff, 0x3b, 0x0b, 0xa6, 0x54, 0x5f, + 0x79, 0x5b, 0x27, 0xf0, 0x2a, 0x78, 0x13, 0xa0, 0x21, 0x3f, 0x30, 0x62, 0xb7, 0xc7, 0xd8, 0xf3, + 0x97, 0x32, 0x2f, 0xea, 0xae, 0x61, 0x4c, 0x28, 0xab, 0xa2, 0x08, 0x6b, 0xd4, 0xec, 0x7f, 0x6a, + 0xc1, 0xa9, 0xd4, 0x17, 0xdd, 0xf0, 0xa2, 0x18, 0xbd, 0xd5, 0xf5, 0x55, 0x0b, 0x83, 0x7d, 0x15, + 0xad, 0xcd, 0xbe, 0x49, 0x2d, 0x65, 0x59, 0xa2, 0x7d, 0xd1, 0x35, 0x18, 0xf6, 0x62, 0xd2, 0x92, + 0x1f, 0xf3, 0x44, 0xcf, 0x8f, 0xe1, 0xbd, 0x4a, 0x66, 0xa4, 0x4a, 0x6b, 0x62, 0x4e, 0xc0, 0xfe, + 0x6b, 0x45, 0x28, 0xf3, 0x65, 0xbb, 0xe6, 0xb4, 0x4f, 0x60, 0x2e, 0xaa, 0x30, 0xc4, 0xa8, 0xf3, + 0x8e, 0x3f, 0x99, 0xdd, 0x71, 0xd1, 0x9d, 0x05, 0xfa, 0xe4, 0xe7, 0xcc, 0x91, 0xba, 0x1a, 0x68, + 0x11, 0x66, 0x24, 0x90, 0x03, 0xb0, 0xe9, 0xf9, 0x4e, 0xb8, 0x4f, 0xcb, 0x66, 0x8b, 0x8c, 0xe0, + 0xb3, 0xbd, 0x09, 0x2e, 0x29, 0x7c, 0x4e, 0x56, 0xf5, 0x35, 0x01, 0x60, 0x8d, 0xe8, 0xdc, 0xcb, + 0x50, 0x56, 0xc8, 0x47, 0xe1, 0x71, 0xe6, 0x3e, 0x05, 0x53, 0xa9, 0xb6, 0xfa, 0x55, 0x1f, 0xd7, + 0x59, 0xa4, 0x5f, 0x66, 0xa7, 0x80, 0xe8, 0xf5, 0x8a, 0xbf, 0x27, 0x4e, 0xd1, 0x77, 0xe1, 0x74, + 0x33, 0xe3, 0x70, 0x12, 0x53, 0x35, 0xf8, 0x61, 0xf6, 0x98, 0xf8, 0xec, 0xd3, 0x59, 0x50, 0x9c, + 0xd9, 0x06, 0xbd, 0xf6, 0x83, 0x36, 0x5d, 0xf3, 0x4e, 0x53, 0xe7, 0xa0, 0x6f, 0x8a, 0x32, 0xac, + 0xa0, 0xf4, 0x08, 0x3b, 0xad, 0x3a, 0x7f, 0x9d, 0xec, 0xd7, 0x49, 0x93, 0x34, 0xe2, 0x20, 0xfc, + 0x9a, 0x76, 0xff, 0x3c, 0x1f, 0x7d, 0x7e, 0x02, 0x8e, 0x09, 0x02, 0xc5, 0xeb, 0x64, 0x9f, 0x4f, + 0x85, 0xfe, 0x75, 0xc5, 0x9e, 0x5f, 0xf7, 0xb3, 0x16, 0x4c, 0xa8, 0xaf, 0x3b, 0x81, 0xad, 0xbe, + 0x64, 0x6e, 0xf5, 0xf3, 0x3d, 0x17, 0x78, 0xce, 0x26, 0xff, 0x72, 0x01, 0xce, 0x29, 0x1c, 0xca, + 0xee, 0xf3, 0x3f, 0x62, 0x55, 0x5d, 0x81, 0xb2, 0xaf, 0x04, 0x51, 0x96, 0x29, 0x01, 0x4a, 0xc4, + 0x50, 0x09, 0x0e, 0xe5, 0xda, 0xfc, 0x44, 0x5a, 0x34, 0xae, 0x4b, 0x68, 0x85, 0x34, 0x76, 0x09, + 0x8a, 0x1d, 0xcf, 0x15, 0x77, 0xc6, 0xc7, 0xe4, 0x68, 0xdf, 0xaa, 0x56, 0x0e, 0x0f, 0xe6, 0x1f, + 0xcf, 0xd3, 0x0e, 0xd0, 0xcb, 0x2a, 0x5a, 0xb8, 0x55, 0xad, 0x60, 0x5a, 0x19, 0x2d, 0xc2, 0x94, + 0x54, 0x80, 0xdc, 0xa6, 0x1c, 0x54, 0xe0, 0x8b, 0xab, 0x45, 0x89, 0x59, 0xb1, 0x09, 0xc6, 0x69, + 0x7c, 0x54, 0x81, 0xe9, 0xdd, 0xce, 0x26, 0x69, 0x92, 0x98, 0x7f, 0xf0, 0x75, 0xc2, 0x85, 0x90, + 0xe5, 0xe4, 0xb1, 0x75, 0x3d, 0x05, 0xc7, 0x5d, 0x35, 0xec, 0x3f, 0x67, 0x47, 0xbc, 0x18, 0xbd, + 0x5a, 0x18, 0xd0, 0x85, 0x45, 0xa9, 0x7f, 0x2d, 0x97, 0xf3, 0x20, 0xab, 0xe2, 0x3a, 0xd9, 0xdf, + 0x08, 0x28, 0xb3, 0x9d, 0xbd, 0x2a, 0x8c, 0x35, 0x3f, 0xd4, 0x73, 0xcd, 0xff, 0x7c, 0x01, 0xce, + 0xa8, 0x11, 0x30, 0xf8, 0xba, 0xbf, 0xe8, 0x63, 0xf0, 0x1c, 0x8c, 0xb9, 0x64, 0xcb, 0xe9, 0x34, + 0x63, 0x25, 0x11, 0x1f, 0xe6, 0x5a, 0x91, 0x4a, 0x52, 0x8c, 0x75, 0x9c, 0x23, 0x0c, 0xdb, 0xff, + 0x1a, 0x63, 0x77, 0x6b, 0xec, 0xd0, 0x35, 0xae, 0x76, 0x8d, 0x95, 0xbb, 0x6b, 0x9e, 0x80, 0x61, + 0xaf, 0x45, 0x79, 0xad, 0x82, 0xc9, 0x42, 0x55, 0x69, 0x21, 0xe6, 0x30, 0xf4, 0x11, 0x18, 0x6d, + 0x04, 0xad, 0x96, 0xe3, 0xbb, 0xec, 0xca, 0x2b, 0x2f, 0x8d, 0x51, 0x76, 0x6c, 0x99, 0x17, 0x61, + 0x09, 0x43, 0x8f, 0xc1, 0x90, 0x13, 0x6e, 0x73, 0xb1, 0x44, 0x79, 0xa9, 0x44, 0x5b, 0x5a, 0x0c, + 0xb7, 0x23, 0xcc, 0x4a, 0xe9, 0xab, 0xea, 0x6e, 0x10, 0xee, 0x7a, 0xfe, 0x76, 0xc5, 0x0b, 0xc5, + 0x96, 0x50, 0x77, 0xe1, 0x1d, 0x05, 0xc1, 0x1a, 0x16, 0x5a, 0x85, 0xe1, 0x76, 
0x10, 0xc6, 0xd1, + 0xec, 0x08, 0x1b, 0xee, 0xc7, 0x73, 0x0e, 0x22, 0xfe, 0xb5, 0xb5, 0x20, 0x8c, 0x93, 0x0f, 0xa0, + 0xff, 0x22, 0xcc, 0xab, 0xa3, 0x1b, 0x30, 0x4a, 0xfc, 0xbd, 0xd5, 0x30, 0x68, 0xcd, 0x9e, 0xca, + 0xa7, 0xb4, 0xc2, 0x51, 0xf8, 0x32, 0x4b, 0xd8, 0x4e, 0x51, 0x8c, 0x25, 0x09, 0xf4, 0x0d, 0x50, + 0x24, 0xfe, 0xde, 0xec, 0x28, 0xa3, 0x34, 0x97, 0x43, 0xe9, 0xb6, 0x13, 0x26, 0x67, 0xfe, 0x8a, + 0xbf, 0x87, 0x69, 0x1d, 0xf4, 0x19, 0x28, 0xcb, 0x03, 0x23, 0x12, 0xf2, 0xb7, 0xcc, 0x05, 0x2b, + 0x8f, 0x19, 0x4c, 0xde, 0xe9, 0x78, 0x21, 0x69, 0x11, 0x3f, 0x8e, 0x92, 0x13, 0x52, 0x42, 0x23, + 0x9c, 0x50, 0x43, 0x9f, 0x91, 0x42, 0xdf, 0xb5, 0xa0, 0xe3, 0xc7, 0xd1, 0x6c, 0x99, 0x75, 0x2f, + 0x53, 0x1d, 0x77, 0x3b, 0xc1, 0x4b, 0x4b, 0x85, 0x79, 0x65, 0x6c, 0x90, 0x42, 0x9f, 0x85, 0x09, + 0xfe, 0x9f, 0x2b, 0xb5, 0xa2, 0xd9, 0x33, 0x8c, 0xf6, 0xc5, 0x7c, 0xda, 0x1c, 0x71, 0xe9, 0x8c, + 0x20, 0x3e, 0xa1, 0x97, 0x46, 0xd8, 0xa4, 0x86, 0x30, 0x4c, 0x34, 0xbd, 0x3d, 0xe2, 0x93, 0x28, + 0xaa, 0x85, 0xc1, 0x26, 0x99, 0x05, 0x36, 0x30, 0xe7, 0xb2, 0x95, 0x60, 0xc1, 0x26, 0x59, 0x9a, + 0xa1, 0x34, 0x6f, 0xe8, 0x75, 0xb0, 0x49, 0x02, 0xdd, 0x82, 0x49, 0xfa, 0x08, 0xf3, 0x12, 0xa2, + 0x63, 0xfd, 0x88, 0xb2, 0xa7, 0x12, 0x36, 0x2a, 0xe1, 0x14, 0x11, 0x74, 0x13, 0xc6, 0xa3, 0xd8, + 0x09, 0xe3, 0x4e, 0x9b, 0x13, 0x3d, 0xdb, 0x8f, 0x28, 0xd3, 0xa1, 0xd6, 0xb5, 0x2a, 0xd8, 0x20, + 0x80, 0x5e, 0x87, 0x72, 0xd3, 0xdb, 0x22, 0x8d, 0xfd, 0x46, 0x93, 0xcc, 0x8e, 0x33, 0x6a, 0x99, + 0x87, 0xca, 0x0d, 0x89, 0xc4, 0x5f, 0x85, 0xea, 0x2f, 0x4e, 0xaa, 0xa3, 0xdb, 0x70, 0x36, 0x26, + 0x61, 0xcb, 0xf3, 0x1d, 0x7a, 0x18, 0x88, 0xd7, 0x12, 0xd3, 0x4d, 0x4e, 0xb0, 0xdd, 0x76, 0x41, + 0xcc, 0xc6, 0xd9, 0x8d, 0x4c, 0x2c, 0x9c, 0x53, 0x1b, 0xdd, 0x83, 0xd9, 0x0c, 0x48, 0xd0, 0xf4, + 0x1a, 0xfb, 0xb3, 0xa7, 0x19, 0xe5, 0x4f, 0x0a, 0xca, 0xb3, 0x1b, 0x39, 0x78, 0x87, 0x3d, 0x60, + 0x38, 0x97, 0x3a, 0xba, 0x09, 0x53, 0xec, 0x04, 0xaa, 0x75, 0x9a, 0x4d, 0xd1, 0xe0, 0x24, 0x6b, + 0xf0, 0x23, 0xf2, 0x3e, 0xae, 0x9a, 0xe0, 0xc3, 0x83, 0x79, 0x48, 0xfe, 0xe1, 0x74, 0x6d, 0xb4, + 0xc9, 0xd4, 0x60, 0x9d, 0xd0, 0x8b, 0xf7, 0xe9, 0xb9, 0x41, 0xee, 0xc5, 0xb3, 0x53, 0x3d, 0x45, + 0x10, 0x3a, 0xaa, 0xd2, 0x95, 0xe9, 0x85, 0x38, 0x4d, 0x90, 0x1e, 0xa9, 0x51, 0xec, 0x7a, 0xfe, + 0xec, 0x34, 0x3b, 0xa9, 0xd5, 0x89, 0x54, 0xa7, 0x85, 0x98, 0xc3, 0x98, 0x0a, 0x8c, 0xfe, 0xb8, + 0x49, 0x6f, 0xae, 0x19, 0x86, 0x98, 0xa8, 0xc0, 0x24, 0x00, 0x27, 0x38, 0x94, 0x99, 0x8c, 0xe3, + 0xfd, 0x59, 0xc4, 0x50, 0xd5, 0xc1, 0xb2, 0xb1, 0xf1, 0x19, 0x4c, 0xcb, 0xed, 0x4d, 0x98, 0x54, + 0x07, 0x21, 0x1b, 0x13, 0x34, 0x0f, 0xc3, 0x8c, 0x7d, 0x12, 0x02, 0xb3, 0x32, 0xed, 0x02, 0x63, + 0xad, 0x30, 0x2f, 0x67, 0x5d, 0xf0, 0xde, 0x25, 0x4b, 0xfb, 0x31, 0xe1, 0xcf, 0xf4, 0xa2, 0xd6, + 0x05, 0x09, 0xc0, 0x09, 0x8e, 0xfd, 0x7f, 0x39, 0x1b, 0x9a, 0x9c, 0xb6, 0x03, 0xdc, 0x2f, 0xcf, + 0x40, 0x69, 0x27, 0x88, 0x62, 0x8a, 0xcd, 0xda, 0x18, 0x4e, 0x18, 0xcf, 0x6b, 0xa2, 0x1c, 0x2b, + 0x0c, 0xf4, 0x2a, 0x4c, 0x34, 0xf4, 0x06, 0xc4, 0xe5, 0xa8, 0x8e, 0x11, 0xa3, 0x75, 0x6c, 0xe2, + 0xa2, 0x57, 0xa0, 0xc4, 0xcc, 0x3a, 0x1a, 0x41, 0x53, 0x70, 0x6d, 0xf2, 0x86, 0x2f, 0xd5, 0x44, + 0xf9, 0xa1, 0xf6, 0x1b, 0x2b, 0x6c, 0x74, 0x09, 0x46, 0x68, 0x17, 0xaa, 0x35, 0x71, 0x2d, 0x29, + 0xd9, 0xcf, 0x35, 0x56, 0x8a, 0x05, 0xd4, 0xfe, 0xab, 0x05, 0x6d, 0x94, 0xe9, 0x13, 0x97, 0xa0, + 0x1a, 0x8c, 0xde, 0x75, 0xbc, 0xd8, 0xf3, 0xb7, 0x05, 0xff, 0xf1, 0x54, 0xcf, 0x3b, 0x8a, 0x55, + 0xba, 0xc3, 0x2b, 0xf0, 0x5b, 0x54, 0xfc, 0xc1, 0x92, 0x0c, 0xa5, 0x18, 0x76, 0x7c, 0x9f, 0x52, + 0x2c, 
0x0c, 0x4a, 0x11, 0xf3, 0x0a, 0x9c, 0xa2, 0xf8, 0x83, 0x25, 0x19, 0xf4, 0x16, 0x80, 0xdc, + 0x61, 0xc4, 0x15, 0xe6, 0x14, 0xcf, 0xf4, 0x27, 0xba, 0xa1, 0xea, 0x2c, 0x4d, 0xd2, 0x3b, 0x3a, + 0xf9, 0x8f, 0x35, 0x7a, 0x76, 0xcc, 0xf8, 0xb4, 0xee, 0xce, 0xa0, 0x6f, 0xa6, 0x4b, 0xdc, 0x09, + 0x63, 0xe2, 0x2e, 0xc6, 0x62, 0x70, 0x3e, 0x3a, 0xd8, 0x23, 0x65, 0xc3, 0x6b, 0x11, 0x7d, 0x3b, + 0x08, 0x22, 0x38, 0xa1, 0x67, 0xff, 0x62, 0x11, 0x66, 0xf3, 0xba, 0x4b, 0x17, 0x1d, 0xb9, 0xe7, + 0xc5, 0xcb, 0x94, 0xbd, 0xb2, 0xcc, 0x45, 0xb7, 0x22, 0xca, 0xb1, 0xc2, 0xa0, 0xb3, 0x1f, 0x79, + 0xdb, 0xf2, 0x8d, 0x39, 0x9c, 0xcc, 0x7e, 0x9d, 0x95, 0x62, 0x01, 0xa5, 0x78, 0x21, 0x71, 0x22, + 0x61, 0xaf, 0xa3, 0xad, 0x12, 0xcc, 0x4a, 0xb1, 0x80, 0xea, 0x02, 0xac, 0xa1, 0x3e, 0x02, 0x2c, + 0x63, 0x88, 0x86, 0x8f, 0x77, 0x88, 0xd0, 0xe7, 0x00, 0xb6, 0x3c, 0xdf, 0x8b, 0x76, 0x18, 0xf5, + 0x91, 0x23, 0x53, 0x57, 0xcc, 0xd9, 0xaa, 0xa2, 0x82, 0x35, 0x8a, 0xe8, 0x25, 0x18, 0x53, 0x1b, + 0xb0, 0x5a, 0x61, 0xca, 0x4b, 0xcd, 0x18, 0x24, 0x39, 0x8d, 0x2a, 0x58, 0xc7, 0xb3, 0xdf, 0x4e, + 0xaf, 0x17, 0xb1, 0x03, 0xb4, 0xf1, 0xb5, 0x06, 0x1d, 0xdf, 0x42, 0xef, 0xf1, 0xb5, 0xbf, 0x5a, + 0x84, 0x29, 0xa3, 0xb1, 0x4e, 0x34, 0xc0, 0x99, 0x75, 0x95, 0x1e, 0xe0, 0x4e, 0x4c, 0xc4, 0xfe, + 0xb3, 0xfb, 0x6f, 0x15, 0xfd, 0x90, 0xa7, 0x3b, 0x80, 0xd7, 0x47, 0x9f, 0x83, 0x72, 0xd3, 0x89, + 0x98, 0x30, 0x8c, 0x88, 0x7d, 0x37, 0x08, 0xb1, 0xe4, 0x61, 0xe2, 0x44, 0xb1, 0x76, 0x6b, 0x72, + 0xda, 0x09, 0x49, 0x7a, 0xd3, 0x50, 0xfe, 0x44, 0x1a, 0x84, 0xa9, 0x4e, 0x50, 0x26, 0x66, 0x1f, + 0x73, 0x18, 0x7a, 0x05, 0xc6, 0x43, 0xc2, 0x56, 0xc5, 0x32, 0xe5, 0xe6, 0xd8, 0x32, 0x1b, 0x4e, + 0xd8, 0x3e, 0xac, 0xc1, 0xb0, 0x81, 0x99, 0xbc, 0x0d, 0x46, 0x7a, 0xbc, 0x0d, 0x9e, 0x82, 0x51, + 0xf6, 0x43, 0xad, 0x00, 0x35, 0x1b, 0x55, 0x5e, 0x8c, 0x25, 0x3c, 0xbd, 0x60, 0x4a, 0x83, 0x2d, + 0x18, 0xfa, 0xfa, 0x10, 0x8b, 0x9a, 0x29, 0x8e, 0x4b, 0xfc, 0x94, 0x13, 0x4b, 0x1e, 0x4b, 0x98, + 0xfd, 0x51, 0x98, 0xac, 0x38, 0xa4, 0x15, 0xf8, 0x2b, 0xbe, 0xdb, 0x0e, 0x3c, 0x3f, 0x46, 0xb3, + 0x30, 0xc4, 0x2e, 0x11, 0x7e, 0x04, 0x0c, 0xd1, 0x86, 0xf0, 0x10, 0x7d, 0x10, 0xd8, 0xdb, 0x70, + 0xa6, 0x12, 0xdc, 0xf5, 0xef, 0x3a, 0xa1, 0xbb, 0x58, 0xab, 0x6a, 0xef, 0xeb, 0x75, 0xf9, 0xbe, + 0xe3, 0x76, 0x58, 0x99, 0x47, 0xaf, 0x56, 0x93, 0xb3, 0xb5, 0xab, 0x5e, 0x93, 0xe4, 0x48, 0x41, + 0xfe, 0x46, 0xc1, 0x68, 0x29, 0xc1, 0x57, 0x8a, 0x2a, 0x2b, 0x57, 0x51, 0xf5, 0x06, 0x94, 0xb6, + 0x3c, 0xd2, 0x74, 0x31, 0xd9, 0x12, 0x2b, 0xf1, 0xc9, 0x7c, 0xd3, 0x92, 0x55, 0x8a, 0x29, 0xa5, + 0x5e, 0xfc, 0x75, 0xb8, 0x2a, 0x2a, 0x63, 0x45, 0x06, 0xed, 0xc2, 0xb4, 0x7c, 0x30, 0x48, 0xa8, + 0x58, 0x97, 0x4f, 0xf5, 0x7a, 0x85, 0x98, 0xc4, 0x4f, 0xdf, 0x3f, 0x98, 0x9f, 0xc6, 0x29, 0x32, + 0xb8, 0x8b, 0x30, 0x7d, 0x0e, 0xb6, 0xe8, 0x09, 0x3c, 0xc4, 0x86, 0x9f, 0x3d, 0x07, 0xd9, 0xcb, + 0x96, 0x95, 0xda, 0x3f, 0x62, 0xc1, 0x23, 0x5d, 0x23, 0x23, 0x5e, 0xf8, 0xc7, 0x3c, 0x0b, 0xe9, + 0x17, 0x77, 0xa1, 0xff, 0x8b, 0xdb, 0xfe, 0x19, 0x0b, 0x4e, 0xaf, 0xb4, 0xda, 0xf1, 0x7e, 0xc5, + 0x33, 0xb5, 0x4a, 0x2f, 0xc3, 0x48, 0x8b, 0xb8, 0x5e, 0xa7, 0x25, 0x66, 0x6e, 0x5e, 0x9e, 0x52, + 0x6b, 0xac, 0xf4, 0xf0, 0x60, 0x7e, 0xa2, 0x1e, 0x07, 0xa1, 0xb3, 0x4d, 0x78, 0x01, 0x16, 0xe8, + 0xec, 0xac, 0xf7, 0xde, 0x25, 0x37, 0xbc, 0x96, 0x27, 0x4d, 0x85, 0x7a, 0xca, 0xec, 0x16, 0xe4, + 0x80, 0x2e, 0xbc, 0xd1, 0x71, 0xfc, 0xd8, 0x8b, 0xf7, 0x85, 0x42, 0x48, 0x12, 0xc1, 0x09, 0x3d, + 0xfb, 0x2b, 0x16, 0x4c, 0xc9, 0x75, 0xbf, 0xe8, 0xba, 0x21, 0x89, 0x22, 0x34, 0x07, 0x05, 0xaf, + 0x2d, 0x7a, 0x09, 0xa2, 0x97, 
0x85, 0x6a, 0x0d, 0x17, 0xbc, 0xb6, 0x64, 0xcb, 0xd8, 0x41, 0x58, + 0x34, 0x75, 0x63, 0xd7, 0x44, 0x39, 0x56, 0x18, 0xe8, 0x32, 0x94, 0xfc, 0xc0, 0xe5, 0xe6, 0x5a, + 0xfc, 0x4a, 0x63, 0x0b, 0x6c, 0x5d, 0x94, 0x61, 0x05, 0x45, 0x35, 0x28, 0x73, 0x4b, 0xa6, 0x64, + 0xd1, 0x0e, 0x64, 0x0f, 0xc5, 0xbe, 0x6c, 0x43, 0xd6, 0xc4, 0x09, 0x11, 0xfb, 0xfb, 0x2d, 0x18, + 0x97, 0x5f, 0x36, 0x20, 0xcf, 0x49, 0xb7, 0x56, 0xc2, 0x6f, 0x26, 0x5b, 0x8b, 0xf2, 0x8c, 0x0c, + 0x62, 0xb0, 0x8a, 0xc5, 0xa3, 0xb0, 0x8a, 0xf6, 0x0f, 0x17, 0x60, 0x52, 0x76, 0xa7, 0xde, 0xd9, + 0x8c, 0x48, 0x8c, 0x36, 0xa0, 0xec, 0xf0, 0x21, 0x27, 0x72, 0xc5, 0x3e, 0x91, 0x2d, 0x14, 0x30, + 0xe6, 0x27, 0xb9, 0xbd, 0x17, 0x65, 0x6d, 0x9c, 0x10, 0x42, 0x4d, 0x98, 0xf1, 0x83, 0x98, 0x9d, + 0xe4, 0x0a, 0xde, 0x4b, 0xf5, 0x92, 0xa6, 0x7e, 0x4e, 0x50, 0x9f, 0x59, 0x4f, 0x53, 0xc1, 0xdd, + 0x84, 0xd1, 0x8a, 0x14, 0xb4, 0x14, 0xf3, 0x5f, 0xf6, 0xfa, 0x2c, 0x64, 0xcb, 0x59, 0xec, 0x5f, + 0xb1, 0xa0, 0x2c, 0xd1, 0x4e, 0x42, 0xcb, 0xb6, 0x06, 0xa3, 0x11, 0x9b, 0x04, 0x39, 0x34, 0x76, + 0xaf, 0x8e, 0xf3, 0xf9, 0x4a, 0x2e, 0x28, 0xfe, 0x3f, 0xc2, 0x92, 0x06, 0x93, 0xb3, 0xab, 0xee, + 0xbf, 0x4f, 0xe4, 0xec, 0xaa, 0x3f, 0x39, 0x37, 0xcc, 0x1f, 0xb0, 0x3e, 0x6b, 0x82, 0x2b, 0xca, + 0x47, 0xb5, 0x43, 0xb2, 0xe5, 0xdd, 0x4b, 0xf3, 0x51, 0x35, 0x56, 0x8a, 0x05, 0x14, 0xbd, 0x05, + 0xe3, 0x0d, 0x29, 0x60, 0x4d, 0xb6, 0xeb, 0xa5, 0x9e, 0xc2, 0x7e, 0xa5, 0x17, 0xe2, 0x82, 0x8d, + 0x65, 0xad, 0x3e, 0x36, 0xa8, 0x99, 0x6a, 0xfe, 0x62, 0x3f, 0x35, 0x7f, 0x42, 0x37, 0x5f, 0xe9, + 0xfd, 0xa3, 0x16, 0x8c, 0x70, 0xc1, 0xda, 0x60, 0x72, 0x4d, 0x4d, 0x4d, 0x96, 0x8c, 0xdd, 0x6d, + 0x5a, 0x28, 0xd4, 0x5e, 0x68, 0x0d, 0xca, 0xec, 0x07, 0x13, 0x0c, 0x16, 0xf3, 0xad, 0xe2, 0x79, + 0xab, 0x7a, 0x07, 0x6f, 0xcb, 0x6a, 0x38, 0xa1, 0x60, 0xff, 0x60, 0x91, 0x1e, 0x55, 0x09, 0xaa, + 0x71, 0x83, 0x5b, 0x0f, 0xef, 0x06, 0x2f, 0x3c, 0xac, 0x1b, 0x7c, 0x1b, 0xa6, 0x1a, 0x9a, 0x52, + 0x2d, 0x99, 0xc9, 0xcb, 0x3d, 0x17, 0x89, 0xa6, 0x7f, 0xe3, 0x22, 0x93, 0x65, 0x93, 0x08, 0x4e, + 0x53, 0x45, 0xdf, 0x0c, 0xe3, 0x7c, 0x9e, 0x45, 0x2b, 0xdc, 0x52, 0xe2, 0x23, 0xf9, 0xeb, 0x45, + 0x6f, 0x82, 0x8b, 0xd8, 0xb4, 0xea, 0xd8, 0x20, 0x66, 0xff, 0xb1, 0x05, 0x68, 0xa5, 0xbd, 0x43, + 0x5a, 0x24, 0x74, 0x9a, 0x89, 0x6c, 0xfc, 0x2f, 0x5b, 0x30, 0x4b, 0xba, 0x8a, 0x97, 0x83, 0x56, + 0x4b, 0xbc, 0x40, 0x72, 0x1e, 0xc9, 0x2b, 0x39, 0x75, 0x94, 0xdb, 0xc0, 0x6c, 0x1e, 0x06, 0xce, + 0x6d, 0x0f, 0xad, 0xc1, 0x29, 0x7e, 0xe5, 0x29, 0x80, 0x66, 0x1b, 0xfd, 0xa8, 0x20, 0x7c, 0x6a, + 0xa3, 0x1b, 0x05, 0x67, 0xd5, 0xb3, 0xbf, 0x73, 0x1c, 0x72, 0x7b, 0xf1, 0x81, 0x52, 0xe0, 0x03, + 0xa5, 0xc0, 0x07, 0x4a, 0x81, 0x0f, 0x94, 0x02, 0x1f, 0x28, 0x05, 0xbe, 0xee, 0x95, 0x02, 0x7f, + 0x68, 0xc1, 0xa9, 0xee, 0x6b, 0xe0, 0x24, 0x18, 0xf3, 0x0e, 0x9c, 0xea, 0xbe, 0xeb, 0x7a, 0xda, + 0xc1, 0x75, 0xf7, 0x33, 0xb9, 0xf7, 0x32, 0xbe, 0x01, 0x67, 0xd1, 0xb7, 0x7f, 0xb1, 0x04, 0xc3, + 0x2b, 0x7b, 0xc4, 0x8f, 0x4f, 0xe0, 0x13, 0x1b, 0x30, 0xe9, 0xf9, 0x7b, 0x41, 0x73, 0x8f, 0xb8, + 0x1c, 0x7e, 0x94, 0xf7, 0xee, 0x59, 0x41, 0x7a, 0xb2, 0x6a, 0x90, 0xc0, 0x29, 0x92, 0x0f, 0x43, + 0xe6, 0x7c, 0x15, 0x46, 0xf8, 0xed, 0x20, 0x04, 0xce, 0x99, 0x97, 0x01, 0x1b, 0x44, 0x71, 0xe7, + 0x25, 0xf2, 0x70, 0x7e, 0xfb, 0x88, 0xea, 0xe8, 0x6d, 0x98, 0xdc, 0xf2, 0xc2, 0x28, 0xde, 0xf0, + 0x5a, 0x24, 0x8a, 0x9d, 0x56, 0xfb, 0x01, 0x64, 0xcc, 0x6a, 0x1c, 0x56, 0x0d, 0x4a, 0x38, 0x45, + 0x19, 0x6d, 0xc3, 0x44, 0xd3, 0xd1, 0x9b, 0x1a, 0x3d, 0x72, 0x53, 0xea, 0xda, 0xb9, 0xa1, 0x13, + 0xc2, 0x26, 0x5d, 0xba, 0x4f, 0x1b, 0x4c, 0x4c, 0x5a, 
0x62, 0xc2, 0x03, 0xb5, 0x4f, 0xb9, 0x7c, + 0x94, 0xc3, 0x28, 0x07, 0xc5, 0x2c, 0x63, 0xcb, 0x26, 0x07, 0xa5, 0xd9, 0xbf, 0x7e, 0x1e, 0xca, + 0x84, 0x0e, 0x21, 0x25, 0x2c, 0x6e, 0xae, 0x2b, 0x83, 0xf5, 0x75, 0xcd, 0x6b, 0x84, 0x81, 0x29, + 0xdd, 0x5f, 0x91, 0x94, 0x70, 0x42, 0x14, 0x2d, 0xc3, 0x48, 0x44, 0x42, 0x8f, 0x44, 0xe2, 0x0e, + 0xeb, 0x31, 0x8d, 0x0c, 0x8d, 0x3b, 0x95, 0xf0, 0xdf, 0x58, 0x54, 0xa5, 0xcb, 0xcb, 0x61, 0x82, + 0x4f, 0x76, 0xcb, 0x68, 0xcb, 0x6b, 0x91, 0x95, 0x62, 0x01, 0x45, 0xaf, 0xc3, 0x68, 0x48, 0x9a, + 0x4c, 0x7d, 0x34, 0x31, 0xf8, 0x22, 0xe7, 0xda, 0x28, 0x5e, 0x0f, 0x4b, 0x02, 0xe8, 0x3a, 0xa0, + 0x90, 0x50, 0x0e, 0xcc, 0xf3, 0xb7, 0x95, 0xbd, 0xa8, 0x38, 0xc1, 0xd5, 0x8e, 0xc7, 0x09, 0x86, + 0xf4, 0xef, 0xc1, 0x19, 0xd5, 0xd0, 0x55, 0x98, 0x51, 0xa5, 0x55, 0x3f, 0x8a, 0x1d, 0x7a, 0x72, + 0x4e, 0x31, 0x5a, 0x4a, 0x00, 0x82, 0xd3, 0x08, 0xb8, 0xbb, 0x8e, 0xfd, 0x53, 0x16, 0xf0, 0x71, + 0x3e, 0x81, 0x67, 0xff, 0x6b, 0xe6, 0xb3, 0xff, 0x5c, 0xee, 0xcc, 0xe5, 0x3c, 0xf9, 0xef, 0x5b, + 0x30, 0xa6, 0xcd, 0x6c, 0xb2, 0x66, 0xad, 0x1e, 0x6b, 0xb6, 0x03, 0xd3, 0x74, 0xa5, 0xdf, 0xdc, + 0x8c, 0x48, 0xb8, 0x47, 0x5c, 0xb6, 0x30, 0x0b, 0x0f, 0xb6, 0x30, 0x95, 0x21, 0xdb, 0x8d, 0x14, + 0x41, 0xdc, 0xd5, 0x04, 0x7a, 0x59, 0xea, 0x52, 0x8a, 0x86, 0x1d, 0x38, 0xd7, 0x93, 0x1c, 0x1e, + 0xcc, 0x4f, 0x6b, 0x1f, 0xa2, 0xeb, 0x4e, 0xec, 0xcf, 0xcb, 0x6f, 0x54, 0x06, 0x83, 0x0d, 0xb5, + 0x58, 0x52, 0x06, 0x83, 0x6a, 0x39, 0xe0, 0x04, 0x87, 0xee, 0xd1, 0x9d, 0x20, 0x8a, 0xd3, 0x06, + 0x83, 0xd7, 0x82, 0x28, 0xc6, 0x0c, 0x62, 0xbf, 0x00, 0xb0, 0x72, 0x8f, 0x34, 0xf8, 0x52, 0xd7, + 0x9f, 0x33, 0x56, 0xfe, 0x73, 0xc6, 0xfe, 0x0f, 0x16, 0x4c, 0xae, 0x2e, 0x1b, 0x12, 0xe1, 0x05, + 0x00, 0xfe, 0x06, 0xbb, 0x73, 0x67, 0x5d, 0x6a, 0xdb, 0xb9, 0xc2, 0x54, 0x95, 0x62, 0x0d, 0x03, + 0x9d, 0x83, 0x62, 0xb3, 0xe3, 0x0b, 0xe9, 0xe4, 0x28, 0xbd, 0xb0, 0x6f, 0x74, 0x7c, 0x4c, 0xcb, + 0x34, 0x27, 0x84, 0xe2, 0xc0, 0x4e, 0x08, 0x7d, 0x83, 0x01, 0xa0, 0x79, 0x18, 0xbe, 0x7b, 0xd7, + 0x73, 0xb9, 0xcb, 0xa5, 0xb0, 0x04, 0xb8, 0x73, 0xa7, 0x5a, 0x89, 0x30, 0x2f, 0xb7, 0xbf, 0x54, + 0x84, 0xb9, 0xd5, 0x26, 0xb9, 0xf7, 0x1e, 0xdd, 0x4e, 0x07, 0x75, 0xa1, 0x38, 0x9a, 0x68, 0xe8, + 0xa8, 0x6e, 0x32, 0xfd, 0xc7, 0x63, 0x0b, 0x46, 0xb9, 0xbd, 0x9c, 0x74, 0x42, 0x7d, 0x35, 0xab, + 0xf5, 0xfc, 0x01, 0x59, 0xe0, 0x76, 0x77, 0xc2, 0x87, 0x4e, 0xdd, 0xb4, 0xa2, 0x14, 0x4b, 0xe2, + 0x73, 0x9f, 0x80, 0x71, 0x1d, 0xf3, 0x48, 0x0e, 0x6b, 0x7f, 0xa9, 0x08, 0xd3, 0xb4, 0x07, 0x0f, + 0x75, 0x22, 0x6e, 0x75, 0x4f, 0xc4, 0x71, 0x3b, 0x2d, 0xf5, 0x9f, 0x8d, 0xb7, 0xd2, 0xb3, 0xf1, + 0x5c, 0xde, 0x6c, 0x9c, 0xf4, 0x1c, 0x7c, 0x87, 0x05, 0xa7, 0x56, 0x9b, 0x41, 0x63, 0x37, 0xe5, + 0x58, 0xf4, 0x12, 0x8c, 0xd1, 0x73, 0x3c, 0x32, 0x7c, 0xde, 0x8d, 0x28, 0x08, 0x02, 0x84, 0x75, + 0x3c, 0xad, 0xda, 0xad, 0x5b, 0xd5, 0x4a, 0x56, 0xf0, 0x04, 0x01, 0xc2, 0x3a, 0x9e, 0xfd, 0x9b, + 0x16, 0x9c, 0xbf, 0xba, 0xbc, 0x92, 0x2c, 0xc5, 0xae, 0xf8, 0x0d, 0x97, 0x60, 0xa4, 0xed, 0x6a, + 0x5d, 0x49, 0x04, 0xbe, 0x15, 0xd6, 0x0b, 0x01, 0x7d, 0xbf, 0xc4, 0x26, 0xf9, 0x49, 0x0b, 0x4e, + 0x5d, 0xf5, 0x62, 0x7a, 0x2d, 0xa7, 0x23, 0x09, 0xd0, 0x7b, 0x39, 0xf2, 0xe2, 0x20, 0xdc, 0x4f, + 0x47, 0x12, 0xc0, 0x0a, 0x82, 0x35, 0x2c, 0xde, 0xf2, 0x9e, 0xc7, 0x2c, 0xb5, 0x0b, 0xa6, 0x1e, + 0x0b, 0x8b, 0x72, 0xac, 0x30, 0xe8, 0x87, 0xb9, 0x5e, 0xc8, 0xa4, 0x86, 0xfb, 0xe2, 0x84, 0x55, + 0x1f, 0x56, 0x91, 0x00, 0x9c, 0xe0, 0xd0, 0x07, 0xd4, 0xfc, 0xd5, 0x66, 0x27, 0x8a, 0x49, 0xb8, + 0x15, 0xe5, 0x9c, 0x8e, 0x2f, 0x40, 0x99, 0x48, 0x19, 0xbd, 0xe8, 0xb5, 0x62, 
0x35, 0x95, 0xf0, + 0x9e, 0x07, 0x34, 0x50, 0x78, 0x03, 0xb8, 0x29, 0x1e, 0xcd, 0xcf, 0x6c, 0x15, 0x10, 0xd1, 0xdb, + 0xd2, 0x23, 0x3c, 0x30, 0x57, 0xf1, 0x95, 0x2e, 0x28, 0xce, 0xa8, 0x61, 0xff, 0x88, 0x05, 0x67, + 0xd4, 0x07, 0xbf, 0xef, 0x3e, 0xd3, 0xfe, 0xb9, 0x02, 0x4c, 0x5c, 0xdb, 0xd8, 0xa8, 0x5d, 0x25, + 0xb1, 0xb8, 0xb6, 0xfb, 0xab, 0xd1, 0xb1, 0xa6, 0x0d, 0xec, 0xf5, 0x0a, 0xec, 0xc4, 0x5e, 0x73, + 0x81, 0x07, 0x0a, 0x5a, 0xa8, 0xfa, 0xf1, 0xcd, 0xb0, 0x1e, 0x87, 0x9e, 0xbf, 0x9d, 0xa9, 0x3f, + 0x94, 0xcc, 0x45, 0x31, 0x8f, 0xb9, 0x40, 0x2f, 0xc0, 0x08, 0x8b, 0x54, 0x24, 0x27, 0xe1, 0x51, + 0xf5, 0x88, 0x62, 0xa5, 0x87, 0x07, 0xf3, 0xe5, 0x5b, 0xb8, 0xca, 0xff, 0x60, 0x81, 0x8a, 0x6e, + 0xc1, 0xd8, 0x4e, 0x1c, 0xb7, 0xaf, 0x11, 0xc7, 0xa5, 0xaf, 0x65, 0x7e, 0x1c, 0x5e, 0xc8, 0x3a, + 0x0e, 0xe9, 0x20, 0x70, 0xb4, 0xe4, 0x04, 0x49, 0xca, 0x22, 0xac, 0xd3, 0xb1, 0xeb, 0x00, 0x09, + 0xec, 0x98, 0x74, 0x27, 0xf6, 0xef, 0x5b, 0x30, 0xca, 0x83, 0x46, 0x84, 0xe8, 0x93, 0x30, 0x44, + 0xee, 0x91, 0x86, 0x60, 0x95, 0x33, 0x3b, 0x9c, 0x70, 0x5a, 0x5c, 0x06, 0x4c, 0xff, 0x63, 0x56, + 0x0b, 0x5d, 0x83, 0x51, 0xda, 0xdb, 0xab, 0x2a, 0x82, 0xc6, 0xe3, 0x79, 0x5f, 0xac, 0xa6, 0x9d, + 0x33, 0x67, 0xa2, 0x08, 0xcb, 0xea, 0x4c, 0xfb, 0xdc, 0x68, 0xd7, 0xe9, 0x89, 0x1d, 0xf7, 0x62, + 0x2c, 0x36, 0x96, 0x6b, 0x1c, 0x49, 0x50, 0xe3, 0xda, 0x67, 0x59, 0x88, 0x13, 0x22, 0xf6, 0x06, + 0x94, 0xe9, 0xa4, 0x2e, 0x36, 0x3d, 0xa7, 0xb7, 0x42, 0xfd, 0x69, 0x28, 0x4b, 0x75, 0x79, 0x24, + 0x9c, 0xc5, 0x19, 0x55, 0xa9, 0x4d, 0x8f, 0x70, 0x02, 0xb7, 0xb7, 0xe0, 0x34, 0x33, 0x7e, 0x74, + 0xe2, 0x1d, 0x63, 0x8f, 0xf5, 0x5f, 0xcc, 0xcf, 0x88, 0x97, 0x27, 0x9f, 0x99, 0x59, 0xcd, 0x1f, + 0x73, 0x5c, 0x52, 0x4c, 0x5e, 0xa1, 0xf6, 0x57, 0x87, 0xe0, 0xd1, 0x6a, 0x3d, 0x3f, 0x9e, 0xc8, + 0x2b, 0x30, 0xce, 0xf9, 0x52, 0xba, 0xb4, 0x9d, 0xa6, 0x68, 0x57, 0x09, 0x7f, 0x37, 0x34, 0x18, + 0x36, 0x30, 0xd1, 0x79, 0x28, 0x7a, 0xef, 0xf8, 0x69, 0xd7, 0xa6, 0xea, 0x1b, 0xeb, 0x98, 0x96, + 0x53, 0x30, 0x65, 0x71, 0xf9, 0xdd, 0xa1, 0xc0, 0x8a, 0xcd, 0x7d, 0x0d, 0x26, 0xbd, 0xa8, 0x11, + 0x79, 0x55, 0x9f, 0x9e, 0x33, 0xda, 0x49, 0xa5, 0xa4, 0x22, 0xb4, 0xd3, 0x0a, 0x8a, 0x53, 0xd8, + 0xda, 0x45, 0x36, 0x3c, 0x30, 0x9b, 0xdc, 0xd7, 0x7b, 0x9a, 0xbe, 0x00, 0xda, 0xec, 0xeb, 0x22, + 0x26, 0xc5, 0x17, 0x2f, 0x00, 0xfe, 0xc1, 0x11, 0x96, 0x30, 0xfa, 0xe4, 0x6c, 0xec, 0x38, 0xed, + 0xc5, 0x4e, 0xbc, 0x53, 0xf1, 0xa2, 0x46, 0xb0, 0x47, 0xc2, 0x7d, 0x26, 0x2d, 0x28, 0x25, 0x4f, + 0x4e, 0x05, 0x58, 0xbe, 0xb6, 0x58, 0xa3, 0x98, 0xb8, 0xbb, 0x0e, 0x5a, 0x84, 0x29, 0x59, 0x58, + 0x27, 0x11, 0xbb, 0xc2, 0xc6, 0x18, 0x19, 0xe5, 0x6c, 0x24, 0x8a, 0x15, 0x91, 0x34, 0xbe, 0xc9, + 0x49, 0xc3, 0x71, 0x70, 0xd2, 0x2f, 0xc3, 0x84, 0xe7, 0x7b, 0xb1, 0xe7, 0xc4, 0x01, 0x57, 0x41, + 0x71, 0xc1, 0x00, 0x93, 0xad, 0x57, 0x75, 0x00, 0x36, 0xf1, 0xec, 0xff, 0x36, 0x04, 0x33, 0x6c, + 0xda, 0x3e, 0x58, 0x61, 0x5f, 0x4f, 0x2b, 0xec, 0x56, 0xf7, 0x0a, 0x3b, 0x8e, 0x27, 0xc2, 0x03, + 0x2f, 0xb3, 0xb7, 0xa1, 0xac, 0xfc, 0xab, 0xa4, 0x83, 0xa5, 0x95, 0xe3, 0x60, 0xd9, 0x9f, 0xfb, + 0x90, 0x26, 0x6a, 0xc5, 0x4c, 0x13, 0xb5, 0xbf, 0x65, 0x41, 0xa2, 0x53, 0x41, 0xd7, 0xa0, 0xdc, + 0x0e, 0x98, 0xe5, 0x65, 0x28, 0xcd, 0x99, 0x1f, 0xcd, 0xbc, 0xa8, 0xf8, 0xa5, 0xc8, 0x3f, 0xbe, + 0x26, 0x6b, 0xe0, 0xa4, 0x32, 0x5a, 0x82, 0xd1, 0x76, 0x48, 0xea, 0x31, 0x0b, 0x2b, 0xd2, 0x97, + 0x0e, 0x5f, 0x23, 0x1c, 0x1f, 0xcb, 0x8a, 0xf6, 0xcf, 0x5b, 0x00, 0xdc, 0x0a, 0xcc, 0xf1, 0xb7, + 0xc9, 0x09, 0x88, 0xbb, 0x2b, 0x30, 0x14, 0xb5, 0x49, 0xa3, 0x97, 0x4d, 0x6c, 0xd2, 0x9f, 0x7a, + 0x9b, 
0x34, 0x92, 0x01, 0xa7, 0xff, 0x30, 0xab, 0x6d, 0x7f, 0x17, 0xc0, 0x64, 0x82, 0x56, 0x8d, + 0x49, 0x0b, 0x3d, 0x6b, 0x84, 0x19, 0x38, 0x97, 0x0a, 0x33, 0x50, 0x66, 0xd8, 0x9a, 0x64, 0xf5, + 0x6d, 0x28, 0xb6, 0x9c, 0x7b, 0x42, 0x74, 0xf6, 0x74, 0xef, 0x6e, 0x50, 0xfa, 0x0b, 0x6b, 0xce, + 0x3d, 0xfe, 0x48, 0x7c, 0x5a, 0x2e, 0x90, 0x35, 0xe7, 0xde, 0x21, 0xb7, 0x7c, 0x65, 0x87, 0xd4, + 0x0d, 0x2f, 0x8a, 0xbf, 0xf0, 0x5f, 0x93, 0xff, 0x6c, 0xd9, 0xd1, 0x46, 0x58, 0x5b, 0x9e, 0x2f, + 0x6c, 0xa2, 0x06, 0x6a, 0xcb, 0xf3, 0xd3, 0x6d, 0x79, 0xfe, 0x00, 0x6d, 0x79, 0x3e, 0x7a, 0x17, + 0x46, 0x85, 0xfd, 0xa1, 0x08, 0xeb, 0x73, 0x65, 0x80, 0xf6, 0x84, 0xf9, 0x22, 0x6f, 0xf3, 0x8a, + 0x7c, 0x04, 0x8b, 0xd2, 0xbe, 0xed, 0xca, 0x06, 0xd1, 0x5f, 0xb7, 0x60, 0x52, 0xfc, 0xc6, 0xe4, + 0x9d, 0x0e, 0x89, 0x62, 0xc1, 0x7b, 0x7e, 0x7c, 0xf0, 0x3e, 0x88, 0x8a, 0xbc, 0x2b, 0x1f, 0x97, + 0xc7, 0xac, 0x09, 0xec, 0xdb, 0xa3, 0x54, 0x2f, 0xd0, 0x3f, 0xb0, 0xe0, 0x74, 0xcb, 0xb9, 0xc7, + 0x5b, 0xe4, 0x65, 0xd8, 0x89, 0xbd, 0x40, 0xa8, 0xfe, 0x3f, 0x39, 0xd8, 0xf4, 0x77, 0x55, 0xe7, + 0x9d, 0x94, 0xfa, 0xc9, 0xd3, 0x59, 0x28, 0x7d, 0xbb, 0x9a, 0xd9, 0xaf, 0xb9, 0x2d, 0x28, 0xc9, + 0xf5, 0x96, 0x21, 0x6a, 0xa8, 0xe8, 0x8c, 0xf5, 0x91, 0xcd, 0x3f, 0x75, 0x5f, 0x7f, 0xda, 0x8e, + 0x58, 0x6b, 0x0f, 0xb5, 0x9d, 0xb7, 0x61, 0x5c, 0x5f, 0x63, 0x0f, 0xb5, 0xad, 0x77, 0xe0, 0x54, + 0xc6, 0x5a, 0x7a, 0xa8, 0x4d, 0xde, 0x85, 0x73, 0xb9, 0xeb, 0xe3, 0x61, 0x36, 0x6c, 0xff, 0x9c, + 0xa5, 0x9f, 0x83, 0x27, 0xa0, 0x73, 0x58, 0x36, 0x75, 0x0e, 0x17, 0x7a, 0xef, 0x9c, 0x1c, 0xc5, + 0xc3, 0x5b, 0x7a, 0xa7, 0xe9, 0xa9, 0x8e, 0x5e, 0x87, 0x91, 0x26, 0x2d, 0x91, 0x86, 0xaf, 0x76, + 0xff, 0x1d, 0x99, 0xf0, 0x52, 0xac, 0x3c, 0xc2, 0x82, 0x82, 0xfd, 0x4b, 0x16, 0x0c, 0x9d, 0xc0, + 0x48, 0x60, 0x73, 0x24, 0x9e, 0xcd, 0x25, 0x2d, 0x22, 0x0e, 0x2f, 0x60, 0xe7, 0xee, 0xca, 0xbd, + 0x98, 0xf8, 0x11, 0x7b, 0x2a, 0x66, 0x0e, 0xcc, 0xb7, 0xc0, 0xa9, 0x1b, 0x81, 0xe3, 0x2e, 0x39, + 0x4d, 0xc7, 0x6f, 0x90, 0xb0, 0xea, 0x6f, 0x1f, 0xc9, 0x02, 0xbb, 0xd0, 0xcf, 0x02, 0xdb, 0xde, + 0x01, 0xa4, 0x37, 0x20, 0x5c, 0x59, 0x30, 0x8c, 0x7a, 0xbc, 0x29, 0x31, 0xfc, 0x4f, 0x66, 0xb3, + 0x66, 0x5d, 0x3d, 0xd3, 0x9c, 0x34, 0x78, 0x01, 0x96, 0x84, 0xec, 0x57, 0x20, 0xd3, 0x1f, 0xbe, + 0xbf, 0xd8, 0xc0, 0xfe, 0x0c, 0xcc, 0xb0, 0x9a, 0x47, 0x7c, 0xd2, 0xda, 0x29, 0xa9, 0x64, 0x46, + 0xf0, 0x3b, 0xfb, 0x8b, 0x16, 0x4c, 0xad, 0xa7, 0x62, 0x82, 0x5d, 0x62, 0x0a, 0xd0, 0x0c, 0x61, + 0x78, 0x9d, 0x95, 0x62, 0x01, 0x3d, 0x76, 0x19, 0xd4, 0x9f, 0x5b, 0x90, 0x84, 0xa8, 0x38, 0x01, + 0xc6, 0x6b, 0xd9, 0x60, 0xbc, 0x32, 0x65, 0x23, 0xaa, 0x3b, 0x79, 0x7c, 0x17, 0xba, 0xae, 0xe2, + 0x31, 0xf5, 0x10, 0x8b, 0x24, 0x64, 0x78, 0xf4, 0x9e, 0x49, 0x33, 0x68, 0x93, 0x8c, 0xd0, 0x64, + 0xff, 0xe7, 0x02, 0x20, 0x85, 0x3b, 0x70, 0xbc, 0xa8, 0xee, 0x1a, 0xc7, 0x13, 0x2f, 0x6a, 0x0f, + 0x10, 0x53, 0xe1, 0x87, 0x8e, 0x1f, 0x71, 0xb2, 0x9e, 0x90, 0xba, 0x1d, 0xcd, 0x3e, 0x60, 0x4e, + 0x34, 0x89, 0x6e, 0x74, 0x51, 0xc3, 0x19, 0x2d, 0x68, 0xa6, 0x19, 0xc3, 0x83, 0x9a, 0x66, 0x8c, + 0xf4, 0x71, 0x57, 0xfb, 0x59, 0x0b, 0x26, 0xd4, 0x30, 0xbd, 0x4f, 0xec, 0xcf, 0x55, 0x7f, 0x72, + 0x8e, 0xbe, 0x9a, 0xd6, 0x65, 0x76, 0x25, 0x7c, 0x23, 0x73, 0x3b, 0x74, 0x9a, 0xde, 0xbb, 0x44, + 0x45, 0xeb, 0x9b, 0x17, 0x6e, 0x84, 0xa2, 0xf4, 0xf0, 0x60, 0x7e, 0x42, 0xfd, 0xe3, 0xd1, 0x81, + 0x93, 0x2a, 0xf6, 0x8f, 0xd3, 0xcd, 0x6e, 0x2e, 0x45, 0xf4, 0x12, 0x0c, 0xb7, 0x77, 0x9c, 0x88, + 0xa4, 0x9c, 0x6e, 0x86, 0x6b, 0xb4, 0xf0, 0xf0, 0x60, 0x7e, 0x52, 0x55, 0x60, 0x25, 0x98, 0x63, + 0x0f, 0x1e, 0x85, 0xab, 0x7b, 
0x71, 0xf6, 0x8d, 0xc2, 0xf5, 0xc7, 0x16, 0x0c, 0xad, 0x07, 0xee, + 0x49, 0x1c, 0x01, 0xaf, 0x19, 0x47, 0xc0, 0x63, 0x79, 0x81, 0xdb, 0x73, 0x77, 0xff, 0x6a, 0x6a, + 0xf7, 0x5f, 0xc8, 0xa5, 0xd0, 0x7b, 0xe3, 0xb7, 0x60, 0x8c, 0x85, 0x83, 0x17, 0x0e, 0x46, 0x2f, + 0x18, 0x1b, 0x7e, 0x3e, 0xb5, 0xe1, 0xa7, 0x34, 0x54, 0x6d, 0xa7, 0x3f, 0x05, 0xa3, 0xc2, 0xc9, + 0x25, 0xed, 0xbd, 0x29, 0x70, 0xb1, 0x84, 0xdb, 0x3f, 0x5a, 0x04, 0x23, 0xfc, 0x3c, 0xfa, 0x15, + 0x0b, 0x16, 0x42, 0x6e, 0xfc, 0xea, 0x56, 0x3a, 0xa1, 0xe7, 0x6f, 0xd7, 0x1b, 0x3b, 0xc4, 0xed, + 0x34, 0x3d, 0x7f, 0xbb, 0xba, 0xed, 0x07, 0xaa, 0x78, 0xe5, 0x1e, 0x69, 0x74, 0x98, 0xfa, 0xaa, + 0x4f, 0xac, 0x7b, 0x65, 0x44, 0xfe, 0xfc, 0xfd, 0x83, 0xf9, 0x05, 0x7c, 0x24, 0xda, 0xf8, 0x88, + 0x7d, 0x41, 0xbf, 0x69, 0xc1, 0x15, 0x1e, 0x95, 0x7d, 0xf0, 0xfe, 0xf7, 0x78, 0xe7, 0xd6, 0x24, + 0xa9, 0x84, 0xc8, 0x06, 0x09, 0x5b, 0x4b, 0x2f, 0x8b, 0x01, 0xbd, 0x52, 0x3b, 0x5a, 0x5b, 0xf8, + 0xa8, 0x9d, 0xb3, 0xff, 0x45, 0x11, 0x26, 0x44, 0x68, 0x27, 0x71, 0x07, 0xbc, 0x64, 0x2c, 0x89, + 0xc7, 0x53, 0x4b, 0x62, 0xc6, 0x40, 0x3e, 0x9e, 0xe3, 0x3f, 0x82, 0x19, 0x7a, 0x38, 0x5f, 0x23, + 0x4e, 0x18, 0x6f, 0x12, 0x87, 0x5b, 0x5c, 0x15, 0x8f, 0x7c, 0xfa, 0x2b, 0xc1, 0xda, 0x8d, 0x34, + 0x31, 0xdc, 0x4d, 0xff, 0xeb, 0xe9, 0xce, 0xf1, 0x61, 0xba, 0x2b, 0x3a, 0xd7, 0x9b, 0x50, 0x56, + 0x1e, 0x1a, 0xe2, 0xd0, 0xe9, 0x1d, 0xe4, 0x2e, 0x4d, 0x81, 0x0b, 0xbf, 0x12, 0xef, 0xa0, 0x84, + 0x9c, 0xfd, 0x0f, 0x0b, 0x46, 0x83, 0x7c, 0x12, 0xd7, 0xa1, 0xe4, 0x44, 0x91, 0xb7, 0xed, 0x13, + 0x57, 0xec, 0xd8, 0x0f, 0xe7, 0xed, 0x58, 0xa3, 0x19, 0xe6, 0x25, 0xb3, 0x28, 0x6a, 0x62, 0x45, + 0x03, 0x5d, 0xe3, 0x76, 0x6d, 0x7b, 0xf2, 0xa5, 0x36, 0x18, 0x35, 0x90, 0x96, 0x6f, 0x7b, 0x04, + 0x8b, 0xfa, 0xe8, 0xb3, 0xdc, 0xf0, 0xf0, 0xba, 0x1f, 0xdc, 0xf5, 0xaf, 0x06, 0x81, 0x0c, 0x9f, + 0x30, 0x18, 0xc1, 0x19, 0x69, 0x6e, 0xa8, 0xaa, 0x63, 0x93, 0xda, 0x60, 0x11, 0x2c, 0xbf, 0x15, + 0x4e, 0x51, 0xd2, 0xa6, 0x77, 0x73, 0x84, 0x08, 0x4c, 0x89, 0xb8, 0x61, 0xb2, 0x4c, 0x8c, 0x5d, + 0xe6, 0x23, 0xcc, 0xac, 0x9d, 0x48, 0x80, 0xaf, 0x9b, 0x24, 0x70, 0x9a, 0xa6, 0xfd, 0x13, 0x16, + 0x30, 0x4f, 0xcf, 0x13, 0xe0, 0x47, 0x3e, 0x65, 0xf2, 0x23, 0xb3, 0x79, 0x83, 0x9c, 0xc3, 0x8a, + 0xbc, 0xc8, 0x57, 0x56, 0x2d, 0x0c, 0xee, 0xed, 0x0b, 0xa3, 0x8f, 0xfe, 0xef, 0x0f, 0xfb, 0xff, + 0x58, 0xfc, 0x10, 0x53, 0xfe, 0x13, 0xe8, 0xdb, 0xa0, 0xd4, 0x70, 0xda, 0x4e, 0x83, 0xe7, 0x4a, + 0xc9, 0x95, 0xc5, 0x19, 0x95, 0x16, 0x96, 0x45, 0x0d, 0x2e, 0x5b, 0x92, 0xf1, 0xe7, 0x4a, 0xb2, + 0xb8, 0xaf, 0x3c, 0x49, 0x35, 0x39, 0xb7, 0x0b, 0x13, 0x06, 0xb1, 0x87, 0x2a, 0x88, 0xf8, 0x36, + 0x7e, 0xc5, 0xaa, 0x78, 0x89, 0x2d, 0x98, 0xf1, 0xb5, 0xff, 0xf4, 0x42, 0x91, 0x8f, 0xcb, 0x0f, + 0xf7, 0xbb, 0x44, 0xd9, 0xed, 0xa3, 0xf9, 0x9d, 0xa6, 0xc8, 0xe0, 0x6e, 0xca, 0xf6, 0x8f, 0x59, + 0xf0, 0x88, 0x8e, 0xa8, 0xb9, 0xb6, 0xf4, 0x93, 0xee, 0x57, 0xa0, 0x14, 0xb4, 0x49, 0xe8, 0xc4, + 0x41, 0x28, 0x6e, 0x8d, 0xcb, 0x72, 0xd0, 0x6f, 0x8a, 0xf2, 0x43, 0x11, 0x69, 0x5c, 0x52, 0x97, + 0xe5, 0x58, 0xd5, 0xa4, 0xaf, 0x4f, 0x36, 0x18, 0x91, 0x70, 0x62, 0x62, 0x67, 0x00, 0x53, 0x74, + 0x47, 0x58, 0x40, 0xec, 0xaf, 0x5a, 0x7c, 0x61, 0xe9, 0x5d, 0x47, 0xef, 0xc0, 0x74, 0xcb, 0x89, + 0x1b, 0x3b, 0x2b, 0xf7, 0xda, 0x21, 0xd7, 0x95, 0xc8, 0x71, 0x7a, 0xba, 0xdf, 0x38, 0x69, 0x1f, + 0x99, 0xd8, 0x52, 0xae, 0xa5, 0x88, 0xe1, 0x2e, 0xf2, 0x68, 0x13, 0xc6, 0x58, 0x19, 0xf3, 0xcf, + 0x8b, 0x7a, 0xb1, 0x06, 0x79, 0xad, 0x29, 0x5b, 0x81, 0xb5, 0x84, 0x0e, 0xd6, 0x89, 0xda, 0x3f, + 0x53, 0xe4, 0xbb, 0x9d, 0xb1, 0xf2, 0x4f, 0xc1, 0x68, 
0x3b, 0x70, 0x97, 0xab, 0x15, 0x2c, 0x66, + 0x41, 0x5d, 0x23, 0x35, 0x5e, 0x8c, 0x25, 0x1c, 0x5d, 0x86, 0x92, 0xf8, 0x29, 0x75, 0x5b, 0xec, + 0x6c, 0x16, 0x78, 0x11, 0x56, 0x50, 0xf4, 0x3c, 0x40, 0x3b, 0x0c, 0xf6, 0x3c, 0x97, 0x05, 0x81, + 0x28, 0x9a, 0x66, 0x3e, 0x35, 0x05, 0xc1, 0x1a, 0x16, 0x7a, 0x15, 0x26, 0x3a, 0x7e, 0xc4, 0xd9, + 0x11, 0x67, 0x53, 0x04, 0xe5, 0x2e, 0x25, 0x06, 0x28, 0xb7, 0x74, 0x20, 0x36, 0x71, 0xd1, 0x22, + 0x8c, 0xc4, 0x0e, 0x33, 0x5b, 0x19, 0xce, 0xb7, 0xb7, 0xdd, 0xa0, 0x18, 0x7a, 0x5a, 0x0e, 0x5a, + 0x01, 0x8b, 0x8a, 0xe8, 0x4d, 0xe9, 0x2a, 0xcb, 0x0f, 0x76, 0x61, 0xe8, 0x3e, 0xd8, 0x25, 0xa0, + 0x39, 0xca, 0x0a, 0x03, 0x7a, 0x83, 0x16, 0x7a, 0x15, 0x80, 0xdc, 0x8b, 0x49, 0xe8, 0x3b, 0x4d, + 0x65, 0x15, 0xa6, 0xec, 0xa0, 0x2b, 0xc1, 0x7a, 0x10, 0xdf, 0x8a, 0xc8, 0xb7, 0xac, 0x28, 0x14, + 0xac, 0xa1, 0xdb, 0xbf, 0x59, 0x06, 0x48, 0x18, 0x77, 0xf4, 0x6e, 0xd7, 0xc9, 0xf5, 0x4c, 0x6f, + 0x56, 0xff, 0xf8, 0x8e, 0x2d, 0xf4, 0xdd, 0x16, 0x8c, 0x39, 0xcd, 0x66, 0xd0, 0x70, 0x62, 0x36, + 0x45, 0x85, 0xde, 0x27, 0xa7, 0x68, 0x7f, 0x31, 0xa9, 0xc1, 0xbb, 0xf0, 0x82, 0x5c, 0xa2, 0x1a, + 0xa4, 0x6f, 0x2f, 0xf4, 0x86, 0xd1, 0xc7, 0xe4, 0x5b, 0x91, 0xaf, 0xad, 0xb9, 0xf4, 0x5b, 0xb1, + 0xcc, 0x2e, 0x09, 0xfd, 0x99, 0x78, 0xcb, 0x78, 0x26, 0x0e, 0xe5, 0x3b, 0x03, 0x1a, 0xfc, 0x6b, + 0xbf, 0x17, 0x22, 0xaa, 0xe9, 0x81, 0x01, 0x86, 0xf3, 0x3d, 0xef, 0xb4, 0x87, 0x52, 0x9f, 0xa0, + 0x00, 0x6f, 0xc3, 0x94, 0x6b, 0x72, 0x01, 0x62, 0x29, 0x3e, 0x99, 0x47, 0x37, 0xc5, 0x34, 0x24, + 0xf7, 0x7e, 0x0a, 0x80, 0xd3, 0x84, 0x51, 0x8d, 0x07, 0x7d, 0xa8, 0xfa, 0x5b, 0x81, 0xf0, 0xb6, + 0xb0, 0x73, 0xe7, 0x72, 0x3f, 0x8a, 0x49, 0x8b, 0x62, 0x26, 0xd7, 0xfb, 0xba, 0xa8, 0x8b, 0x15, + 0x15, 0xf4, 0x3a, 0x8c, 0x30, 0xd7, 0xab, 0x68, 0xb6, 0x94, 0x2f, 0x2c, 0x36, 0xa3, 0x98, 0x25, + 0x3b, 0x92, 0xfd, 0x8d, 0xb0, 0xa0, 0x80, 0xae, 0x49, 0xc7, 0xc6, 0xa8, 0xea, 0xdf, 0x8a, 0x08, + 0x73, 0x6c, 0x2c, 0x2f, 0x7d, 0x38, 0xf1, 0x59, 0xe4, 0xe5, 0x99, 0xd9, 0xbb, 0x8c, 0x9a, 0x94, + 0x8d, 0x12, 0xff, 0x65, 0x52, 0xb0, 0x59, 0xc8, 0xef, 0x9e, 0x99, 0x38, 0x2c, 0x19, 0xce, 0xdb, + 0x26, 0x09, 0x9c, 0xa6, 0x49, 0x59, 0x52, 0xbe, 0xed, 0x85, 0xbf, 0x46, 0xbf, 0xc3, 0x83, 0xbf, + 0xc4, 0xd9, 0x75, 0xc4, 0x4b, 0xb0, 0xa8, 0x7f, 0xa2, 0xfc, 0xc1, 0x9c, 0x0f, 0xd3, 0xe9, 0x2d, + 0xfa, 0x50, 0xf9, 0x91, 0xdf, 0x1f, 0x82, 0x49, 0x73, 0x49, 0xa1, 0x2b, 0x50, 0x16, 0x44, 0x54, + 0x20, 0x7f, 0xb5, 0x4b, 0xd6, 0x24, 0x00, 0x27, 0x38, 0x2c, 0x7f, 0x03, 0xab, 0xae, 0xd9, 0xd9, + 0x26, 0xf9, 0x1b, 0x14, 0x04, 0x6b, 0x58, 0xf4, 0x65, 0xb5, 0x19, 0x04, 0xb1, 0xba, 0x91, 0xd4, + 0xba, 0x5b, 0x62, 0xa5, 0x58, 0x40, 0xe9, 0x4d, 0xb4, 0x4b, 0x42, 0x9f, 0x34, 0xcd, 0xf8, 0xc0, + 0xea, 0x26, 0xba, 0xae, 0x03, 0xb1, 0x89, 0x4b, 0xef, 0xd3, 0x20, 0x62, 0x0b, 0x59, 0xbc, 0xdf, + 0x12, 0xbb, 0xe5, 0x3a, 0xf7, 0xad, 0x96, 0x70, 0xf4, 0x19, 0x78, 0x44, 0xc5, 0x40, 0xc2, 0x5c, + 0x11, 0x21, 0x5b, 0x1c, 0x31, 0xc4, 0x2d, 0x8f, 0x2c, 0x67, 0xa3, 0xe1, 0xbc, 0xfa, 0xe8, 0x35, + 0x98, 0x14, 0x3c, 0xbe, 0xa4, 0x38, 0x6a, 0xda, 0xc6, 0x5c, 0x37, 0xa0, 0x38, 0x85, 0x2d, 0x23, + 0x1c, 0x33, 0x36, 0x5b, 0x52, 0x28, 0x75, 0x47, 0x38, 0xd6, 0xe1, 0xb8, 0xab, 0x06, 0x5a, 0x84, + 0x29, 0xce, 0x84, 0x79, 0xfe, 0x36, 0x9f, 0x13, 0xe1, 0x4e, 0xa5, 0xb6, 0xd4, 0x4d, 0x13, 0x8c, + 0xd3, 0xf8, 0xe8, 0x15, 0x18, 0x77, 0xc2, 0xc6, 0x8e, 0x17, 0x93, 0x46, 0xdc, 0x09, 0xb9, 0x9f, + 0x95, 0x66, 0x5c, 0xb4, 0xa8, 0xc1, 0xb0, 0x81, 0x69, 0xbf, 0x0b, 0xa7, 0x32, 0x82, 0x2e, 0xd0, + 0x85, 0xe3, 0xb4, 0x3d, 0xf9, 0x4d, 0x29, 0x0b, 0xe4, 0xc5, 0x5a, 0x55, 0x7e, 
0x8d, 0x86, 0x45, + 0x57, 0x27, 0x0b, 0xce, 0xa0, 0xe5, 0x00, 0x54, 0xab, 0x73, 0x55, 0x02, 0x70, 0x82, 0x63, 0xff, + 0xcf, 0x02, 0x4c, 0x65, 0x28, 0x57, 0x58, 0x1e, 0xba, 0xd4, 0x2b, 0x25, 0x49, 0x3b, 0x67, 0x06, + 0xcc, 0x2e, 0x1c, 0x21, 0x60, 0x76, 0xb1, 0x5f, 0xc0, 0xec, 0xa1, 0xf7, 0x12, 0x30, 0xdb, 0x1c, + 0xb1, 0xe1, 0x81, 0x46, 0x2c, 0x23, 0xc8, 0xf6, 0xc8, 0x11, 0x83, 0x6c, 0x1b, 0x83, 0x3e, 0x3a, + 0xc0, 0xa0, 0xff, 0x60, 0x01, 0xa6, 0xd3, 0x46, 0x90, 0x27, 0x20, 0xb8, 0x7d, 0xdd, 0x10, 0xdc, + 0x66, 0x67, 0x75, 0x4c, 0x9b, 0x66, 0xe6, 0x09, 0x71, 0x71, 0x4a, 0x88, 0xfb, 0xd1, 0x81, 0xa8, + 0xf5, 0x16, 0xe8, 0xfe, 0x9d, 0x02, 0x9c, 0x49, 0x57, 0x59, 0x6e, 0x3a, 0x5e, 0xeb, 0x04, 0xc6, + 0xe6, 0xa6, 0x31, 0x36, 0xcf, 0x0e, 0xf2, 0x35, 0xac, 0x6b, 0xb9, 0x03, 0x74, 0x27, 0x35, 0x40, + 0x57, 0x06, 0x27, 0xd9, 0x7b, 0x94, 0xbe, 0x52, 0x84, 0x0b, 0x99, 0xf5, 0x12, 0xb9, 0xe7, 0xaa, + 0x21, 0xf7, 0x7c, 0x3e, 0x25, 0xf7, 0xb4, 0x7b, 0xd7, 0x3e, 0x1e, 0x41, 0xa8, 0x70, 0x91, 0x65, + 0x11, 0x04, 0x1e, 0x50, 0x08, 0x6a, 0xb8, 0xc8, 0x2a, 0x42, 0xd8, 0xa4, 0xfb, 0xf5, 0x24, 0xfc, + 0xfc, 0x37, 0x16, 0x9c, 0xcb, 0x9c, 0x9b, 0x13, 0x10, 0x76, 0xad, 0x9b, 0xc2, 0xae, 0xa7, 0x06, + 0x5e, 0xad, 0x39, 0xd2, 0xaf, 0x5f, 0x1f, 0xca, 0xf9, 0x16, 0xf6, 0x94, 0xbf, 0x09, 0x63, 0x4e, + 0xa3, 0x41, 0xa2, 0x68, 0x2d, 0x70, 0x55, 0x4c, 0xe0, 0x67, 0xd9, 0x3b, 0x2b, 0x29, 0x3e, 0x3c, + 0x98, 0x9f, 0x4b, 0x93, 0x48, 0xc0, 0x58, 0xa7, 0x80, 0x3e, 0x0b, 0xa5, 0x48, 0xdc, 0x9b, 0x62, + 0xee, 0x5f, 0x18, 0x70, 0x70, 0x9c, 0x4d, 0xd2, 0x34, 0xe3, 0x1c, 0x29, 0x51, 0x85, 0x22, 0x69, + 0xc6, 0x44, 0x29, 0x1c, 0x6b, 0x4c, 0x94, 0xe7, 0x01, 0xf6, 0xd4, 0x63, 0x20, 0x2d, 0x80, 0xd0, + 0x9e, 0x09, 0x1a, 0x16, 0xfa, 0x26, 0x98, 0x8e, 0x78, 0x54, 0xbf, 0xe5, 0xa6, 0x13, 0x31, 0x3f, + 0x17, 0xb1, 0x0a, 0x59, 0x2c, 0xa5, 0x7a, 0x0a, 0x86, 0xbb, 0xb0, 0xd1, 0xaa, 0x6c, 0x95, 0x85, + 0x20, 0xe4, 0x0b, 0xf3, 0x52, 0xd2, 0xa2, 0xc8, 0x82, 0x7b, 0x3a, 0x3d, 0xfc, 0x6c, 0xe0, 0xb5, + 0x9a, 0xe8, 0xb3, 0x00, 0x74, 0xf9, 0x08, 0x41, 0xc4, 0x68, 0xfe, 0xe1, 0x49, 0x4f, 0x15, 0x37, + 0xd3, 0x2c, 0x97, 0x39, 0xa7, 0x56, 0x14, 0x11, 0xac, 0x11, 0xb4, 0x7f, 0x70, 0x08, 0x1e, 0xed, + 0x71, 0x46, 0xa2, 0x45, 0x53, 0x11, 0xfb, 0x74, 0xfa, 0x71, 0x3d, 0x97, 0x59, 0xd9, 0x78, 0x6d, + 0xa7, 0x96, 0x62, 0xe1, 0x3d, 0x2f, 0xc5, 0xef, 0xb3, 0x34, 0xb1, 0x07, 0x37, 0xd6, 0xfc, 0xd4, + 0x11, 0xcf, 0xfe, 0x63, 0x94, 0x83, 0x6c, 0x65, 0x08, 0x13, 0x9e, 0x1f, 0xb8, 0x3b, 0x03, 0x4b, + 0x17, 0x4e, 0x56, 0x4c, 0xfc, 0x05, 0x0b, 0x1e, 0xcf, 0xec, 0xaf, 0x61, 0x92, 0x73, 0x05, 0xca, + 0x0d, 0x5a, 0xa8, 0xf9, 0x22, 0x26, 0x4e, 0xda, 0x12, 0x80, 0x13, 0x1c, 0xc3, 0xf2, 0xa6, 0xd0, + 0xd7, 0xf2, 0xe6, 0x9f, 0x5b, 0xd0, 0xb5, 0x3f, 0x4e, 0xe0, 0xa0, 0xae, 0x9a, 0x07, 0xf5, 0x87, + 0x07, 0x99, 0xcb, 0x9c, 0x33, 0xfa, 0x8f, 0xa6, 0xe0, 0x6c, 0x8e, 0x2f, 0xce, 0x1e, 0xcc, 0x6c, + 0x37, 0x88, 0xe9, 0xe5, 0x29, 0x3e, 0x26, 0xd3, 0x21, 0xb6, 0xa7, 0x4b, 0x28, 0x4b, 0x69, 0x39, + 0xd3, 0x85, 0x82, 0xbb, 0x9b, 0x40, 0x5f, 0xb0, 0xe0, 0xb4, 0x73, 0x37, 0xea, 0xca, 0x81, 0x2f, + 0xd6, 0xcc, 0x8b, 0x99, 0x42, 0x90, 0x3e, 0x39, 0xf3, 0x79, 0x8e, 0xcf, 0x2c, 0x2c, 0x9c, 0xd9, + 0x16, 0xc2, 0x22, 0x4a, 0x3c, 0x65, 0xe7, 0x7b, 0xf8, 0x21, 0x67, 0x39, 0x4d, 0xf1, 0x1b, 0x44, + 0x42, 0xb0, 0xa2, 0x83, 0x3e, 0x0f, 0xe5, 0x6d, 0xe9, 0xc9, 0x98, 0x71, 0x43, 0x25, 0x03, 0xd9, + 0xdb, 0xbf, 0x93, 0xab, 0x32, 0x15, 0x12, 0x4e, 0x88, 0xa2, 0xd7, 0xa0, 0xe8, 0x6f, 0x45, 0xbd, + 0xd2, 0x64, 0xa6, 0x6c, 0xd6, 0xb8, 0xb7, 0xff, 0xfa, 0x6a, 0x1d, 0xd3, 0x8a, 0xe8, 0x1a, 0x14, + 0xc3, 
0x4d, 0x57, 0x48, 0xf0, 0x32, 0xcf, 0x70, 0xbc, 0x54, 0xc9, 0xe9, 0x15, 0xa3, 0x84, 0x97, + 0x2a, 0x98, 0x92, 0x40, 0x35, 0x18, 0x66, 0x0e, 0x2c, 0xe2, 0x3e, 0xc8, 0xe4, 0x7c, 0x7b, 0x38, + 0x82, 0xf1, 0x90, 0x00, 0x0c, 0x01, 0x73, 0x42, 0x68, 0x03, 0x46, 0x1a, 0x2c, 0xa5, 0xa2, 0x08, + 0x48, 0xf6, 0xb1, 0x4c, 0x59, 0x5d, 0x8f, 0x5c, 0x93, 0x42, 0x74, 0xc5, 0x30, 0xb0, 0xa0, 0xc5, + 0xa8, 0x92, 0xf6, 0xce, 0x56, 0x24, 0x52, 0x00, 0x67, 0x53, 0xed, 0x91, 0x42, 0x55, 0x50, 0x65, + 0x18, 0x58, 0xd0, 0x42, 0x9f, 0x80, 0xc2, 0x56, 0x43, 0x38, 0xa7, 0x64, 0x0a, 0xed, 0xcc, 0x80, + 0x0d, 0x4b, 0x23, 0xf7, 0x0f, 0xe6, 0x0b, 0xab, 0xcb, 0xb8, 0xb0, 0xd5, 0x40, 0xeb, 0x30, 0xba, + 0xc5, 0x5d, 0xbc, 0x85, 0x5c, 0xee, 0xc9, 0x6c, 0xef, 0xf3, 0x2e, 0x2f, 0x70, 0xee, 0x97, 0x21, + 0x00, 0x58, 0x12, 0x61, 0x41, 0xd7, 0x95, 0xab, 0xba, 0x88, 0xdd, 0xb5, 0x70, 0xb4, 0xf0, 0x02, + 0xfc, 0x7e, 0x4e, 0x1c, 0xde, 0xb1, 0x46, 0x91, 0xae, 0x6a, 0x47, 0xe6, 0x61, 0x17, 0xb1, 0x58, + 0x32, 0x57, 0x75, 0x9f, 0x14, 0xf5, 0x7c, 0x55, 0x2b, 0x24, 0x9c, 0x10, 0x45, 0xbb, 0x30, 0xb1, + 0x17, 0xb5, 0x77, 0x88, 0xdc, 0xd2, 0x2c, 0x34, 0x4b, 0xce, 0x15, 0x76, 0x5b, 0x20, 0x7a, 0x61, + 0xdc, 0x71, 0x9a, 0x5d, 0xa7, 0x10, 0xd3, 0x7f, 0xdf, 0xd6, 0x89, 0x61, 0x93, 0x36, 0x1d, 0xfe, + 0x77, 0x3a, 0xc1, 0xe6, 0x7e, 0x4c, 0x44, 0xc8, 0xad, 0xcc, 0xe1, 0x7f, 0x83, 0xa3, 0x74, 0x0f, + 0xbf, 0x00, 0x60, 0x49, 0x04, 0xdd, 0x16, 0xc3, 0xc3, 0x4e, 0xcf, 0xe9, 0xfc, 0xb8, 0x98, 0x8b, + 0x12, 0x29, 0x67, 0x50, 0xd8, 0x69, 0x99, 0x90, 0x62, 0xa7, 0x64, 0x7b, 0x27, 0x88, 0x03, 0x3f, + 0x75, 0x42, 0xcf, 0xe4, 0x9f, 0x92, 0xb5, 0x0c, 0xfc, 0xee, 0x53, 0x32, 0x0b, 0x0b, 0x67, 0xb6, + 0x85, 0x5c, 0x98, 0x6c, 0x07, 0x61, 0x7c, 0x37, 0x08, 0xe5, 0xfa, 0x42, 0x3d, 0xe4, 0x0a, 0x06, + 0xa6, 0x68, 0x91, 0x45, 0xb3, 0x33, 0x21, 0x38, 0x45, 0x13, 0x7d, 0x1a, 0x46, 0xa3, 0x86, 0xd3, + 0x24, 0xd5, 0x9b, 0xb3, 0xa7, 0xf2, 0xaf, 0x9f, 0x3a, 0x47, 0xc9, 0x59, 0x5d, 0x3c, 0x42, 0x3b, + 0x47, 0xc1, 0x92, 0x1c, 0x5a, 0x85, 0x61, 0x96, 0x54, 0x8b, 0xc5, 0x87, 0xcb, 0x09, 0xef, 0xd9, + 0x65, 0x41, 0xcc, 0xcf, 0x26, 0x56, 0x8c, 0x79, 0x75, 0xba, 0x07, 0x04, 0x7b, 0x1d, 0x44, 0xb3, + 0x67, 0xf2, 0xf7, 0x80, 0xe0, 0xca, 0x6f, 0xd6, 0x7b, 0xed, 0x01, 0x85, 0x84, 0x13, 0xa2, 0xf4, + 0x64, 0xa6, 0xa7, 0xe9, 0xd9, 0x1e, 0xa6, 0x2f, 0xb9, 0x67, 0x29, 0x3b, 0x99, 0xe9, 0x49, 0x4a, + 0x49, 0xd8, 0xbf, 0x3b, 0xda, 0xcd, 0xb3, 0xb0, 0x07, 0xd9, 0x77, 0x5a, 0x5d, 0xba, 0xba, 0x8f, + 0x0f, 0x2a, 0x1f, 0x3a, 0x46, 0x6e, 0xf5, 0x0b, 0x16, 0x9c, 0x6d, 0x67, 0x7e, 0x88, 0x60, 0x00, + 0x06, 0x13, 0x33, 0xf1, 0x4f, 0x57, 0xb1, 0x04, 0xb3, 0xe1, 0x38, 0xa7, 0xa5, 0xf4, 0x8b, 0xa0, + 0xf8, 0x9e, 0x5f, 0x04, 0x6b, 0x50, 0x62, 0x4c, 0x66, 0x9f, 0x14, 0xc3, 0xe9, 0x87, 0x11, 0x63, + 0x25, 0x96, 0x45, 0x45, 0xac, 0x48, 0xa0, 0xef, 0xb7, 0xe0, 0x7c, 0xba, 0xeb, 0x98, 0x30, 0xb0, + 0x08, 0x40, 0xc8, 0xdf, 0x82, 0xab, 0xe2, 0xfb, 0xcf, 0xd7, 0x7a, 0x21, 0x1f, 0xf6, 0x43, 0xc0, + 0xbd, 0x1b, 0x43, 0x95, 0x8c, 0xc7, 0xe8, 0x88, 0x29, 0x80, 0x1f, 0xe0, 0x41, 0xfa, 0x22, 0x8c, + 0xb7, 0x82, 0x8e, 0x1f, 0x0b, 0x4b, 0x19, 0xa1, 0xb5, 0x67, 0xda, 0xea, 0x35, 0xad, 0x1c, 0x1b, + 0x58, 0xa9, 0x67, 0x6c, 0xe9, 0x81, 0x9f, 0xb1, 0x6f, 0xc1, 0xb8, 0xaf, 0x99, 0x76, 0x0a, 0x7e, + 0xe0, 0x52, 0x7e, 0xf0, 0x50, 0xdd, 0x10, 0x94, 0xf7, 0x52, 0x2f, 0xc1, 0x06, 0xb5, 0x93, 0x7d, + 0x1b, 0xfd, 0x94, 0x95, 0xc1, 0xd4, 0xf3, 0xd7, 0xf2, 0x27, 0xcd, 0xd7, 0xf2, 0xa5, 0xf4, 0x6b, + 0xb9, 0x4b, 0xf8, 0x6a, 0x3c, 0x94, 0x07, 0x4f, 0x74, 0x32, 0x68, 0x9c, 0x40, 0xbb, 0x09, 0x17, + 0xfb, 0x5d, 0x4b, 0xcc, 0x64, 
0xca, 0x55, 0xaa, 0xb6, 0xc4, 0x64, 0xca, 0xad, 0x56, 0x30, 0x83, + 0x0c, 0x1a, 0x48, 0xc6, 0xfe, 0x1f, 0x16, 0x14, 0x6b, 0x81, 0x7b, 0x02, 0xc2, 0xe4, 0x4f, 0x19, + 0xc2, 0xe4, 0x47, 0xb3, 0x2f, 0x44, 0x37, 0x57, 0x74, 0xbc, 0x92, 0x12, 0x1d, 0x9f, 0xcf, 0x23, + 0xd0, 0x5b, 0x50, 0xfc, 0xe3, 0x45, 0x18, 0xab, 0x05, 0xae, 0xb2, 0x57, 0xfe, 0xf5, 0x07, 0xb1, + 0x57, 0xce, 0x8d, 0xf0, 0xaf, 0x51, 0x66, 0x96, 0x56, 0xd2, 0xc9, 0xf2, 0x2f, 0x98, 0xd9, 0xf2, + 0x1d, 0xe2, 0x6d, 0xef, 0xc4, 0xc4, 0x4d, 0x7f, 0xce, 0xc9, 0x99, 0x2d, 0xff, 0x77, 0x0b, 0xa6, + 0x52, 0xad, 0xa3, 0x26, 0x4c, 0x34, 0x75, 0xc1, 0xa4, 0x58, 0xa7, 0x0f, 0x24, 0xd3, 0x14, 0x66, + 0x9f, 0x5a, 0x11, 0x36, 0x89, 0xa3, 0x05, 0x00, 0xa5, 0xa9, 0x93, 0x12, 0x30, 0xc6, 0xf5, 0x2b, + 0x55, 0x5e, 0x84, 0x35, 0x0c, 0xf4, 0x12, 0x8c, 0xc5, 0x41, 0x3b, 0x68, 0x06, 0xdb, 0xfb, 0xd7, + 0x89, 0x0c, 0x5d, 0xa4, 0x8c, 0xb9, 0x36, 0x12, 0x10, 0xd6, 0xf1, 0xec, 0x9f, 0x2c, 0xf2, 0x0f, + 0xf5, 0x63, 0xef, 0x83, 0x35, 0xf9, 0xfe, 0x5e, 0x93, 0x5f, 0xb1, 0x60, 0x9a, 0xb6, 0xce, 0xcc, + 0x45, 0xe4, 0x65, 0xab, 0x82, 0x06, 0x5b, 0x3d, 0x82, 0x06, 0x5f, 0xa2, 0x67, 0x97, 0x1b, 0x74, + 0x62, 0x21, 0x41, 0xd3, 0x0e, 0x27, 0x5a, 0x8a, 0x05, 0x54, 0xe0, 0x91, 0x30, 0x14, 0x3e, 0x6e, + 0x3a, 0x1e, 0x09, 0x43, 0x2c, 0xa0, 0x32, 0xa6, 0xf0, 0x50, 0x76, 0x4c, 0x61, 0x1e, 0x88, 0x51, + 0x18, 0x16, 0x08, 0xb6, 0x47, 0x0b, 0xc4, 0x28, 0x2d, 0x0e, 0x12, 0x1c, 0xfb, 0xe7, 0x8a, 0x30, + 0x5e, 0x0b, 0xdc, 0x44, 0x57, 0xf6, 0xa2, 0xa1, 0x2b, 0xbb, 0x98, 0xd2, 0x95, 0x4d, 0xeb, 0xb8, + 0x1f, 0x68, 0xc6, 0xbe, 0x56, 0x9a, 0xb1, 0x7f, 0x66, 0xb1, 0x59, 0xab, 0xac, 0xd7, 0xb9, 0xf5, + 0x11, 0x7a, 0x0e, 0xc6, 0xd8, 0x81, 0xc4, 0x9c, 0x2a, 0xa5, 0x02, 0x89, 0xe5, 0x50, 0x5a, 0x4f, + 0x8a, 0xb1, 0x8e, 0x83, 0x2e, 0x43, 0x29, 0x22, 0x4e, 0xd8, 0xd8, 0x51, 0x67, 0x9c, 0xd0, 0xf6, + 0xf0, 0x32, 0xac, 0xa0, 0xe8, 0x8d, 0x24, 0x06, 0x60, 0x31, 0xdf, 0x49, 0x4b, 0xef, 0x0f, 0xdf, + 0x22, 0xf9, 0x81, 0xff, 0xec, 0x3b, 0x80, 0xba, 0xf1, 0x07, 0x08, 0x7e, 0x35, 0x6f, 0x06, 0xbf, + 0x2a, 0x77, 0x05, 0xbe, 0xfa, 0x33, 0x0b, 0x26, 0x6b, 0x81, 0x4b, 0xb7, 0xee, 0xd7, 0xd3, 0x3e, + 0xd5, 0x03, 0xa0, 0x8e, 0xf4, 0x08, 0x80, 0xfa, 0x04, 0x0c, 0xd7, 0x02, 0xb7, 0x5a, 0xeb, 0xe5, + 0xdc, 0x6c, 0xff, 0x5d, 0x0b, 0x46, 0x6b, 0x81, 0x7b, 0x02, 0xc2, 0xf9, 0x4f, 0x9a, 0xc2, 0xf9, + 0x47, 0x72, 0xd6, 0x4d, 0x8e, 0x3c, 0xfe, 0x17, 0x8a, 0x30, 0x41, 0xfb, 0x19, 0x6c, 0xcb, 0xa9, + 0x34, 0x86, 0xcd, 0x1a, 0x60, 0xd8, 0x28, 0x2f, 0x1c, 0x34, 0x9b, 0xc1, 0xdd, 0xf4, 0xb4, 0xae, + 0xb2, 0x52, 0x2c, 0xa0, 0xe8, 0x19, 0x28, 0xb5, 0x43, 0xb2, 0xe7, 0x05, 0x82, 0xc9, 0xd4, 0x54, + 0x1d, 0x35, 0x51, 0x8e, 0x15, 0x06, 0x7d, 0x9c, 0x45, 0x9e, 0xdf, 0x20, 0x75, 0xd2, 0x08, 0x7c, + 0x97, 0xcb, 0xaf, 0x8b, 0x22, 0x6f, 0x80, 0x56, 0x8e, 0x0d, 0x2c, 0x74, 0x07, 0xca, 0xec, 0x3f, + 0x3b, 0x76, 0x8e, 0x9e, 0x4e, 0x52, 0xa4, 0x17, 0x13, 0x04, 0x70, 0x42, 0x0b, 0x3d, 0x0f, 0x10, + 0xcb, 0x10, 0xd9, 0x91, 0x08, 0x74, 0xa4, 0x18, 0x72, 0x15, 0x3c, 0x3b, 0xc2, 0x1a, 0x16, 0x7a, + 0x1a, 0xca, 0xb1, 0xe3, 0x35, 0x6f, 0x78, 0x3e, 0x89, 0x98, 0x5c, 0xba, 0x28, 0xb3, 0x7c, 0x89, + 0x42, 0x9c, 0xc0, 0x29, 0x43, 0xc4, 0xa2, 0x00, 0xf0, 0x64, 0xb4, 0x25, 0x86, 0xcd, 0x18, 0xa2, + 0x1b, 0xaa, 0x14, 0x6b, 0x18, 0xf6, 0x2b, 0x70, 0xa6, 0x16, 0xb8, 0xb5, 0x20, 0x8c, 0x57, 0x83, + 0xf0, 0xae, 0x13, 0xba, 0x72, 0xfe, 0xe6, 0x65, 0x72, 0x10, 0x7a, 0x40, 0x0d, 0xf3, 0xed, 0x6b, + 0xa4, 0xa8, 0x7a, 0x81, 0xb1, 0x44, 0x47, 0xf4, 0x11, 0x69, 0xb0, 0xcb, 0x59, 0xa5, 0x81, 0xb8, + 0xea, 0xc4, 0x04, 0xdd, 0x64, 0xb9, 0x6a, 0x93, 0x7b, 
0x4a, 0x54, 0x7f, 0x4a, 0xcb, 0x55, 0x9b, + 0x00, 0x33, 0x2f, 0x36, 0xb3, 0xbe, 0xfd, 0x33, 0x43, 0xec, 0xc8, 0x4a, 0xa5, 0x12, 0x40, 0x9f, + 0x83, 0xc9, 0x88, 0xdc, 0xf0, 0xfc, 0xce, 0x3d, 0xf9, 0x52, 0xef, 0xe1, 0xe5, 0x53, 0x5f, 0xd1, + 0x31, 0xb9, 0xbc, 0xcf, 0x2c, 0xc3, 0x29, 0x6a, 0xa8, 0x05, 0x93, 0x77, 0x3d, 0xdf, 0x0d, 0xee, + 0x46, 0x92, 0x7e, 0x29, 0x5f, 0xec, 0x77, 0x87, 0x63, 0xa6, 0xfa, 0x68, 0x34, 0x77, 0xc7, 0x20, + 0x86, 0x53, 0xc4, 0xe9, 0xb2, 0x08, 0x3b, 0xfe, 0x62, 0x74, 0x2b, 0x22, 0xa1, 0xc8, 0x3a, 0xcc, + 0x96, 0x05, 0x96, 0x85, 0x38, 0x81, 0xd3, 0x65, 0xc1, 0xfe, 0x5c, 0x0d, 0x83, 0x0e, 0x0f, 0x2f, + 0x2f, 0x96, 0x05, 0x56, 0xa5, 0x58, 0xc3, 0xa0, 0xdb, 0x86, 0xfd, 0x5b, 0x0f, 0x7c, 0x1c, 0x04, + 0xb1, 0xdc, 0x68, 0x2c, 0xcf, 0xa5, 0x56, 0x8e, 0x0d, 0x2c, 0xb4, 0x0a, 0x28, 0xea, 0xb4, 0xdb, + 0x4d, 0x66, 0x3d, 0xe0, 0x34, 0x19, 0x29, 0xae, 0xb9, 0x2d, 0xf2, 0xe0, 0x99, 0xf5, 0x2e, 0x28, + 0xce, 0xa8, 0x41, 0x4f, 0xd0, 0x2d, 0xd1, 0xd5, 0x61, 0xd6, 0x55, 0xae, 0x22, 0xa8, 0xf3, 0x7e, + 0x4a, 0x18, 0x5a, 0x81, 0xd1, 0x68, 0x3f, 0x6a, 0xc4, 0x22, 0x0a, 0x58, 0x4e, 0xb6, 0x98, 0x3a, + 0x43, 0xd1, 0x92, 0x95, 0xf1, 0x2a, 0x58, 0xd6, 0xb5, 0xbf, 0x8d, 0x5d, 0xd0, 0x2c, 0x47, 0x6d, + 0xdc, 0x09, 0x09, 0x6a, 0xc1, 0x44, 0x9b, 0xad, 0x30, 0x11, 0x2f, 0x5d, 0x2c, 0x93, 0x17, 0x07, + 0x7c, 0x69, 0xdf, 0xa5, 0xe7, 0x9a, 0x92, 0x84, 0xb1, 0x27, 0x4c, 0x4d, 0x27, 0x87, 0x4d, 0xea, + 0xf6, 0x57, 0xce, 0xb2, 0x23, 0xbe, 0xce, 0x9f, 0xcf, 0xa3, 0xc2, 0xdc, 0x59, 0xbc, 0x15, 0xe6, + 0xf2, 0xe5, 0x38, 0xc9, 0x17, 0x09, 0x93, 0x69, 0x2c, 0xeb, 0xa2, 0xcf, 0xc2, 0x24, 0x65, 0xbd, + 0xb5, 0x7c, 0x11, 0xa7, 0xf3, 0xfd, 0xd2, 0x93, 0x34, 0x11, 0x5a, 0x2e, 0x05, 0xbd, 0x32, 0x4e, + 0x11, 0x43, 0x6f, 0x30, 0xc5, 0xbc, 0x99, 0x8a, 0xa2, 0x0f, 0x69, 0x5d, 0x07, 0x2f, 0xc9, 0x6a, + 0x44, 0xf2, 0xd2, 0x5c, 0xd8, 0x0f, 0x37, 0xcd, 0x05, 0xba, 0x01, 0x13, 0x22, 0x51, 0xab, 0x10, + 0x3f, 0x16, 0x0d, 0xf1, 0xd2, 0x04, 0xd6, 0x81, 0x87, 0xe9, 0x02, 0x6c, 0x56, 0x46, 0xdb, 0x70, + 0x5e, 0xcb, 0xb5, 0x72, 0x35, 0x74, 0x98, 0x8e, 0xd8, 0x63, 0x27, 0x91, 0x76, 0xf9, 0x3c, 0x7e, + 0xff, 0x60, 0xfe, 0xfc, 0x46, 0x2f, 0x44, 0xdc, 0x9b, 0x0e, 0xba, 0x09, 0x67, 0xb8, 0x57, 0x65, + 0x85, 0x38, 0x6e, 0xd3, 0xf3, 0xd5, 0xed, 0xc6, 0x77, 0xcb, 0xb9, 0xfb, 0x07, 0xf3, 0x67, 0x16, + 0xb3, 0x10, 0x70, 0x76, 0x3d, 0xf4, 0x49, 0x28, 0xbb, 0x7e, 0x24, 0xc6, 0x60, 0xc4, 0x48, 0x67, + 0x53, 0xae, 0xac, 0xd7, 0xd5, 0xf7, 0x27, 0x7f, 0x70, 0x52, 0x01, 0x6d, 0x73, 0x11, 0xa4, 0x7a, + 0xf1, 0x8f, 0x76, 0xc5, 0x83, 0x49, 0xcb, 0x8e, 0x0c, 0xbf, 0x2a, 0x2e, 0x7b, 0x57, 0xd6, 0xc6, + 0x86, 0xcb, 0x95, 0x41, 0x18, 0xbd, 0x0e, 0x88, 0xb2, 0xc4, 0x5e, 0x83, 0x2c, 0x36, 0x58, 0x30, + 0x7e, 0x26, 0xb1, 0x2d, 0x19, 0xde, 0x29, 0xa8, 0xde, 0x85, 0x81, 0x33, 0x6a, 0xa1, 0x6b, 0xf4, + 0x36, 0xd0, 0x4b, 0x85, 0xd5, 0xb4, 0x4a, 0x3e, 0x56, 0x21, 0xed, 0x90, 0x34, 0x9c, 0x98, 0xb8, + 0x26, 0x45, 0x9c, 0xaa, 0x87, 0x5c, 0x78, 0xcc, 0xe9, 0xc4, 0x01, 0x93, 0xee, 0x9a, 0xa8, 0x1b, + 0xc1, 0x2e, 0xf1, 0x99, 0x62, 0xa5, 0xb4, 0x74, 0xf1, 0xfe, 0xc1, 0xfc, 0x63, 0x8b, 0x3d, 0xf0, + 0x70, 0x4f, 0x2a, 0x94, 0xed, 0x51, 0xa9, 0x43, 0xc1, 0x0c, 0x73, 0x93, 0x91, 0x3e, 0xf4, 0x25, + 0x18, 0xdb, 0x09, 0xa2, 0x78, 0x9d, 0xc4, 0x77, 0x83, 0x70, 0x57, 0x04, 0x2b, 0x4c, 0x02, 0xdc, + 0x26, 0x20, 0xac, 0xe3, 0xd1, 0x77, 0x0d, 0x53, 0xfb, 0x57, 0x2b, 0x4c, 0xe3, 0x5a, 0x4a, 0xce, + 0x98, 0x6b, 0xbc, 0x18, 0x4b, 0xb8, 0x44, 0xad, 0xd6, 0x96, 0x99, 0xf6, 0x34, 0x85, 0x5a, 0xad, + 0x2d, 0x63, 0x09, 0xa7, 0xcb, 0x35, 0xda, 0x71, 0x42, 0x52, 0x0b, 0x83, 0x06, 
0x89, 0xb4, 0xb0, + 0xca, 0x8f, 0xf2, 0x50, 0x8c, 0x74, 0xb9, 0xd6, 0xb3, 0x10, 0x70, 0x76, 0x3d, 0x44, 0xba, 0xf3, + 0x0c, 0x4d, 0xe6, 0x8b, 0xbd, 0xbb, 0x59, 0x81, 0x01, 0x53, 0x0d, 0xf9, 0x30, 0xad, 0x32, 0x1c, + 0xf1, 0xe0, 0x8b, 0xd1, 0xec, 0x14, 0x5b, 0xdb, 0x83, 0x47, 0x6e, 0x54, 0x8a, 0x84, 0x6a, 0x8a, + 0x12, 0xee, 0xa2, 0x6d, 0x44, 0x32, 0x9a, 0xee, 0x9b, 0x4b, 0xf6, 0x0a, 0x94, 0xa3, 0xce, 0xa6, + 0x1b, 0xb4, 0x1c, 0xcf, 0x67, 0xda, 0x53, 0x8d, 0xc1, 0xae, 0x4b, 0x00, 0x4e, 0x70, 0xd0, 0x2a, + 0x94, 0x1c, 0xa9, 0x25, 0x40, 0xf9, 0x01, 0x30, 0x94, 0x6e, 0x80, 0xfb, 0x84, 0x4b, 0xbd, 0x80, + 0xaa, 0x8b, 0x5e, 0x85, 0x09, 0xe1, 0x15, 0x28, 0x92, 0xeb, 0x9d, 0x32, 0x3d, 0x37, 0xea, 0x3a, + 0x10, 0x9b, 0xb8, 0xe8, 0x16, 0x8c, 0xc5, 0x41, 0x93, 0xb9, 0x1f, 0x50, 0x0e, 0xe9, 0x6c, 0x7e, + 0x10, 0xad, 0x0d, 0x85, 0xa6, 0x0b, 0xe8, 0x54, 0x55, 0xac, 0xd3, 0x41, 0x1b, 0x7c, 0xbd, 0xb3, + 0xf0, 0xc2, 0x24, 0x9a, 0x7d, 0x24, 0xff, 0x4e, 0x52, 0x51, 0x88, 0xcd, 0xed, 0x20, 0x6a, 0x62, + 0x9d, 0x0c, 0xba, 0x0a, 0x33, 0xed, 0xd0, 0x0b, 0xd8, 0x9a, 0x50, 0x0a, 0xa2, 0x59, 0x33, 0x29, + 0x4a, 0x2d, 0x8d, 0x80, 0xbb, 0xeb, 0x30, 0xa7, 0x4e, 0x51, 0x38, 0x7b, 0x8e, 0x27, 0xd3, 0xe5, + 0xef, 0x15, 0x5e, 0x86, 0x15, 0x14, 0xad, 0xb1, 0x93, 0x98, 0x3f, 0xb5, 0x67, 0xe7, 0xf2, 0x63, + 0x6e, 0xe8, 0x4f, 0x72, 0xce, 0xf7, 0xa9, 0xbf, 0x38, 0xa1, 0x80, 0x5c, 0x2d, 0x51, 0x1b, 0x65, + 0xb6, 0xa3, 0xd9, 0xc7, 0x7a, 0xd8, 0x5e, 0xa5, 0x38, 0xf3, 0x84, 0x21, 0x30, 0x8a, 0x23, 0x9c, + 0xa2, 0x89, 0xbe, 0x09, 0xa6, 0x45, 0x8c, 0xaf, 0x64, 0x98, 0xce, 0x27, 0x46, 0x9d, 0x38, 0x05, + 0xc3, 0x5d, 0xd8, 0x3c, 0xec, 0xba, 0xb3, 0xd9, 0x24, 0xe2, 0xe8, 0xbb, 0xe1, 0xf9, 0xbb, 0xd1, + 0xec, 0x05, 0x76, 0x3e, 0x88, 0xb0, 0xeb, 0x69, 0x28, 0xce, 0xa8, 0x81, 0x36, 0x60, 0xba, 0x1d, + 0x12, 0xd2, 0x62, 0x3c, 0xb2, 0xb8, 0xcf, 0xe6, 0xb9, 0x4f, 0x33, 0xed, 0x49, 0x2d, 0x05, 0x3b, + 0xcc, 0x28, 0xc3, 0x5d, 0x14, 0xd0, 0x5d, 0x28, 0x05, 0x7b, 0x24, 0xdc, 0x21, 0x8e, 0x3b, 0x7b, + 0xb1, 0x87, 0x91, 0xb1, 0xb8, 0xdc, 0x6e, 0x0a, 0xdc, 0x94, 0x52, 0x59, 0x16, 0xf7, 0x57, 0x2a, + 0xcb, 0xc6, 0xd0, 0x0f, 0x58, 0x70, 0x4e, 0xca, 0xa1, 0xeb, 0x6d, 0x3a, 0xea, 0xcb, 0x81, 0x1f, + 0xc5, 0x21, 0xf7, 0xc2, 0x7d, 0x3c, 0xdf, 0x31, 0x75, 0x23, 0xa7, 0x92, 0x92, 0xf6, 0x9d, 0xcb, + 0xc3, 0x88, 0x70, 0x7e, 0x8b, 0x73, 0xdf, 0x08, 0x33, 0x5d, 0x37, 0xf7, 0x51, 0x32, 0x41, 0xcc, + 0xed, 0xc2, 0x84, 0x31, 0x3a, 0x0f, 0x55, 0x9f, 0xf8, 0xaf, 0x47, 0xa1, 0xac, 0x74, 0x4d, 0xe8, + 0x8a, 0xa9, 0x42, 0x3c, 0x97, 0x56, 0x21, 0x96, 0xe8, 0x6b, 0x56, 0xd7, 0x1a, 0x6e, 0x64, 0xc4, + 0x3c, 0xca, 0xdb, 0x8b, 0x83, 0xfb, 0xb2, 0x6a, 0xa2, 0xc3, 0xe2, 0xc0, 0xba, 0xc8, 0xa1, 0x9e, + 0xd2, 0xc8, 0xab, 0x30, 0xe3, 0x07, 0x8c, 0x5d, 0x24, 0xae, 0xe4, 0x05, 0xd8, 0x95, 0x5f, 0xd6, + 0x83, 0x08, 0xa4, 0x10, 0x70, 0x77, 0x1d, 0xda, 0x20, 0xbf, 0xb3, 0xd3, 0xe2, 0x4f, 0x7e, 0xa5, + 0x63, 0x01, 0x45, 0x4f, 0xc0, 0x70, 0x3b, 0x70, 0xab, 0x35, 0xc1, 0x2a, 0x6a, 0x59, 0x41, 0xdd, + 0x6a, 0x0d, 0x73, 0x18, 0x5a, 0x84, 0x11, 0xf6, 0x23, 0x9a, 0x1d, 0xcf, 0xf7, 0x16, 0x67, 0x35, + 0xb4, 0x3c, 0x1b, 0xac, 0x02, 0x16, 0x15, 0x99, 0x18, 0x86, 0xf2, 0xd7, 0x4c, 0x0c, 0x33, 0xfa, + 0x80, 0x62, 0x18, 0x49, 0x00, 0x27, 0xb4, 0xd0, 0x3d, 0x38, 0x63, 0xbc, 0x69, 0xf8, 0x12, 0x21, + 0x91, 0x70, 0x58, 0x7d, 0xa2, 0xe7, 0x63, 0x46, 0xe8, 0x2e, 0xcf, 0x8b, 0x4e, 0x9f, 0xa9, 0x66, + 0x51, 0xc2, 0xd9, 0x0d, 0xa0, 0x26, 0xcc, 0x34, 0xba, 0x5a, 0x2d, 0x0d, 0xde, 0xaa, 0x9a, 0xd0, + 0xee, 0x16, 0xbb, 0x09, 0xa3, 0x57, 0xa1, 0xf4, 0x4e, 0x10, 0xb1, 0x63, 0x56, 0xb0, 0xb7, 0xd2, + 0xdb, 
0xb1, 0xf4, 0xc6, 0xcd, 0x3a, 0x2b, 0x3f, 0x3c, 0x98, 0x1f, 0xab, 0x05, 0xae, 0xfc, 0x8b, + 0x55, 0x05, 0xf4, 0x3d, 0x16, 0xcc, 0x75, 0x3f, 0x9a, 0x54, 0xa7, 0x27, 0x06, 0xef, 0xb4, 0x2d, + 0x1a, 0x9d, 0x5b, 0xc9, 0x25, 0x87, 0x7b, 0x34, 0x65, 0xff, 0x32, 0xd7, 0x33, 0x0a, 0x6d, 0x04, + 0x89, 0x3a, 0xcd, 0x93, 0xc8, 0x4b, 0xb8, 0x62, 0x28, 0x4a, 0x1e, 0x58, 0x97, 0xfd, 0x6b, 0x16, + 0xd3, 0x65, 0x6f, 0x90, 0x56, 0xbb, 0xe9, 0xc4, 0x27, 0xe1, 0x2c, 0xf7, 0x06, 0x94, 0x62, 0xd1, + 0x5a, 0xaf, 0x54, 0x8a, 0x5a, 0xa7, 0x98, 0x3e, 0x5f, 0x31, 0x9b, 0xb2, 0x14, 0x2b, 0x32, 0xf6, + 0x3f, 0xe6, 0x33, 0x20, 0x21, 0x27, 0x20, 0x8f, 0xae, 0x98, 0xf2, 0xe8, 0xf9, 0x3e, 0x5f, 0x90, + 0x23, 0x97, 0xfe, 0x47, 0x66, 0xbf, 0x99, 0x90, 0xe5, 0xfd, 0x6e, 0x44, 0x61, 0xff, 0x90, 0x05, + 0xa7, 0xb3, 0xac, 0x0e, 0xe9, 0x03, 0x81, 0x8b, 0x78, 0x94, 0x51, 0x89, 0x1a, 0xc1, 0xdb, 0xa2, + 0x1c, 0x2b, 0x8c, 0x81, 0xb3, 0x14, 0x1d, 0x2d, 0x6a, 0xe7, 0x4d, 0x98, 0xa8, 0x85, 0x44, 0xbb, + 0xd0, 0x5e, 0xe3, 0xde, 0xaf, 0xbc, 0x3f, 0xcf, 0x1c, 0xd9, 0xf3, 0xd5, 0xfe, 0xe9, 0x02, 0x9c, + 0xe6, 0x5a, 0xe1, 0xc5, 0xbd, 0xc0, 0x73, 0x6b, 0x81, 0x2b, 0x32, 0x4c, 0xbd, 0x09, 0xe3, 0x6d, + 0x4d, 0x2e, 0xd7, 0x2b, 0x02, 0x9d, 0x2e, 0xbf, 0x4b, 0x24, 0x09, 0x7a, 0x29, 0x36, 0x68, 0x21, + 0x17, 0xc6, 0xc9, 0x9e, 0xd7, 0x50, 0xaa, 0xc5, 0xc2, 0x91, 0x2f, 0x17, 0xd5, 0xca, 0x8a, 0x46, + 0x07, 0x1b, 0x54, 0x1f, 0x42, 0xd2, 0x51, 0xfb, 0x87, 0x2d, 0x78, 0x24, 0x27, 0x5e, 0x1d, 0x6d, + 0xee, 0x2e, 0xd3, 0xbf, 0x8b, 0xfc, 0x85, 0xaa, 0x39, 0xae, 0x95, 0xc7, 0x02, 0x8a, 0x3e, 0x0d, + 0xc0, 0xb5, 0xea, 0xf4, 0x85, 0xda, 0x2f, 0xb0, 0x97, 0x11, 0x93, 0x48, 0x0b, 0x2f, 0x23, 0xeb, + 0x63, 0x8d, 0x96, 0xfd, 0x13, 0x45, 0x18, 0xe6, 0x99, 0x97, 0x57, 0x61, 0x74, 0x87, 0xc7, 0xdd, + 0x1f, 0x24, 0xc4, 0x7f, 0x22, 0x3b, 0xe0, 0x05, 0x58, 0x56, 0x46, 0x6b, 0x70, 0x8a, 0xe7, 0x2d, + 0x68, 0x56, 0x48, 0xd3, 0xd9, 0x97, 0x82, 0x2e, 0x9e, 0xf3, 0x4f, 0x09, 0xfc, 0xaa, 0xdd, 0x28, + 0x38, 0xab, 0x1e, 0x7a, 0x0d, 0x26, 0xe9, 0xc3, 0x23, 0xe8, 0xc4, 0x92, 0x12, 0xcf, 0x58, 0xa0, + 0x5e, 0x3a, 0x1b, 0x06, 0x14, 0xa7, 0xb0, 0xe9, 0xdb, 0xb7, 0xdd, 0x25, 0xd2, 0x1b, 0x4e, 0xde, + 0xbe, 0xa6, 0x18, 0xcf, 0xc4, 0x65, 0xe6, 0x86, 0x1d, 0x66, 0x5c, 0xb9, 0xb1, 0x13, 0x92, 0x68, + 0x27, 0x68, 0xba, 0x8c, 0xd1, 0x1a, 0xd6, 0xcc, 0x0d, 0x53, 0x70, 0xdc, 0x55, 0x83, 0x52, 0xd9, + 0x72, 0xbc, 0x66, 0x27, 0x24, 0x09, 0x95, 0x11, 0x93, 0xca, 0x6a, 0x0a, 0x8e, 0xbb, 0x6a, 0xd0, + 0x75, 0x74, 0xa6, 0x16, 0x06, 0xf4, 0xf0, 0x92, 0x31, 0x38, 0x94, 0x0d, 0xe9, 0xa8, 0x74, 0x17, + 0xec, 0x11, 0xae, 0x4a, 0x58, 0xd9, 0x71, 0x0a, 0x86, 0x02, 0xb9, 0x2e, 0x1c, 0x05, 0x25, 0x15, + 0xf4, 0x1c, 0x8c, 0x89, 0x68, 0xf4, 0xcc, 0xd4, 0x91, 0x4f, 0x1d, 0x53, 0x78, 0x57, 0x92, 0x62, + 0xac, 0xe3, 0xd8, 0xdf, 0x5b, 0x80, 0x53, 0x19, 0xb6, 0xea, 0xfc, 0xa8, 0xda, 0xf6, 0xa2, 0x58, + 0xe5, 0x35, 0xd3, 0x8e, 0x2a, 0x5e, 0x8e, 0x15, 0x06, 0xdd, 0x0f, 0xfc, 0x30, 0x4c, 0x1f, 0x80, + 0xc2, 0x16, 0x54, 0x40, 0x8f, 0x98, 0x21, 0xec, 0x22, 0x0c, 0x75, 0x22, 0x22, 0x03, 0xcd, 0xa9, + 0xf3, 0x9b, 0x69, 0x5c, 0x18, 0x84, 0xb2, 0xc7, 0xdb, 0x4a, 0x79, 0xa1, 0xb1, 0xc7, 0x5c, 0x7d, + 0xc1, 0x61, 0xb4, 0x73, 0x31, 0xf1, 0x1d, 0x3f, 0x16, 0x4c, 0x74, 0x12, 0x31, 0x89, 0x95, 0x62, + 0x01, 0xb5, 0xbf, 0x54, 0x84, 0x73, 0xb9, 0xde, 0x2b, 0xb4, 0xeb, 0xad, 0xc0, 0xf7, 0xe2, 0x40, + 0x59, 0x12, 0xf0, 0x28, 0x49, 0xa4, 0xbd, 0xb3, 0x26, 0xca, 0xb1, 0xc2, 0x40, 0x97, 0x60, 0x98, + 0x09, 0x9d, 0xba, 0x32, 0xbc, 0x2d, 0x55, 0x78, 0xd4, 0x0c, 0x0e, 0x1e, 0x38, 0x7b, 0xe6, 0x13, + 0x30, 0xd4, 0x0e, 0x82, 0x66, 
0xfa, 0xd0, 0xa2, 0xdd, 0x0d, 0x82, 0x26, 0x66, 0x40, 0xf4, 0x11, + 0x31, 0x5e, 0x29, 0xd5, 0x39, 0x76, 0xdc, 0x20, 0xd2, 0x06, 0xed, 0x29, 0x18, 0xdd, 0x25, 0xfb, + 0xa1, 0xe7, 0x6f, 0xa7, 0x4d, 0x2a, 0xae, 0xf3, 0x62, 0x2c, 0xe1, 0x66, 0xb2, 0x9e, 0xd1, 0xe3, + 0x4e, 0x7b, 0x59, 0xea, 0x7b, 0x05, 0x7e, 0x5f, 0x11, 0xa6, 0xf0, 0x52, 0xe5, 0x83, 0x89, 0xb8, + 0xd5, 0x3d, 0x11, 0xc7, 0x9d, 0xf6, 0xb2, 0xff, 0x6c, 0xfc, 0x82, 0x05, 0x53, 0x2c, 0x26, 0xbe, + 0x08, 0xaf, 0xe3, 0x05, 0xfe, 0x09, 0xb0, 0x78, 0x4f, 0xc0, 0x70, 0x48, 0x1b, 0x4d, 0xa7, 0x76, + 0x63, 0x3d, 0xc1, 0x1c, 0x86, 0x1e, 0x83, 0x21, 0xd6, 0x05, 0x3a, 0x79, 0xe3, 0x3c, 0x2b, 0x4e, + 0xc5, 0x89, 0x1d, 0xcc, 0x4a, 0x59, 0xcc, 0x08, 0x4c, 0xda, 0x4d, 0x8f, 0x77, 0x3a, 0x51, 0x09, + 0xbe, 0x3f, 0x62, 0x46, 0x64, 0x76, 0xed, 0xbd, 0xc5, 0x8c, 0xc8, 0x26, 0xd9, 0xfb, 0xf9, 0xf4, + 0x87, 0x05, 0xb8, 0x90, 0x59, 0x6f, 0xe0, 0x98, 0x11, 0xbd, 0x6b, 0x3f, 0xcc, 0xd8, 0xe9, 0xc5, + 0x13, 0x34, 0x58, 0x1b, 0x1a, 0x94, 0xc3, 0x1c, 0x1e, 0x20, 0x94, 0x43, 0xe6, 0x90, 0xbd, 0x4f, + 0x42, 0x39, 0x64, 0xf6, 0x2d, 0xe7, 0xf9, 0xf7, 0xe7, 0x85, 0x9c, 0x6f, 0x61, 0x0f, 0xc1, 0xcb, + 0xf4, 0x9c, 0x61, 0xc0, 0x48, 0x70, 0xcc, 0xe3, 0xfc, 0x8c, 0xe1, 0x65, 0x58, 0x41, 0xd1, 0x22, + 0x4c, 0xb5, 0x3c, 0x9f, 0x1e, 0x3e, 0xfb, 0x26, 0xe3, 0xa7, 0x22, 0xed, 0xac, 0x99, 0x60, 0x9c, + 0xc6, 0x47, 0x9e, 0x16, 0xe6, 0xa1, 0x90, 0x9f, 0x2c, 0x39, 0xb7, 0xb7, 0x0b, 0xa6, 0xba, 0x54, + 0x8d, 0x62, 0x46, 0xc8, 0x87, 0x35, 0xed, 0xfd, 0x5f, 0x1c, 0xfc, 0xfd, 0x3f, 0x9e, 0xfd, 0xf6, + 0x9f, 0x7b, 0x15, 0x26, 0x1e, 0x58, 0xe0, 0x6b, 0x7f, 0xa5, 0x08, 0x8f, 0xf6, 0xd8, 0xf6, 0xfc, + 0xac, 0x37, 0xe6, 0x40, 0x3b, 0xeb, 0xbb, 0xe6, 0xa1, 0x06, 0xa7, 0xb7, 0x3a, 0xcd, 0xe6, 0x3e, + 0xb3, 0x09, 0x27, 0xae, 0xc4, 0x10, 0x3c, 0xe5, 0x63, 0x32, 0x0f, 0xd1, 0x6a, 0x06, 0x0e, 0xce, + 0xac, 0x49, 0x19, 0x7a, 0x7a, 0x93, 0xec, 0x2b, 0x52, 0x29, 0x86, 0x1e, 0xeb, 0x40, 0x6c, 0xe2, + 0xa2, 0xab, 0x30, 0xe3, 0xec, 0x39, 0x1e, 0x0f, 0x96, 0x29, 0x09, 0x70, 0x8e, 0x5e, 0xc9, 0xe9, + 0x16, 0xd3, 0x08, 0xb8, 0xbb, 0x0e, 0x7a, 0x1d, 0x50, 0x20, 0x92, 0xbd, 0x5f, 0x25, 0xbe, 0xd0, + 0x6a, 0xb1, 0xb9, 0x2b, 0x26, 0x47, 0xc2, 0xcd, 0x2e, 0x0c, 0x9c, 0x51, 0x2b, 0x15, 0x36, 0x61, + 0x24, 0x3f, 0x6c, 0x42, 0xef, 0x73, 0xb1, 0x6f, 0xd8, 0xfe, 0xff, 0x62, 0xd1, 0xeb, 0x8b, 0x33, + 0xf9, 0x66, 0xf4, 0xaf, 0x57, 0x99, 0x41, 0x17, 0x97, 0xe1, 0x69, 0x11, 0x0c, 0xce, 0x68, 0x06, + 0x5d, 0x09, 0x10, 0x9b, 0xb8, 0x7c, 0x41, 0x44, 0x89, 0xe3, 0x9c, 0xc1, 0xe2, 0x8b, 0x10, 0x25, + 0x0a, 0x03, 0x7d, 0x06, 0x46, 0x5d, 0x6f, 0xcf, 0x8b, 0x82, 0x50, 0xac, 0xf4, 0x23, 0xaa, 0x0b, + 0x92, 0x73, 0xb0, 0xc2, 0xc9, 0x60, 0x49, 0xcf, 0xfe, 0xbe, 0x02, 0x4c, 0xc8, 0x16, 0xdf, 0xe8, + 0x04, 0xb1, 0x73, 0x02, 0xd7, 0xf2, 0x55, 0xe3, 0x5a, 0xfe, 0x48, 0xaf, 0x38, 0x2d, 0xac, 0x4b, + 0xb9, 0xd7, 0xf1, 0xcd, 0xd4, 0x75, 0xfc, 0x64, 0x7f, 0x52, 0xbd, 0xaf, 0xe1, 0x7f, 0x62, 0xc1, + 0x8c, 0x81, 0x7f, 0x02, 0xb7, 0xc1, 0xaa, 0x79, 0x1b, 0x3c, 0xde, 0xf7, 0x1b, 0x72, 0x6e, 0x81, + 0xef, 0x2a, 0xa6, 0xfa, 0xce, 0x4e, 0xff, 0x77, 0x60, 0x68, 0xc7, 0x09, 0xdd, 0x5e, 0x81, 0xa9, + 0xbb, 0x2a, 0x2d, 0x5c, 0x73, 0x42, 0xa1, 0xd6, 0x7b, 0x46, 0xe5, 0x2a, 0x76, 0xc2, 0xfe, 0x2a, + 0x3d, 0xd6, 0x14, 0x7a, 0x05, 0x46, 0xa2, 0x46, 0xd0, 0x56, 0x56, 0xdc, 0x17, 0x79, 0x1e, 0x63, + 0x5a, 0x72, 0x78, 0x30, 0x8f, 0xcc, 0xe6, 0x68, 0x31, 0x16, 0xf8, 0xe8, 0x4d, 0x98, 0x60, 0xbf, + 0x94, 0x8d, 0x4d, 0x31, 0x3f, 0x89, 0x4d, 0x5d, 0x47, 0xe4, 0x06, 0x68, 0x46, 0x11, 0x36, 0x49, + 0xcd, 0x6d, 0x43, 0x59, 0x7d, 0xd6, 0x43, 0xd5, 0xc7, 
0xfd, 0xfb, 0x22, 0x9c, 0xca, 0x58, 0x73, + 0x28, 0x32, 0x66, 0xe2, 0xb9, 0x01, 0x97, 0xea, 0x7b, 0x9c, 0x8b, 0x88, 0xbd, 0x86, 0x5c, 0xb1, + 0xb6, 0x06, 0x6e, 0xf4, 0x56, 0x44, 0xd2, 0x8d, 0xd2, 0xa2, 0xfe, 0x8d, 0xd2, 0xc6, 0x4e, 0x6c, + 0xa8, 0x69, 0x43, 0xaa, 0xa7, 0x0f, 0x75, 0x4e, 0xff, 0xa4, 0x08, 0xa7, 0xb3, 0x42, 0x47, 0xa1, + 0x6f, 0x4d, 0x25, 0x34, 0x7b, 0x71, 0xd0, 0xa0, 0x53, 0x3c, 0xcb, 0x19, 0x97, 0x01, 0x2f, 0x2d, + 0x98, 0x29, 0xce, 0xfa, 0x0e, 0xb3, 0x68, 0x93, 0x39, 0x85, 0x87, 0x3c, 0x11, 0x9d, 0x3c, 0x3e, + 0x3e, 0x3e, 0x70, 0x07, 0x44, 0x06, 0xbb, 0x28, 0xa5, 0xbf, 0x97, 0xc5, 0xfd, 0xf5, 0xf7, 0xb2, + 0xe5, 0x39, 0x0f, 0xc6, 0xb4, 0xaf, 0x79, 0xa8, 0x33, 0xbe, 0x4b, 0x6f, 0x2b, 0xad, 0xdf, 0x0f, + 0x75, 0xd6, 0x7f, 0xd8, 0x82, 0x94, 0x35, 0xb4, 0x12, 0x8b, 0x59, 0xb9, 0x62, 0xb1, 0x8b, 0x30, + 0x14, 0x06, 0x4d, 0x92, 0xce, 0x1f, 0x86, 0x83, 0x26, 0xc1, 0x0c, 0x42, 0x31, 0xe2, 0x44, 0xd8, + 0x31, 0xae, 0x3f, 0xe4, 0xc4, 0x13, 0xed, 0x09, 0x18, 0x6e, 0x92, 0x3d, 0xd2, 0x4c, 0xa7, 0x79, + 0xb8, 0x41, 0x0b, 0x31, 0x87, 0xd9, 0xbf, 0x30, 0x04, 0xe7, 0x7b, 0x86, 0x55, 0xa0, 0xcf, 0xa1, + 0x6d, 0x27, 0x26, 0x77, 0x9d, 0xfd, 0x74, 0x3c, 0xf6, 0xab, 0xbc, 0x18, 0x4b, 0x38, 0xf3, 0x22, + 0xe1, 0x51, 0x55, 0x53, 0x42, 0x44, 0x11, 0x4c, 0x55, 0x40, 0x4d, 0xa1, 0x54, 0xf1, 0x38, 0x84, + 0x52, 0xcf, 0x03, 0x44, 0x51, 0x93, 0x1b, 0xbe, 0xb8, 0xc2, 0x3d, 0x25, 0x89, 0xbe, 0x5b, 0xbf, + 0x21, 0x20, 0x58, 0xc3, 0x42, 0x15, 0x98, 0x6e, 0x87, 0x41, 0xcc, 0x65, 0xb2, 0x15, 0x6e, 0x1b, + 0x36, 0x6c, 0x7a, 0xb4, 0xd7, 0x52, 0x70, 0xdc, 0x55, 0x03, 0xbd, 0x04, 0x63, 0xc2, 0xcb, 0xbd, + 0x16, 0x04, 0x4d, 0x21, 0x06, 0x52, 0xe6, 0x52, 0xf5, 0x04, 0x84, 0x75, 0x3c, 0xad, 0x1a, 0x13, + 0xf4, 0x8e, 0x66, 0x56, 0xe3, 0xc2, 0x5e, 0x0d, 0x2f, 0x15, 0x46, 0xae, 0x34, 0x50, 0x18, 0xb9, + 0x44, 0x30, 0x56, 0x1e, 0x58, 0xb7, 0x05, 0x7d, 0x45, 0x49, 0x3f, 0x3b, 0x04, 0xa7, 0xc4, 0xc2, + 0x79, 0xd8, 0xcb, 0xe5, 0x56, 0xf7, 0x72, 0x39, 0x0e, 0xd1, 0xd9, 0x07, 0x6b, 0xe6, 0xa4, 0xd7, + 0xcc, 0xf7, 0x5b, 0x60, 0xb2, 0x57, 0xe8, 0xff, 0xcb, 0x4d, 0x68, 0xf1, 0x52, 0x2e, 0xbb, 0xe6, + 0xca, 0x0b, 0xe4, 0x3d, 0xa6, 0xb6, 0xb0, 0xff, 0x93, 0x05, 0x8f, 0xf7, 0xa5, 0x88, 0x56, 0xa0, + 0xcc, 0x78, 0x40, 0xed, 0x75, 0xf6, 0xa4, 0xb2, 0x1d, 0x95, 0x80, 0x1c, 0x96, 0x34, 0xa9, 0x89, + 0x56, 0xba, 0x32, 0x87, 0x3c, 0x95, 0x91, 0x39, 0xe4, 0x8c, 0x31, 0x3c, 0x0f, 0x98, 0x3a, 0xe4, + 0x97, 0x8b, 0x30, 0xc2, 0x57, 0xfc, 0x09, 0x3c, 0xc3, 0x56, 0x85, 0xdc, 0xb6, 0x47, 0x9c, 0x3a, + 0xde, 0x97, 0x85, 0x8a, 0x13, 0x3b, 0x9c, 0x4d, 0x50, 0xb7, 0x55, 0x22, 0xe1, 0x45, 0x9f, 0x03, + 0x88, 0xe2, 0xd0, 0xf3, 0xb7, 0x69, 0x99, 0x88, 0x60, 0xf8, 0xd1, 0x1e, 0xd4, 0xea, 0x0a, 0x99, + 0xd3, 0x4c, 0x76, 0xae, 0x02, 0x60, 0x8d, 0x22, 0x5a, 0x30, 0xee, 0xcb, 0xb9, 0x94, 0xe0, 0x13, + 0x38, 0xd5, 0xe4, 0xf6, 0x9c, 0x7b, 0x19, 0xca, 0x8a, 0x78, 0x3f, 0x29, 0xce, 0xb8, 0xce, 0x5c, + 0x7c, 0x0a, 0xa6, 0x52, 0x7d, 0x3b, 0x92, 0x10, 0xe8, 0x17, 0x2d, 0x98, 0xe2, 0x9d, 0x59, 0xf1, + 0xf7, 0xc4, 0x99, 0xfa, 0x2e, 0x9c, 0x6e, 0x66, 0x9c, 0x6d, 0x62, 0x46, 0x07, 0x3f, 0x0b, 0x95, + 0xd0, 0x27, 0x0b, 0x8a, 0x33, 0xdb, 0x40, 0x97, 0xe9, 0xba, 0xa5, 0x67, 0x97, 0xd3, 0x14, 0xce, + 0x86, 0xe3, 0x7c, 0xcd, 0xf2, 0x32, 0xac, 0xa0, 0xf6, 0x6f, 0x5b, 0x30, 0xc3, 0x7b, 0x7e, 0x9d, + 0xec, 0xab, 0x1d, 0xfe, 0xb5, 0xec, 0xbb, 0x48, 0xe6, 0x53, 0xc8, 0x49, 0xe6, 0xa3, 0x7f, 0x5a, + 0xb1, 0xe7, 0xa7, 0xfd, 0xb4, 0x05, 0x62, 0x85, 0x9c, 0xc0, 0x53, 0xfe, 0x1b, 0xcd, 0xa7, 0xfc, + 0x5c, 0xfe, 0x26, 0xc8, 0x79, 0xc3, 0xff, 0x99, 0x05, 0xd3, 0x1c, 0x21, 0xd1, 
0x39, 0x7f, 0x4d, + 0xe7, 0x61, 0x90, 0x94, 0x9f, 0xd7, 0xc9, 0xfe, 0x46, 0x50, 0x73, 0xe2, 0x9d, 0xec, 0x8f, 0x32, + 0x26, 0x6b, 0xa8, 0xe7, 0x64, 0xb9, 0x72, 0x03, 0x1d, 0x21, 0x8f, 0xf0, 0x91, 0x43, 0xdd, 0xdb, + 0x5f, 0xb5, 0x00, 0xf1, 0x66, 0x0c, 0xf6, 0x87, 0x32, 0x15, 0xac, 0x54, 0xbb, 0x2e, 0x92, 0xa3, + 0x49, 0x41, 0xb0, 0x86, 0x75, 0x2c, 0xc3, 0x93, 0x32, 0x1c, 0x28, 0xf6, 0x37, 0x1c, 0x38, 0xc2, + 0x88, 0xfe, 0xc1, 0x30, 0xa4, 0x3d, 0x40, 0xd0, 0x6d, 0x18, 0x6f, 0x38, 0x6d, 0x67, 0xd3, 0x6b, + 0x7a, 0xb1, 0x47, 0xa2, 0x5e, 0x16, 0x47, 0xcb, 0x1a, 0x9e, 0x50, 0xf5, 0x6a, 0x25, 0xd8, 0xa0, + 0x83, 0x16, 0x00, 0xda, 0xa1, 0xb7, 0xe7, 0x35, 0xc9, 0x36, 0x93, 0x38, 0x30, 0xf7, 0x66, 0x6e, + 0x46, 0x23, 0x4b, 0xb1, 0x86, 0x91, 0xe1, 0xa9, 0x5a, 0x7c, 0xc8, 0x9e, 0xaa, 0x70, 0x62, 0x9e, + 0xaa, 0x43, 0x47, 0xf2, 0x54, 0x2d, 0x1d, 0xd9, 0x53, 0x75, 0x78, 0x20, 0x4f, 0x55, 0x0c, 0x67, + 0x25, 0x07, 0x47, 0xff, 0xaf, 0x7a, 0x4d, 0x22, 0xd8, 0x76, 0xee, 0x93, 0x3d, 0x77, 0xff, 0x60, + 0xfe, 0x2c, 0xce, 0xc4, 0xc0, 0x39, 0x35, 0xd1, 0xa7, 0x61, 0xd6, 0x69, 0x36, 0x83, 0xbb, 0x6a, + 0x52, 0x57, 0xa2, 0x86, 0xd3, 0xe4, 0xa2, 0xfc, 0x51, 0x46, 0xf5, 0xb1, 0xfb, 0x07, 0xf3, 0xb3, + 0x8b, 0x39, 0x38, 0x38, 0xb7, 0x36, 0xfa, 0x24, 0x94, 0xdb, 0x61, 0xd0, 0x58, 0xd3, 0xdc, 0xd4, + 0x2e, 0xd0, 0x01, 0xac, 0xc9, 0xc2, 0xc3, 0x83, 0xf9, 0x09, 0xf5, 0x87, 0x5d, 0xf8, 0x49, 0x05, + 0x7b, 0x17, 0x4e, 0xd5, 0x49, 0xe8, 0xb1, 0xac, 0xc0, 0x6e, 0x72, 0x7e, 0x6c, 0x40, 0x39, 0x4c, + 0x9d, 0x98, 0x03, 0xc5, 0x76, 0xd3, 0x62, 0x82, 0xcb, 0x13, 0x32, 0x21, 0x64, 0xff, 0x6f, 0x0b, + 0x46, 0x85, 0x47, 0xc6, 0x09, 0x30, 0x6a, 0x8b, 0x86, 0xbc, 0x7c, 0x3e, 0xfb, 0x56, 0x61, 0x9d, + 0xc9, 0x95, 0x94, 0x57, 0x53, 0x92, 0xf2, 0xc7, 0x7b, 0x11, 0xe9, 0x2d, 0x23, 0xff, 0x9b, 0x45, + 0x98, 0x34, 0x5d, 0xf7, 0x4e, 0x60, 0x08, 0xd6, 0x61, 0x34, 0x12, 0xbe, 0x69, 0x85, 0x7c, 0x8b, + 0xec, 0xf4, 0x24, 0x26, 0xd6, 0x5a, 0xc2, 0x1b, 0x4d, 0x12, 0xc9, 0x74, 0x7a, 0x2b, 0x3e, 0x44, + 0xa7, 0xb7, 0x7e, 0xde, 0x93, 0x43, 0xc7, 0xe1, 0x3d, 0x69, 0x7f, 0x99, 0xdd, 0x6c, 0x7a, 0xf9, + 0x09, 0x30, 0x3d, 0x57, 0xcd, 0x3b, 0xd0, 0xee, 0xb1, 0xb2, 0x44, 0xa7, 0x72, 0x98, 0x9f, 0x9f, + 0xb7, 0xe0, 0x7c, 0xc6, 0x57, 0x69, 0x9c, 0xd0, 0x33, 0x50, 0x72, 0x3a, 0xae, 0xa7, 0xf6, 0xb2, + 0xa6, 0x35, 0x5b, 0x14, 0xe5, 0x58, 0x61, 0xa0, 0x65, 0x98, 0x21, 0xf7, 0xda, 0x1e, 0x57, 0x18, + 0xea, 0x26, 0x95, 0x45, 0x1e, 0xef, 0x7a, 0x25, 0x0d, 0xc4, 0xdd, 0xf8, 0x2a, 0xd8, 0x43, 0x31, + 0x37, 0xd8, 0xc3, 0xdf, 0xb7, 0x60, 0x4c, 0x79, 0x67, 0x3d, 0xf4, 0xd1, 0xfe, 0x26, 0x73, 0xb4, + 0x1f, 0xed, 0x31, 0xda, 0x39, 0xc3, 0xfc, 0xb7, 0x0b, 0xaa, 0xbf, 0xb5, 0x20, 0x8c, 0x07, 0xe0, + 0xb0, 0x5e, 0x81, 0x52, 0x3b, 0x0c, 0xe2, 0xa0, 0x11, 0x34, 0x05, 0x83, 0xf5, 0x58, 0x12, 0x8b, + 0x84, 0x97, 0x1f, 0x6a, 0xbf, 0xb1, 0xc2, 0x66, 0xa3, 0x17, 0x84, 0xb1, 0x60, 0x6a, 0x92, 0xd1, + 0x0b, 0xc2, 0x18, 0x33, 0x08, 0x72, 0x01, 0x62, 0x27, 0xdc, 0x26, 0x31, 0x2d, 0x13, 0xb1, 0x8f, + 0xf2, 0x0f, 0x8f, 0x4e, 0xec, 0x35, 0x17, 0x3c, 0x3f, 0x8e, 0xe2, 0x70, 0xa1, 0xea, 0xc7, 0x37, + 0x43, 0xfe, 0x5e, 0xd3, 0x82, 0x8b, 0x28, 0x5a, 0x58, 0xa3, 0x2b, 0xdd, 0x8a, 0x59, 0x1b, 0xc3, + 0xa6, 0xfe, 0x7d, 0x5d, 0x94, 0x63, 0x85, 0x61, 0xbf, 0xcc, 0xae, 0x12, 0x36, 0x40, 0x47, 0x8b, + 0xfb, 0xf1, 0x9d, 0x65, 0x35, 0xb4, 0x4c, 0xf9, 0x56, 0xd1, 0xa3, 0x8b, 0xf4, 0x3e, 0xb9, 0x69, + 0xc3, 0xba, 0x8b, 0x51, 0x12, 0x82, 0x04, 0x7d, 0x73, 0x97, 0x4d, 0xc5, 0xb3, 0x7d, 0xae, 0x80, + 0x23, 0x58, 0x51, 0xb0, 0x18, 0xfc, 0x2c, 0x42, 0x79, 0xb5, 0x26, 0x16, 0xb9, 0x16, 0x83, 0x5f, + 0x00, 
0x70, 0x82, 0x83, 0xae, 0x88, 0xd7, 0xf8, 0x90, 0x91, 0x79, 0x52, 0xbe, 0xc6, 0xe5, 0xe7, + 0x6b, 0xc2, 0xec, 0xe7, 0x60, 0x4c, 0x65, 0xa0, 0xac, 0xf1, 0xc4, 0x86, 0x22, 0x12, 0xd4, 0x4a, + 0x52, 0x8c, 0x75, 0x1c, 0xb4, 0x01, 0x53, 0x11, 0x17, 0xf5, 0xa8, 0x80, 0x9f, 0x5c, 0x64, 0xf6, + 0x51, 0x69, 0x88, 0x52, 0x37, 0xc1, 0x87, 0xac, 0x88, 0x1f, 0x1d, 0xd2, 0x95, 0x37, 0x4d, 0x02, + 0xbd, 0x06, 0x93, 0xcd, 0xc0, 0x71, 0x97, 0x9c, 0xa6, 0xe3, 0x37, 0xd8, 0xf7, 0x96, 0xcc, 0x44, + 0x66, 0x37, 0x0c, 0x28, 0x4e, 0x61, 0x53, 0xce, 0x47, 0x2f, 0x11, 0x41, 0x6a, 0x1d, 0x7f, 0x9b, + 0x44, 0x22, 0x9f, 0x20, 0xe3, 0x7c, 0x6e, 0xe4, 0xe0, 0xe0, 0xdc, 0xda, 0xe8, 0x15, 0x18, 0x97, + 0x9f, 0xaf, 0x79, 0xbe, 0x27, 0xb6, 0xf7, 0x1a, 0x0c, 0x1b, 0x98, 0xe8, 0x2e, 0x9c, 0x91, 0xff, + 0x37, 0x42, 0x67, 0x6b, 0xcb, 0x6b, 0x08, 0x77, 0x50, 0xee, 0x18, 0xb7, 0x28, 0xbd, 0xb7, 0x56, + 0xb2, 0x90, 0x0e, 0x0f, 0xe6, 0x2f, 0x8a, 0x51, 0xcb, 0x84, 0xb3, 0x49, 0xcc, 0xa6, 0x8f, 0xd6, + 0xe0, 0xd4, 0x0e, 0x71, 0x9a, 0xf1, 0xce, 0xf2, 0x0e, 0x69, 0xec, 0xca, 0x4d, 0xc4, 0xfc, 0xe9, + 0x35, 0x8b, 0xf5, 0x6b, 0xdd, 0x28, 0x38, 0xab, 0x1e, 0x7a, 0x0b, 0x66, 0xdb, 0x9d, 0xcd, 0xa6, + 0x17, 0xed, 0xac, 0x07, 0x31, 0xb3, 0x46, 0x51, 0x09, 0x2d, 0x85, 0xe3, 0xbd, 0x8a, 0x58, 0x50, + 0xcb, 0xc1, 0xc3, 0xb9, 0x14, 0xd0, 0xbb, 0x70, 0x26, 0xb5, 0x18, 0x84, 0xeb, 0xf1, 0x64, 0x7e, + 0xc8, 0xef, 0x7a, 0x56, 0x05, 0xe1, 0xc5, 0x9f, 0x05, 0xc2, 0xd9, 0x4d, 0xa0, 0x17, 0xa1, 0xe4, + 0xb5, 0x57, 0x9d, 0x96, 0xd7, 0xdc, 0x67, 0x31, 0xcb, 0xcb, 0x2c, 0x8e, 0x77, 0xa9, 0x5a, 0xe3, + 0x65, 0x87, 0xda, 0x6f, 0xac, 0x30, 0xdf, 0x9b, 0x35, 0xd2, 0x3b, 0xb4, 0xb2, 0xc6, 0xca, 0xa1, + 0xcf, 0xc3, 0xb8, 0xbe, 0xf6, 0xc4, 0xb5, 0x74, 0x29, 0x9b, 0xd3, 0xd1, 0xd6, 0x28, 0x67, 0x04, + 0xd5, 0x3a, 0xd4, 0x61, 0xd8, 0xa0, 0x68, 0x13, 0xc8, 0x1e, 0x15, 0x74, 0x03, 0x4a, 0x8d, 0xa6, + 0x47, 0xfc, 0xb8, 0x5a, 0xeb, 0x15, 0x88, 0x68, 0x59, 0xe0, 0x88, 0x61, 0x16, 0x91, 0x95, 0x79, + 0x19, 0x56, 0x14, 0xec, 0x5f, 0x2d, 0xc0, 0x7c, 0x9f, 0x30, 0xdd, 0x29, 0xa1, 0xb9, 0x35, 0x90, + 0xd0, 0x7c, 0x51, 0x26, 0xf5, 0x5c, 0x4f, 0x49, 0x12, 0x52, 0x09, 0x3b, 0x13, 0x79, 0x42, 0x1a, + 0x7f, 0x60, 0x23, 0x66, 0x5d, 0xee, 0x3e, 0xd4, 0xd7, 0x0c, 0xdf, 0xd0, 0xb7, 0x0d, 0x0f, 0xfe, + 0x7c, 0xc9, 0xd5, 0x9d, 0xd8, 0x5f, 0x2e, 0xc0, 0x19, 0x35, 0x84, 0x5f, 0xbf, 0x03, 0x77, 0xab, + 0x7b, 0xe0, 0x8e, 0x41, 0xf3, 0x64, 0xdf, 0x84, 0x11, 0x1e, 0x59, 0x69, 0x00, 0xb6, 0xe9, 0x09, + 0x33, 0x34, 0xa0, 0xba, 0xdc, 0x8d, 0xf0, 0x80, 0xdf, 0x63, 0xc1, 0xd4, 0xc6, 0x72, 0xad, 0x1e, + 0x34, 0x76, 0x49, 0xbc, 0xc8, 0xd9, 0x5c, 0x2c, 0xb8, 0x26, 0xeb, 0x01, 0xb9, 0xa1, 0x2c, 0x3e, + 0xeb, 0x22, 0x0c, 0xed, 0x04, 0x51, 0x9c, 0x56, 0x4b, 0x5f, 0x0b, 0xa2, 0x18, 0x33, 0x88, 0xfd, + 0x3b, 0x16, 0x0c, 0xb3, 0x3c, 0xd6, 0xfd, 0x32, 0xa9, 0x0f, 0xf2, 0x5d, 0xe8, 0x25, 0x18, 0x21, + 0x5b, 0x5b, 0xa4, 0x11, 0x8b, 0x59, 0x95, 0x7e, 0xc4, 0x23, 0x2b, 0xac, 0x94, 0xb2, 0x0a, 0xac, + 0x31, 0xfe, 0x17, 0x0b, 0x64, 0x74, 0x07, 0xca, 0xb1, 0xd7, 0x22, 0x8b, 0xae, 0x2b, 0x14, 0x7b, + 0x0f, 0xe0, 0x0b, 0xbd, 0x21, 0x09, 0xe0, 0x84, 0x96, 0xfd, 0xa5, 0x02, 0x40, 0x12, 0x57, 0xa3, + 0xdf, 0x27, 0x2e, 0x75, 0xa9, 0x7c, 0x2e, 0x65, 0xa8, 0x7c, 0x50, 0x42, 0x30, 0x43, 0xdf, 0xa3, + 0x86, 0xa9, 0x38, 0xd0, 0x30, 0x0d, 0x1d, 0x65, 0x98, 0x96, 0x61, 0x26, 0x89, 0x0b, 0x62, 0x86, + 0x45, 0x62, 0x4f, 0x9b, 0x8d, 0x34, 0x10, 0x77, 0xe3, 0xdb, 0x04, 0x2e, 0xaa, 0xf0, 0x08, 0xe2, + 0xae, 0x61, 0x76, 0xa3, 0x47, 0x48, 0xaa, 0x9f, 0xe8, 0xb4, 0x0a, 0xb9, 0x3a, 0xad, 0x1f, 0xb3, + 0xe0, 0x74, 0xba, 0x1d, 0xe6, 
0xc8, 0xf7, 0x45, 0x0b, 0xce, 0x30, 0xcd, 0x1e, 0x6b, 0xb5, 0x5b, + 0x8f, 0xf8, 0x62, 0xcf, 0x90, 0x0f, 0x39, 0x3d, 0x4e, 0x1c, 0xd6, 0xd7, 0xb2, 0x48, 0xe3, 0xec, + 0x16, 0xed, 0xff, 0x58, 0x80, 0xd9, 0xbc, 0x58, 0x11, 0xcc, 0xac, 0xdc, 0xb9, 0x57, 0xdf, 0x25, + 0x77, 0x85, 0xf1, 0x6e, 0x62, 0x56, 0xce, 0x8b, 0xb1, 0x84, 0xa7, 0x23, 0x2f, 0x17, 0x06, 0x8b, + 0xbc, 0x8c, 0x76, 0x60, 0xe6, 0xee, 0x0e, 0xf1, 0x6f, 0xf9, 0x91, 0x13, 0x7b, 0xd1, 0x96, 0xc7, + 0x32, 0xa2, 0xf3, 0x75, 0xf3, 0x09, 0x69, 0x62, 0x7b, 0x27, 0x8d, 0x70, 0x78, 0x30, 0x7f, 0xde, + 0x28, 0x48, 0xba, 0xcc, 0x0f, 0x12, 0xdc, 0x4d, 0xb4, 0x3b, 0x70, 0xf5, 0xd0, 0x43, 0x0c, 0x5c, + 0x6d, 0x7f, 0xd1, 0x82, 0x73, 0xb9, 0x89, 0xe5, 0xd0, 0x65, 0x28, 0x39, 0x6d, 0x8f, 0x8b, 0x40, + 0xc5, 0x31, 0xca, 0x9e, 0xf2, 0xb5, 0x2a, 0x17, 0x80, 0x2a, 0xa8, 0x4a, 0x78, 0x5b, 0xc8, 0x4d, + 0x78, 0xdb, 0x37, 0x7f, 0xad, 0xfd, 0xdd, 0x16, 0x08, 0x97, 0xb8, 0x01, 0xce, 0xee, 0x37, 0x65, + 0xbe, 0x70, 0x23, 0xb9, 0xc5, 0xc5, 0x7c, 0x1f, 0x41, 0x91, 0xd2, 0x42, 0xf1, 0x4a, 0x46, 0x22, + 0x0b, 0x83, 0x96, 0xed, 0x82, 0x80, 0x56, 0x08, 0x13, 0x20, 0xf6, 0xef, 0xcd, 0xf3, 0x00, 0x2e, + 0xc3, 0xd5, 0xb2, 0x06, 0xab, 0x9b, 0xb9, 0xa2, 0x20, 0x58, 0xc3, 0xb2, 0xff, 0x6d, 0x01, 0xc6, + 0x64, 0x32, 0x85, 0x8e, 0x3f, 0xc8, 0x33, 0xff, 0x48, 0xd9, 0xd5, 0x58, 0x9a, 0x6d, 0x4a, 0xb8, + 0x96, 0x48, 0x47, 0x92, 0x34, 0xdb, 0x12, 0x80, 0x13, 0x1c, 0xba, 0x8b, 0xa2, 0xce, 0x26, 0x43, + 0x4f, 0x39, 0x70, 0xd5, 0x79, 0x31, 0x96, 0x70, 0xf4, 0x69, 0x98, 0xe6, 0xf5, 0xc2, 0xa0, 0xed, + 0x6c, 0x73, 0xd9, 0xf2, 0xb0, 0xf2, 0xbc, 0x9e, 0x5e, 0x4b, 0xc1, 0x0e, 0x0f, 0xe6, 0x4f, 0xa7, + 0xcb, 0x98, 0xd2, 0xa4, 0x8b, 0x0a, 0x33, 0xc4, 0xe0, 0x8d, 0xd0, 0xdd, 0xdf, 0x65, 0xbf, 0x91, + 0x80, 0xb0, 0x8e, 0x67, 0x7f, 0x1e, 0x50, 0x77, 0x5a, 0x09, 0xf4, 0x3a, 0xb7, 0xbe, 0xf3, 0x42, + 0xe2, 0xf6, 0x52, 0xa2, 0xe8, 0xfe, 0xc5, 0xd2, 0xf7, 0x82, 0xd7, 0xc2, 0xaa, 0xbe, 0xfd, 0x57, + 0x8a, 0x30, 0x9d, 0xf6, 0x36, 0x45, 0xd7, 0x60, 0x84, 0xb3, 0x1e, 0x82, 0x7c, 0x0f, 0x1d, 0xbd, + 0xe6, 0xa3, 0xca, 0x0e, 0x61, 0xc1, 0xbd, 0x88, 0xfa, 0xe8, 0x2d, 0x18, 0x73, 0x83, 0xbb, 0xfe, + 0x5d, 0x27, 0x74, 0x17, 0x6b, 0x55, 0xb1, 0x9c, 0x33, 0xdf, 0x3d, 0x95, 0x04, 0x4d, 0xf7, 0x7b, + 0x65, 0xfa, 0xa8, 0x04, 0x84, 0x75, 0x72, 0x68, 0x83, 0x45, 0xc1, 0xdd, 0xf2, 0xb6, 0xd7, 0x9c, + 0x76, 0x2f, 0x53, 0xec, 0x65, 0x89, 0xa4, 0x51, 0x9e, 0x10, 0xa1, 0x72, 0x39, 0x00, 0x27, 0x84, + 0xd0, 0xb7, 0xc2, 0xa9, 0x28, 0x47, 0x54, 0x9a, 0x97, 0x65, 0xa8, 0x97, 0xf4, 0x70, 0xe9, 0x11, + 0xfa, 0x22, 0xcd, 0x12, 0xaa, 0x66, 0x35, 0x63, 0xff, 0xda, 0x29, 0x30, 0x36, 0xb1, 0x91, 0x74, + 0xce, 0x3a, 0xa6, 0xa4, 0x73, 0x18, 0x4a, 0xa4, 0xd5, 0x8e, 0xf7, 0x2b, 0x5e, 0xd8, 0x2b, 0x6b, + 0xe9, 0x8a, 0xc0, 0xe9, 0xa6, 0x29, 0x21, 0x58, 0xd1, 0xc9, 0xce, 0x0c, 0x58, 0xfc, 0x1a, 0x66, + 0x06, 0x1c, 0x3a, 0xc1, 0xcc, 0x80, 0xeb, 0x30, 0xba, 0xed, 0xc5, 0x98, 0xb4, 0x03, 0xc1, 0xf4, + 0x67, 0xae, 0xc3, 0xab, 0x1c, 0xa5, 0x3b, 0x07, 0x95, 0x00, 0x60, 0x49, 0x04, 0xbd, 0xae, 0x76, + 0xe0, 0x48, 0xfe, 0x9b, 0xb9, 0x5b, 0x99, 0x9c, 0xb9, 0x07, 0x45, 0xfe, 0xbf, 0xd1, 0x07, 0xcd, + 0xff, 0xb7, 0x2a, 0xb3, 0xf6, 0x95, 0xf2, 0xfd, 0x26, 0x58, 0x52, 0xbe, 0x3e, 0xb9, 0xfa, 0x6e, + 0xeb, 0x99, 0x0e, 0xcb, 0xf9, 0x27, 0x81, 0x4a, 0x62, 0x38, 0x60, 0x7e, 0xc3, 0xef, 0xb6, 0xe0, + 0x4c, 0x3b, 0x2b, 0xe9, 0xa7, 0xd0, 0xbb, 0xbe, 0x34, 0x70, 0x56, 0x53, 0xa3, 0x41, 0x26, 0x72, + 0xc9, 0x44, 0xc3, 0xd9, 0xcd, 0xd1, 0x81, 0x0e, 0x37, 0x5d, 0x91, 0xa0, 0xef, 0x89, 0x9c, 0x44, + 0x89, 0x3d, 0xd2, 0x23, 0x6e, 0x64, 0x24, 0xe5, 0xfb, 
0x70, 0x5e, 0x52, 0xbe, 0x81, 0x53, 0xf1, + 0xbd, 0xae, 0x52, 0x24, 0x4e, 0xe4, 0x2f, 0x25, 0x9e, 0x00, 0xb1, 0x6f, 0x62, 0xc4, 0xd7, 0x55, + 0x62, 0xc4, 0x1e, 0x11, 0x21, 0x79, 0xda, 0xc3, 0xbe, 0xe9, 0x10, 0xb5, 0x94, 0x86, 0x53, 0xc7, + 0x93, 0xd2, 0xd0, 0xb8, 0x6a, 0x78, 0x56, 0xbd, 0xa7, 0xfb, 0x5c, 0x35, 0x06, 0xdd, 0xde, 0x97, + 0x0d, 0x4f, 0xdf, 0x38, 0xf3, 0x40, 0xe9, 0x1b, 0x6f, 0xeb, 0xe9, 0x10, 0x51, 0x9f, 0x7c, 0x7f, + 0x14, 0x69, 0xc0, 0x24, 0x88, 0xb7, 0xf5, 0x0b, 0xf0, 0x54, 0x3e, 0x5d, 0x75, 0xcf, 0x75, 0xd3, + 0xcd, 0xbc, 0x02, 0xbb, 0x92, 0x2b, 0x9e, 0x3e, 0x99, 0xe4, 0x8a, 0x67, 0x8e, 0x3d, 0xb9, 0xe2, + 0xd9, 0x13, 0x48, 0xae, 0xf8, 0xc8, 0x09, 0x26, 0x57, 0xbc, 0xcd, 0x8c, 0x15, 0x78, 0x60, 0x11, + 0x11, 0xc1, 0x32, 0x3b, 0x5a, 0x62, 0x56, 0xf4, 0x11, 0xfe, 0x71, 0x0a, 0x84, 0x13, 0x52, 0x19, + 0x49, 0x1b, 0x67, 0x1f, 0x42, 0xd2, 0xc6, 0xf5, 0x24, 0x69, 0xe3, 0xb9, 0xfc, 0xa9, 0xce, 0x30, + 0x12, 0xcf, 0x49, 0xd5, 0x78, 0x5b, 0x4f, 0xb1, 0xf8, 0x68, 0x0f, 0xa1, 0x7a, 0x96, 0xe0, 0xb1, + 0x47, 0x62, 0xc5, 0xd7, 0x78, 0x62, 0xc5, 0xc7, 0xf2, 0x4f, 0xf2, 0xf4, 0x75, 0x67, 0xa6, 0x53, + 0xfc, 0xde, 0x02, 0x5c, 0xe8, 0xbd, 0x2f, 0x12, 0xa9, 0x67, 0x2d, 0xd1, 0xed, 0xa5, 0xa4, 0x9e, + 0xfc, 0x6d, 0x95, 0x60, 0x0d, 0x1c, 0x73, 0xea, 0x2a, 0xcc, 0x28, 0x2b, 0xf0, 0xa6, 0xd7, 0xd8, + 0xd7, 0x32, 0xc8, 0x2b, 0xcf, 0xd9, 0x7a, 0x1a, 0x01, 0x77, 0xd7, 0x41, 0x8b, 0x30, 0x65, 0x14, + 0x56, 0x2b, 0xe2, 0x0d, 0xa5, 0xc4, 0xac, 0x75, 0x13, 0x8c, 0xd3, 0xf8, 0xf6, 0x4f, 0x59, 0xf0, + 0x48, 0x4e, 0xde, 0xa2, 0x81, 0x43, 0x2a, 0x6d, 0xc1, 0x54, 0xdb, 0xac, 0xda, 0x27, 0xf2, 0x9a, + 0x91, 0x1d, 0x49, 0xf5, 0x35, 0x05, 0xc0, 0x69, 0xa2, 0xf6, 0x9f, 0x5a, 0x70, 0xbe, 0xa7, 0x41, + 0x16, 0xc2, 0x70, 0x76, 0xbb, 0x15, 0x39, 0xcb, 0x21, 0x71, 0x89, 0x1f, 0x7b, 0x4e, 0xb3, 0xde, + 0x26, 0x0d, 0x4d, 0x6e, 0xcd, 0x2c, 0x9b, 0xae, 0xae, 0xd5, 0x17, 0xbb, 0x31, 0x70, 0x4e, 0x4d, + 0xb4, 0x0a, 0xa8, 0x1b, 0x22, 0x66, 0x98, 0x45, 0x67, 0xed, 0xa6, 0x87, 0x33, 0x6a, 0xa0, 0x97, + 0x61, 0x42, 0x19, 0x7a, 0x69, 0x33, 0xce, 0x0e, 0x60, 0xac, 0x03, 0xb0, 0x89, 0xb7, 0x74, 0xf9, + 0x37, 0x7e, 0xef, 0xc2, 0x87, 0x7e, 0xeb, 0xf7, 0x2e, 0x7c, 0xe8, 0xb7, 0x7f, 0xef, 0xc2, 0x87, + 0xbe, 0xfd, 0xfe, 0x05, 0xeb, 0x37, 0xee, 0x5f, 0xb0, 0x7e, 0xeb, 0xfe, 0x05, 0xeb, 0xb7, 0xef, + 0x5f, 0xb0, 0x7e, 0xf7, 0xfe, 0x05, 0xeb, 0x4b, 0xbf, 0x7f, 0xe1, 0x43, 0x6f, 0x16, 0xf6, 0x9e, + 0xfb, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x4e, 0x11, 0xe2, 0x4d, 0x14, 0xfc, 0x00, 0x00, +} + +func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LimitRange) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *AWSElasticBlockStoreVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AWSElasticBlockStoreVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n68, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n68 + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.Partition)) + i-- + dAtA[i] = 0x18 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.FSType))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n69, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n69 - return i, nil + i -= len(m.VolumeID) + copy(dAtA[i:], m.VolumeID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *LimitRangeItem) Marshal() (dAtA []byte, err error) { +func (m *Affinity) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Affinity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Affinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if len(m.Max) > 0 { - keysForMax := make([]string, 0, len(m.Max)) - for k := range m.Max { - keysForMax = append(keysForMax, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMax) - for _, k := range keysForMax { - dAtA[i] = 0x12 - i++ - v := m.Max[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n70, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n70 - } - } - if len(m.Min) > 0 { - keysForMin := make([]string, 0, len(m.Min)) - for k := range m.Min { - keysForMin = append(keysForMin, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMin) - for _, k := range keysForMin { - dAtA[i] = 0x1a - i++ - v := m.Min[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n71, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n71 - } - } - if len(m.Default) > 0 { - keysForDefault := make([]string, 0, len(m.Default)) - for k := range m.Default { - keysForDefault = append(keysForDefault, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDefault) - for _, k := range keysForDefault { - dAtA[i] = 0x22 - i++ - v := m.Default[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n72, err := (&v).MarshalTo(dAtA[i:]) + if m.PodAntiAffinity != nil { + { + size, err := m.PodAntiAffinity.MarshalToSizedBuffer(dAtA[:i]) if err 
!= nil { return 0, err } - i += n72 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - if len(m.DefaultRequest) > 0 { - keysForDefaultRequest := make([]string, 0, len(m.DefaultRequest)) - for k := range m.DefaultRequest { - keysForDefaultRequest = append(keysForDefaultRequest, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest) - for _, k := range keysForDefaultRequest { - dAtA[i] = 0x2a - i++ - v := m.DefaultRequest[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n73, err := (&v).MarshalTo(dAtA[i:]) + if m.PodAffinity != nil { + { + size, err := m.PodAffinity.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n73 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - if len(m.MaxLimitRequestRatio) > 0 { - keysForMaxLimitRequestRatio := make([]string, 0, len(m.MaxLimitRequestRatio)) - for k := range m.MaxLimitRequestRatio { - keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio) - for _, k := range keysForMaxLimitRequestRatio { - dAtA[i] = 0x32 - i++ - v := m.MaxLimitRequestRatio[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n74, err := (&v).MarshalTo(dAtA[i:]) + if m.NodeAffinity != nil { + { + size, err := m.NodeAffinity.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n74 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } -func (m *LimitRangeList) Marshal() (dAtA []byte, err error) { +func (m *AttachedVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LimitRangeList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *AttachedVolume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AttachedVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.DevicePath) + copy(dAtA[i:], m.DevicePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n75, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n75 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := 
msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil + return len(dAtA) - i, nil } -func (m *LimitRangeSpec) Marshal() (dAtA []byte, err error) { +func (m *AvoidPods) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LimitRangeSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Limits) > 0 { - for _, msg := range m.Limits { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *List) Marshal() (dAtA []byte, err error) { +func (m *AvoidPods) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *List) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *AvoidPods) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n76, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n76 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.PreferAvoidPods) > 0 { + for iNdEx := len(m.PreferAvoidPods) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PreferAvoidPods[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } -func (m *LoadBalancerIngress) Marshal() (dAtA []byte, err error) { +func (m *AzureDiskVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LoadBalancerIngress) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) - i += copy(dAtA[i:], m.IP) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) - i += copy(dAtA[i:], m.Hostname) - return i, nil -} - -func (m *LoadBalancerStatus) Marshal() (dAtA []byte, err error) { +func (m *AzureDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *LoadBalancerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *AzureDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Ingress) > 0 { - for _, msg := range m.Ingress { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if m.Kind != nil { + i -= len(*m.Kind) + copy(dAtA[i:], *m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Kind))) + i-- + dAtA[i] = 
0x32 + } + if m.ReadOnly != nil { + i-- + if *m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x28 + } + if m.FSType != nil { + i -= len(*m.FSType) + copy(dAtA[i:], *m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) + i-- + dAtA[i] = 0x22 + } + if m.CachingMode != nil { + i -= len(*m.CachingMode) + copy(dAtA[i:], *m.CachingMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CachingMode))) + i-- + dAtA[i] = 0x1a } - return i, nil + i -= len(m.DataDiskURI) + copy(dAtA[i:], m.DataDiskURI) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DataDiskURI))) + i-- + dAtA[i] = 0x12 + i -= len(m.DiskName) + copy(dAtA[i:], m.DiskName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DiskName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *LocalObjectReference) Marshal() (dAtA []byte, err error) { +func (m *AzureFilePersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LocalObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil -} - -func (m *LocalVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *AzureFilePersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *LocalVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *AzureFilePersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - if m.FSType != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) - i += copy(dAtA[i:], *m.FSType) + if m.SecretNamespace != nil { + i -= len(*m.SecretNamespace) + copy(dAtA[i:], *m.SecretNamespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SecretNamespace))) + i-- + dAtA[i] = 0x22 + } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x18 + i -= len(m.ShareName) + copy(dAtA[i:], m.ShareName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShareName))) + i-- + dAtA[i] = 0x12 + i -= len(m.SecretName) + copy(dAtA[i:], m.SecretName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NFSVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *AzureFileVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *AzureFileVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AzureFileVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Server))) - i += copy(dAtA[i:], m.Server) - dAtA[i] = 0x12 - i++ 
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x18 - i++ + i-- if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x18 + i -= len(m.ShareName) + copy(dAtA[i:], m.ShareName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShareName))) + i-- + dAtA[i] = 0x12 + i -= len(m.SecretName) + copy(dAtA[i:], m.SecretName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Namespace) Marshal() (dAtA []byte, err error) { +func (m *Binding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Namespace) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Binding) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Binding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n77, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n77 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n78, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n78 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n79, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n79 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NamespaceList) Marshal() (dAtA []byte, err error) { +func (m *CSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NamespaceList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *CSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSIPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n80, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.ControllerExpandSecretRef != nil { + { + size, err := m.ControllerExpandSecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a } - i += n80 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.NodePublishSecretRef != nil { + { + size, err := m.NodePublishSecretRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x42 } - return 
i, nil -} - -func (m *NamespaceSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err + if m.NodeStageSecretRef != nil { + { + size, err := m.NodeStageSecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a } - return dAtA[:n], nil -} - -func (m *NamespaceSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Finalizers) > 0 { - for _, s := range m.Finalizers { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.ControllerPublishSecretRef != nil { + { + size, err := m.ControllerPublishSecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if len(m.VolumeAttributes) > 0 { + keysForVolumeAttributes := make([]string, 0, len(m.VolumeAttributes)) + for k := range m.VolumeAttributes { + keysForVolumeAttributes = append(keysForVolumeAttributes, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) + for iNdEx := len(keysForVolumeAttributes) - 1; iNdEx >= 0; iNdEx-- { + v := m.VolumeAttributes[string(keysForVolumeAttributes[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForVolumeAttributes[iNdEx]) + copy(dAtA[i:], keysForVolumeAttributes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForVolumeAttributes[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a } } - return i, nil + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x22 + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i -= len(m.VolumeHandle) + copy(dAtA[i:], m.VolumeHandle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeHandle))) + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NamespaceStatus) Marshal() (dAtA []byte, err error) { +func (m *CSIVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NamespaceStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *CSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSIVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if m.NodePublishSecretRef != nil { + { + size, err := m.NodePublishSecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.VolumeAttributes) > 0 { + keysForVolumeAttributes := make([]string, 0, len(m.VolumeAttributes)) + for k := range m.VolumeAttributes { + keysForVolumeAttributes = append(keysForVolumeAttributes, string(k)) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) + for iNdEx := len(keysForVolumeAttributes) - 1; iNdEx >= 0; iNdEx-- { + v := m.VolumeAttributes[string(keysForVolumeAttributes[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForVolumeAttributes[iNdEx]) + copy(dAtA[i:], keysForVolumeAttributes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForVolumeAttributes[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if m.FSType != nil { + i -= len(*m.FSType) + copy(dAtA[i:], *m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) + i-- + dAtA[i] = 0x1a + } + if m.ReadOnly != nil { + i-- + if *m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i += copy(dAtA[i:], m.Phase) - return i, nil + return len(dAtA) - i, nil } -func (m *Node) Marshal() (dAtA []byte, err error) { +func (m *Capabilities) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Node) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Capabilities) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Capabilities) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n81, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n81 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n82, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Drop) > 0 { + for iNdEx := len(m.Drop) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Drop[iNdEx]) + copy(dAtA[i:], m.Drop[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Drop[iNdEx]))) + i-- + dAtA[i] = 0x12 + } } - i += n82 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n83, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Add) > 0 { + for iNdEx := len(m.Add) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Add[iNdEx]) + copy(dAtA[i:], m.Add[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Add[iNdEx]))) + i-- + dAtA[i] = 0xa + } } - i += n83 - return i, nil + return len(dAtA) - i, nil } -func (m *NodeAddress) Marshal() (dAtA []byte, err error) { +func (m *CephFSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeAddress) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *CephFSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CephFSPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], 
m.Type) + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + i -= len(m.SecretFile) + copy(dAtA[i:], m.SecretFile) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretFile))) + i-- + dAtA[i] = 0x22 + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x1a + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Address))) - i += copy(dAtA[i:], m.Address) - return i, nil + if len(m.Monitors) > 0 { + for iNdEx := len(m.Monitors) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Monitors[iNdEx]) + copy(dAtA[i:], m.Monitors[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Monitors[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *NodeAffinity) Marshal() (dAtA []byte, err error) { +func (m *CephFSVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeAffinity) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *CephFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CephFSVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RequiredDuringSchedulingIgnoredDuringExecution.Size())) - n84, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n84 + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x30 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + i -= len(m.SecretFile) + copy(dAtA[i:], m.SecretFile) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretFile))) + i-- + dAtA[i] = 0x22 + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x1a + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + if len(m.Monitors) > 0 { + for iNdEx := len(m.Monitors) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Monitors[iNdEx]) + copy(dAtA[i:], m.Monitors[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Monitors[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } -func (m *NodeCondition) Marshal() (dAtA []byte, err error) { +func (m *CinderPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeCondition) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *CinderPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CinderPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastHeartbeatTime.Size())) - n85, err := m.LastHeartbeatTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - i += n85 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n86, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n86 - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x18 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumeID) + copy(dAtA[i:], m.VolumeID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeConfigSource) Marshal() (dAtA []byte, err error) { +func (m *CinderVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeConfigSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *CinderVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CinderVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ConfigMap != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n87, err := m.ConfigMap.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n87 + i-- + dAtA[i] = 0x22 + } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x18 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumeID) + copy(dAtA[i:], m.VolumeID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeConfigStatus) Marshal() (dAtA []byte, err error) { +func (m *ClientIPConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, 
err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeConfigStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ClientIPConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClientIPConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Assigned != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Assigned.Size())) - n88, err := m.Assigned.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n88 - } - if m.Active != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Active.Size())) - n89, err := m.Active.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n89 - } - if m.LastKnownGood != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastKnownGood.Size())) - n90, err := m.LastKnownGood.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n90 + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x8 } - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) - i += copy(dAtA[i:], m.Error) - return i, nil + return len(dAtA) - i, nil } -func (m *NodeDaemonEndpoints) Marshal() (dAtA []byte, err error) { +func (m *ComponentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeDaemonEndpoints) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ComponentCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ComponentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x22 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.KubeletEndpoint.Size())) - n91, err := m.KubeletEndpoint.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n91 - return i, nil + return len(dAtA) - i, nil } -func (m *NodeList) Marshal() (dAtA []byte, err error) { +func (m *ComponentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ComponentStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ComponentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n92, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n92 - 
if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeProxyOptions) Marshal() (dAtA []byte, err error) { +func (m *ComponentStatusList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeProxyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ComponentStatusList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ComponentStatusList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - return i, nil + return len(dAtA) - i, nil } -func (m *NodeResources) Marshal() (dAtA []byte, err error) { +func (m *ConfigMap) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeResources) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ConfigMap) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMap) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Capacity) > 0 { - keysForCapacity := make([]string, 0, len(m.Capacity)) - for k := range m.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) + if len(m.BinaryData) > 0 { + keysForBinaryData := make([]string, 0, len(m.BinaryData)) + for k := range m.BinaryData { + keysForBinaryData = append(keysForBinaryData, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - for _, k := range keysForCapacity { + github_com_gogo_protobuf_sortkeys.Strings(keysForBinaryData) + for iNdEx := len(keysForBinaryData) - 1; iNdEx >= 0; iNdEx-- { + v := m.BinaryData[string(keysForBinaryData[iNdEx])] + baseI := i + if v != nil { + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + } + i -= len(keysForBinaryData[iNdEx]) + copy(dAtA[i:], keysForBinaryData[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForBinaryData[iNdEx]))) + i-- dAtA[i] = 0xa - 
i++ - v := m.Capacity[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Data) > 0 { + keysForData := make([]string, 0, len(m.Data)) + for k := range m.Data { + keysForData = append(keysForData, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + for iNdEx := len(keysForData) - 1; iNdEx >= 0; iNdEx-- { + v := m.Data[string(keysForData[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForData[iNdEx]) + copy(dAtA[i:], keysForData[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForData[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n93, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n93 } } - return i, nil + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeSelector) Marshal() (dAtA []byte, err error) { +func (m *ConfigMapEnvSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeSelector) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ConfigMapEnvSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapEnvSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.NodeSelectorTerms) > 0 { - for _, msg := range m.NodeSelectorTerms { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeSelectorRequirement) Marshal() (dAtA []byte, err error) { +func (m *ConfigMapKeySelector) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ConfigMapKeySelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapKeySelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + i -= len(m.Key) + 
copy(dAtA[i:], m.Key) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) - i += copy(dAtA[i:], m.Operator) - if len(m.Values) > 0 { - for _, s := range m.Values { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeSelectorTerm) Marshal() (dAtA []byte, err error) { +func (m *ConfigMapList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeSelectorTerm) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ConfigMapList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.MatchExpressions) > 0 { - for _, msg := range m.MatchExpressions { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.MatchFields) > 0 { - for _, msg := range m.MatchFields { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeSpec) Marshal() (dAtA []byte, err error) { +func (m *ConfigMapNodeConfigSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ConfigMapNodeConfigSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapNodeConfigSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodCIDR))) - i += copy(dAtA[i:], m.PodCIDR) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DoNotUse_ExternalID))) - i += copy(dAtA[i:], m.DoNotUse_ExternalID) + i -= len(m.KubeletConfigKey) + copy(dAtA[i:], m.KubeletConfigKey) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeletConfigKey))) + i-- + dAtA[i] = 0x2a + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x22 + i -= len(m.UID) + copy(dAtA[i:], 
m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderID))) - i += copy(dAtA[i:], m.ProviderID) - dAtA[i] = 0x20 - i++ - if m.Unschedulable { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConfigMapProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - i++ - if len(m.Taints) > 0 { - for _, msg := range m.Taints { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + return dAtA[:n], nil +} + +func (m *ConfigMapProjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if m.ConfigSource != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigSource.Size())) - n94, err := m.ConfigSource.MarshalTo(dAtA[i:]) + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n94 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeStatus) Marshal() (dAtA []byte, err error) { +func (m *ConfigMapVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ConfigMapVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Capacity) > 0 { - keysForCapacity := make([]string, 0, len(m.Capacity)) - for k := range m.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - for _, k := range keysForCapacity { - dAtA[i] = 0xa - i++ - v := m.Capacity[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n95, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } 
- i += n95 + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x20 } - if len(m.Allocatable) > 0 { - keysForAllocatable := make([]string, 0, len(m.Allocatable)) - for k := range m.Allocatable { - keysForAllocatable = append(keysForAllocatable, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable) - for _, k := range keysForAllocatable { - dAtA[i] = 0x12 - i++ - v := m.Allocatable[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + if m.DefaultMode != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) + i-- + dAtA[i] = 0x18 + } + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n96, err := (&v).MarshalTo(dAtA[i:]) + } + } + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Container) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Container) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.StartupProbe != nil { + { + size, err := m.StartupProbe.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n96 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i += copy(dAtA[i:], m.Phase) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.VolumeDevices) > 0 { + for iNdEx := len(m.VolumeDevices) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeDevices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa } } - if len(m.Addresses) > 0 { - for _, msg := range m.Addresses { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.TerminationMessagePolicy) + copy(dAtA[i:], m.TerminationMessagePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePolicy))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + if len(m.EnvFrom) > 0 { + for iNdEx := len(m.EnvFrom) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EnvFrom[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a } } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DaemonEndpoints.Size())) - n97, err := m.DaemonEndpoints.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + if m.TTY { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n97 - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NodeInfo.Size())) - n98, err := m.NodeInfo.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + i-- + if m.StdinOnce { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n98 - if len(m.Images) > 0 { - for _, msg := range m.Images { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + i-- + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + if m.SecurityContext != nil { + { + size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x7a } - if len(m.VolumesInUse) > 0 { - for _, s := range m.VolumesInUse { - dAtA[i] = 0x4a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + i -= len(m.ImagePullPolicy) + copy(dAtA[i:], m.ImagePullPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImagePullPolicy))) + i-- + dAtA[i] = 0x72 + i -= len(m.TerminationMessagePath) + copy(dAtA[i:], m.TerminationMessagePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePath))) + i-- + dAtA[i] = 0x6a + if m.Lifecycle != nil { + { + size, err := m.Lifecycle.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x62 } - if len(m.VolumesAttached) > 0 { - for _, msg := range m.VolumesAttached { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.ReadinessProbe != nil { + { + size, err := m.ReadinessProbe.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if m.Config != nil { + i-- dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Config.Size())) - n99, err := m.Config.MarshalTo(dAtA[i:]) + } + if m.LivenessProbe != nil { + { + size, err := m.LivenessProbe.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if len(m.VolumeMounts) > 0 { + for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n99 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if 
len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i -= len(m.WorkingDir) + copy(dAtA[i:], m.WorkingDir) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WorkingDir))) + i-- + dAtA[i] = 0x2a + if len(m.Args) > 0 { + for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Args[iNdEx]) + copy(dAtA[i:], m.Args[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Args[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Command) > 0 { + for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- + dAtA[i] = 0x1a + } } - return i, nil + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeSystemInfo) Marshal() (dAtA []byte, err error) { +func (m *ContainerImage) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeSystemInfo) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ContainerImage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerImage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MachineID))) - i += copy(dAtA[i:], m.MachineID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SystemUUID))) - i += copy(dAtA[i:], m.SystemUUID) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.BootID))) - i += copy(dAtA[i:], m.BootID) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KernelVersion))) - i += copy(dAtA[i:], m.KernelVersion) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.OSImage))) - i += copy(dAtA[i:], m.OSImage) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerRuntimeVersion))) - i += copy(dAtA[i:], m.ContainerRuntimeVersion) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeletVersion))) - i += copy(dAtA[i:], m.KubeletVersion) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeProxyVersion))) - i += copy(dAtA[i:], m.KubeProxyVersion) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.OperatingSystem))) - i += copy(dAtA[i:], m.OperatingSystem) - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Architecture))) - i += copy(dAtA[i:], m.Architecture) - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.SizeBytes)) + i-- + dAtA[i] = 0x10 + if len(m.Names) > 0 { + for iNdEx := len(m.Names) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Names[iNdEx]) + copy(dAtA[i:], m.Names[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Names[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *ObjectFieldSelector) Marshal() (dAtA []byte, err error) { +func (m *ContainerPort) 
Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ObjectFieldSelector) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ContainerPort) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.HostIP) + copy(dAtA[i:], m.HostIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) + i-- + dAtA[i] = 0x2a + i -= len(m.Protocol) + copy(dAtA[i:], m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.ContainerPort)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.HostPort)) + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) - i += copy(dAtA[i:], m.FieldPath) - return i, nil + return len(dAtA) - i, nil } -func (m *ObjectReference) Marshal() (dAtA []byte, err error) { +func (m *ContainerState) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ContainerState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i += copy(dAtA[i:], m.ResourceVersion) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) - i += copy(dAtA[i:], m.FieldPath) - return i, nil + if m.Terminated != nil { + { + size, err := m.Terminated.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Running != nil { + { + size, err := m.Running.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Waiting != nil { + { + size, err := m.Waiting.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *PersistentVolume) 
Marshal() (dAtA []byte, err error) { +func (m *ContainerStateRunning) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolume) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ContainerStateRunning) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerStateRunning) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n100, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n100 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n101, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n101 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n102, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.StartedAt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n102 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PersistentVolumeClaim) Marshal() (dAtA []byte, err error) { +func (m *ContainerStateTerminated) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaim) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ContainerStateTerminated) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerStateTerminated) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n103, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.ContainerID) + copy(dAtA[i:], m.ContainerID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID))) + i-- + dAtA[i] = 0x3a + { + size, err := m.FinishedAt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n103 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n104, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x32 + { + size, err := m.StartedAt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n104 + i-- + dAtA[i] = 0x2a + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x22 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n105, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n105 - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.Signal)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ExitCode)) + i-- + dAtA[i] = 0x8 + return 
len(dAtA) - i, nil } -func (m *PersistentVolumeClaimCondition) Marshal() (dAtA []byte, err error) { +func (m *ContainerStateWaiting) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaimCondition) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ContainerStateWaiting) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerStateWaiting) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n106, err := m.LastProbeTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n106 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n107, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n107 - dAtA[i] = 0x2a - i++ + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PersistentVolumeClaimList) Marshal() (dAtA []byte, err error) { +func (m *ContainerStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaimList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ContainerStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n108, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Started != nil { + i-- + if *m.Started { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 } - i += n108 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i -= len(m.ContainerID) + copy(dAtA[i:], m.ContainerID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID))) + i-- + dAtA[i] = 0x42 + i -= len(m.ImageID) + copy(dAtA[i:], m.ImageID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageID))) + i-- + dAtA[i] = 0x3a + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.RestartCount)) + i-- + dAtA[i] = 0x28 + i-- + if m.Ready { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + { + size, err := 
m.LastTerminationState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.State.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PersistentVolumeClaimSpec) Marshal() (dAtA []byte, err error) { +func (m *DaemonEndpoint) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *DaemonEndpoint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonEndpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size())) - n109, err := m.Resources.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *DownwardAPIProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { - return 0, err - } - i += n109 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) - i += copy(dAtA[i:], m.VolumeName) - if m.Selector != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n110, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n110 - } - if m.StorageClassName != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StorageClassName))) - i += copy(dAtA[i:], *m.StorageClassName) - } - if m.VolumeMode != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) - i += copy(dAtA[i:], *m.VolumeMode) + return nil, err } - if m.DataSource != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DataSource.Size())) - n111, err := m.DataSource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + return dAtA[:n], nil +} + +func (m *DownwardAPIProjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DownwardAPIProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n111 } - return i, nil + return len(dAtA) - i, nil } -func (m *PersistentVolumeClaimStatus) Marshal() (dAtA []byte, err error) { +func (m *DownwardAPIVolumeFile) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA 
= make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaimStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *DownwardAPIVolumeFile) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DownwardAPIVolumeFile) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i += copy(dAtA[i:], m.Phase) - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } + if m.Mode != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) + i-- + dAtA[i] = 0x20 } - if len(m.Capacity) > 0 { - keysForCapacity := make([]string, 0, len(m.Capacity)) - for k := range m.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - for _, k := range keysForCapacity { - dAtA[i] = 0x1a - i++ - v := m.Capacity[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n112, err := (&v).MarshalTo(dAtA[i:]) + if m.ResourceFieldRef != nil { + { + size, err := m.ResourceFieldRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n112 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.FieldRef != nil { + { + size, err := m.FieldRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PersistentVolumeClaimVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *DownwardAPIVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaimVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *DownwardAPIVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DownwardAPIVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClaimName))) - i += copy(dAtA[i:], m.ClaimName) - dAtA[i] = 0x10 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.DefaultMode != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) + i-- + dAtA[i] = 0x10 + } + if 
len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - i++ - return i, nil + return len(dAtA) - i, nil } -func (m *PersistentVolumeList) Marshal() (dAtA []byte, err error) { +func (m *EmptyDirVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EmptyDirVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EmptyDirVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n113, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n113 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.SizeLimit != nil { + { + size, err := m.SizeLimit.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Medium) + copy(dAtA[i:], m.Medium) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Medium))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PersistentVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *EndpointAddress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EndpointAddress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.GCEPersistentDisk != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) - n114, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n114 + if m.NodeName != nil { + i -= len(*m.NodeName) + copy(dAtA[i:], *m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName))) + i-- + dAtA[i] = 0x22 } - if m.AWSElasticBlockStore != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) - n115, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n115 - } - if m.HostPath != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) - n116, err := m.HostPath.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n116 - } - if m.Glusterfs != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) - n117, err := m.Glusterfs.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n117 - } - if m.NFS != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) 
- n118, err := m.NFS.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n118 - } - if m.RBD != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) - n119, err := m.RBD.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n119 - } - if m.ISCSI != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) - n120, err := m.ISCSI.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n120 - } - if m.Cinder != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) - n121, err := m.Cinder.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n121 - } - if m.CephFS != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) - n122, err := m.CephFS.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n122 - } - if m.FC != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) - n123, err := m.FC.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n123 - } - if m.Flocker != nil { - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) - n124, err := m.Flocker.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n124 - } - if m.FlexVolume != nil { - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) - n125, err := m.FlexVolume.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n125 - } - if m.AzureFile != nil { - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) - n126, err := m.AzureFile.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n126 - } - if m.VsphereVolume != nil { - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) - n127, err := m.VsphereVolume.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n127 - } - if m.Quobyte != nil { - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) - n128, err := m.Quobyte.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n128 - } - if m.AzureDisk != nil { - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size())) - n129, err := m.AzureDisk.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n129 - } - if m.PhotonPersistentDisk != nil { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) - n130, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n130 - } - if m.PortworxVolume != nil { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) - n131, err := m.PortworxVolume.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n131 - } - if m.ScaleIO != nil { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) - n132, err := m.ScaleIO.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n132 - } - if m.Local != nil { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Local.Size())) - n133, err := m.Local.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n133 - } - if m.StorageOS != nil { - dAtA[i] = 0xaa - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size())) - n134, err := 
m.StorageOS.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n134 - } - if m.CSI != nil { - dAtA[i] = 0xb2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CSI.Size())) - n135, err := m.CSI.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n135 - } - return i, nil -} - -func (m *PersistentVolumeSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Capacity) > 0 { - keysForCapacity := make([]string, 0, len(m.Capacity)) - for k := range m.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - for _, k := range keysForCapacity { - dAtA[i] = 0xa - i++ - v := m.Capacity[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n136, err := (&v).MarshalTo(dAtA[i:]) + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x1a + if m.TargetRef != nil { + { + size, err := m.TargetRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n136 - } - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeSource.Size())) - n137, err := m.PersistentVolumeSource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n137 - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if m.ClaimRef != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ClaimRef.Size())) - n138, err := m.ClaimRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n138 - } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PersistentVolumeReclaimPolicy))) - i += copy(dAtA[i:], m.PersistentVolumeReclaimPolicy) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageClassName))) - i += copy(dAtA[i:], m.StorageClassName) - if len(m.MountOptions) > 0 { - for _, s := range m.MountOptions { - dAtA[i] = 0x3a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if m.VolumeMode != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) - i += copy(dAtA[i:], *m.VolumeMode) - } - if m.NodeAffinity != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NodeAffinity.Size())) - n139, err := m.NodeAffinity.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n139 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m 
*PersistentVolumeStatus) Marshal() (dAtA []byte, err error) { +func (m *EndpointPort) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i += copy(dAtA[i:], m.Phase) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - return i, nil -} - -func (m *PhotonPersistentDiskVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *EndpointPort) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PhotonPersistentDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EndpointPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.Protocol) + copy(dAtA[i:], m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i-- + dAtA[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PdID))) - i += copy(dAtA[i:], m.PdID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - return i, nil + return len(dAtA) - i, nil } -func (m *Pod) Marshal() (dAtA []byte, err error) { +func (m *EndpointSubset) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Pod) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EndpointSubset) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointSubset) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n140, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } - i += n140 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n141, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.NotReadyAddresses) > 0 { + for iNdEx := len(m.NotReadyAddresses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NotReadyAddresses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - i += n141 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n142, err := 
m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Addresses) > 0 { + for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - i += n142 - return i, nil + return len(dAtA) - i, nil } -func (m *PodAffinity) Marshal() (dAtA []byte, err error) { +func (m *Endpoints) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodAffinity) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Endpoints) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Endpoints) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.RequiredDuringSchedulingIgnoredDuringExecution { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Subsets) > 0 { + for iNdEx := len(m.Subsets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subsets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodAffinityTerm) Marshal() (dAtA []byte, err error) { +func (m *EndpointsList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EndpointsList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointsList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.LabelSelector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LabelSelector.Size())) - n143, err := m.LabelSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i += n143 } - if len(m.Namespaces) > 0 { - for _, s := range m.Namespaces { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil 
{ + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKey))) - i += copy(dAtA[i:], m.TopologyKey) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodAntiAffinity) Marshal() (dAtA []byte, err error) { +func (m *EnvFromSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodAntiAffinity) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EnvFromSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvFromSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.RequiredDuringSchedulingIgnoredDuringExecution { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.ConfigMapRef != nil { + { + size, err := m.ConfigMapRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Prefix))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodAttachOptions) Marshal() (dAtA []byte, err error) { +func (m *EnvVar) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodAttachOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - if m.Stdin { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x10 - i++ - if m.Stdout { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x18 - i++ - if m.Stderr { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x20 - i++ - if m.TTY { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) - i += copy(dAtA[i:], m.Container) - return i, nil -} - -func (m *PodCondition) Marshal() (dAtA []byte, err error) { +func (m *EnvVar) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodCondition) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EnvVar) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n144, err := m.LastProbeTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n144 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n145, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.ValueFrom != nil { + { + size, err := m.ValueFrom.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i += n145 - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodDNSConfig) Marshal() (dAtA []byte, err error) { +func (m *EnvVarSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodDNSConfig) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EnvVarSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvVarSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Nameservers) > 0 { - for _, s := range m.Nameservers { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.SecretKeyRef != nil { + { + size, err := m.SecretKeyRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 } - if len(m.Searches) > 0 { - for _, s := range m.Searches { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.ConfigMapKeyRef != nil { + { + size, err := m.ConfigMapKeyRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - if len(m.Options) > 0 { - for _, msg := range m.Options { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.ResourceFieldRef != nil { + { + size, err := m.ResourceFieldRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.FieldRef != nil { + { + size, err := m.FieldRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } -func (m *PodDNSConfigOption) Marshal() (dAtA []byte, err error) { +func (m *EphemeralContainer) Marshal() (dAtA []byte, err error) { size := 
m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodDNSConfigOption) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EphemeralContainer) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EphemeralContainer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.Value != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value))) - i += copy(dAtA[i:], *m.Value) + i -= len(m.TargetContainerName) + copy(dAtA[i:], m.TargetContainerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetContainerName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.EphemeralContainerCommon.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodExecOptions) Marshal() (dAtA []byte, err error) { +func (m *EphemeralContainerCommon) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodExecOptions) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EphemeralContainerCommon) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EphemeralContainerCommon) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Stdin { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.StartupProbe != nil { + { + size, err := m.StartupProbe.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 } - i++ - dAtA[i] = 0x10 - i++ - if m.Stdout { + if len(m.VolumeDevices) > 0 { + for iNdEx := len(m.VolumeDevices) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeDevices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + } + i -= len(m.TerminationMessagePolicy) + copy(dAtA[i:], m.TerminationMessagePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePolicy))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + if len(m.EnvFrom) > 0 { + for iNdEx := len(m.EnvFrom) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EnvFrom[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + } + i-- + if m.TTY { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x18 - i++ - if m.Stderr { + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + i-- + if m.StdinOnce { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x20 - i++ - if m.TTY { + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + i-- + if m.Stdin { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) - i += copy(dAtA[i:], m.Container) - if len(m.Command) > 0 { - for _, s := range 
m.Command { - dAtA[i] = 0x32 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + if m.SecurityContext != nil { + { + size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x7a } - return i, nil -} - -func (m *PodList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n146, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n146 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i -= len(m.ImagePullPolicy) + copy(dAtA[i:], m.ImagePullPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImagePullPolicy))) + i-- + dAtA[i] = 0x72 + i -= len(m.TerminationMessagePath) + copy(dAtA[i:], m.TerminationMessagePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePath))) + i-- + dAtA[i] = 0x6a + if m.Lifecycle != nil { + { + size, err := m.Lifecycle.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x62 } - return i, nil -} - -func (m *PodLogOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodLogOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) - i += copy(dAtA[i:], m.Container) - dAtA[i] = 0x10 - i++ - if m.Follow { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.ReadinessProbe != nil { + { + size, err := m.ReadinessProbe.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a } - i++ - dAtA[i] = 0x18 - i++ - if m.Previous { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.LivenessProbe != nil { + { + size, err := m.LivenessProbe.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 } - i++ - if m.SinceSeconds != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.SinceSeconds)) + if len(m.VolumeMounts) > 0 { + for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } } - if m.SinceTime != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SinceTime.Size())) - n147, err := m.SinceTime.MarshalTo(dAtA[i:]) + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n147 - } - dAtA[i] = 0x30 - i++ - if m.Timestamps { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.TailLines != nil { - 
dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TailLines)) - } - if m.LimitBytes != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes)) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil -} - -func (m *PodPortForwardOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err + i-- + dAtA[i] = 0x42 + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } } - return dAtA[:n], nil -} - -func (m *PodPortForwardOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l if len(m.Ports) > 0 { - for _, num := range m.Ports { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(num)) + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } } - return i, nil -} - -func (m *PodProxyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err + i -= len(m.WorkingDir) + copy(dAtA[i:], m.WorkingDir) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WorkingDir))) + i-- + dAtA[i] = 0x2a + if len(m.Args) > 0 { + for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Args[iNdEx]) + copy(dAtA[i:], m.Args[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Args[iNdEx]))) + i-- + dAtA[i] = 0x22 + } } - return dAtA[:n], nil -} - -func (m *PodProxyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l + if len(m.Command) > 0 { + for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - return i, nil + return len(dAtA) - i, nil } -func (m *PodReadinessGate) Marshal() (dAtA []byte, err error) { +func (m *EphemeralContainers) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodReadinessGate) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConditionType))) - i += copy(dAtA[i:], m.ConditionType) - return i, nil -} - -func (m *PodSecurityContext) Marshal() (dAtA []byte, err error) { +func (m *EphemeralContainers) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSecurityContext) MarshalTo(dAtA []byte) 
(int, error) { - var i int +func (m *EphemeralContainers) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.SELinuxOptions != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n148, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n148 - } - if m.RunAsUser != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsUser)) - } - if m.RunAsNonRoot != nil { - dAtA[i] = 0x18 - i++ - if *m.RunAsNonRoot { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if len(m.SupplementalGroups) > 0 { - for _, num := range m.SupplementalGroups { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(num)) - } - } - if m.FSGroup != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.FSGroup)) - } - if m.RunAsGroup != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsGroup)) - } - if len(m.Sysctls) > 0 { - for _, msg := range m.Sysctls { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.EphemeralContainers) > 0 { + for iNdEx := len(m.EphemeralContainers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EphemeralContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil -} - -func (m *PodSignature) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodSignature) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.PodController != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodController.Size())) - n149, err := m.PodController.MarshalTo(dAtA[i:]) + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n149 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodSpec) Marshal() (dAtA []byte, err error) { +func (m *Event) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Event) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Volumes) > 0 { - for _, msg := range m.Volumes { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i -= len(m.ReportingInstance) + copy(dAtA[i:], m.ReportingInstance) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) + i-- + dAtA[i] = 0x7a + i -= len(m.ReportingController) + copy(dAtA[i:], m.ReportingController) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) + i-- + dAtA[i] = 0x72 + if m.Related != nil { + { + size, err := m.Related.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) } + i-- + dAtA[i] = 0x6a } - if len(m.Containers) > 0 { - for _, msg := range m.Containers { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i -= len(m.Action) + copy(dAtA[i:], m.Action) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) + i-- + dAtA[i] = 0x62 + if m.Series != nil { + { + size, err := m.Series.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x5a } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RestartPolicy))) - i += copy(dAtA[i:], m.RestartPolicy) - if m.TerminationGracePeriodSeconds != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminationGracePeriodSeconds)) - } - if m.ActiveDeadlineSeconds != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) - } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DNSPolicy))) - i += copy(dAtA[i:], m.DNSPolicy) - if len(m.NodeSelector) > 0 { - keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) - for k := range m.NodeSelector { - keysForNodeSelector = append(keysForNodeSelector, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) - for _, k := range keysForNodeSelector { - dAtA[i] = 0x3a - i++ - v := m.NodeSelector[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) + { + size, err := m.EventTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) - i += copy(dAtA[i:], m.ServiceAccountName) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedServiceAccount))) - i += copy(dAtA[i:], m.DeprecatedServiceAccount) + i-- dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) - i += copy(dAtA[i:], m.NodeName) - dAtA[i] = 0x58 - i++ - if m.HostNetwork { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x60 - i++ - if m.HostPID { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x68 - i++ - if m.HostIPC { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecurityContext != nil { - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecurityContext.Size())) - n150, err := m.SecurityContext.MarshalTo(dAtA[i:]) + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x4a + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x40 + { + size, err := m.LastTimestamp.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n150 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.ImagePullSecrets) > 0 { - for _, msg := range m.ImagePullSecrets { - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Hostname))) - i += copy(dAtA[i:], m.Hostname) - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subdomain))) - i += copy(dAtA[i:], m.Subdomain) - if m.Affinity != nil { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Affinity.Size())) - n151, err := m.Affinity.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x3a + { + size, err := m.FirstTimestamp.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n151 - } - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SchedulerName))) - i += copy(dAtA[i:], m.SchedulerName) - if len(m.InitContainers) > 0 { - for _, msg := range m.InitContainers { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.AutomountServiceAccountToken != nil { - dAtA[i] = 0xa8 - i++ - dAtA[i] = 0x1 - i++ - if *m.AutomountServiceAccountToken { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if len(m.Tolerations) > 0 { - for _, msg := range m.Tolerations { - dAtA[i] = 0xb2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.HostAliases) > 0 { - for _, msg := range m.HostAliases { - dAtA[i] = 0xba - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0xc2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PriorityClassName))) - i += copy(dAtA[i:], m.PriorityClassName) - if m.Priority != nil { - dAtA[i] = 0xc8 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.DNSConfig != nil { - dAtA[i] = 0xd2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DNSConfig.Size())) - n152, err := m.DNSConfig.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x32 + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n152 - } - if m.ShareProcessNamespace != nil { - dAtA[i] = 0xd8 - i++ - dAtA[i] = 0x1 - i++ - if *m.ShareProcessNamespace { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.ReadinessGates) > 0 { - for _, msg := range m.ReadinessGates { - dAtA[i] = 0xe2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i-- + dAtA[i] = 0x2a + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x22 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x1a + { + size, err := m.InvolvedObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.RuntimeClassName != nil { - dAtA[i] = 0xea - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RuntimeClassName))) - i += copy(dAtA[i:], *m.RuntimeClassName) - } - if m.EnableServiceLinks != nil { - dAtA[i] = 0xf0 - i++ - dAtA[i] = 0x1 - i++ - if *m.EnableServiceLinks 
{ - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i++ + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodStatus) Marshal() (dAtA []byte, err error) { +func (m *EventList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EventList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i += copy(dAtA[i:], m.Phase) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) - i += copy(dAtA[i:], m.HostIP) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodIP))) - i += copy(dAtA[i:], m.PodIP) - if m.StartTime != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.StartTime.Size())) - n153, err := m.StartTime.MarshalTo(dAtA[i:]) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n153 - } - if len(m.ContainerStatuses) > 0 { - for _, msg := range m.ContainerStatuses { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.QOSClass))) - i += copy(dAtA[i:], m.QOSClass) - if len(m.InitContainerStatuses) > 0 { - for _, msg := range m.InitContainerStatuses { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NominatedNodeName))) - i += copy(dAtA[i:], m.NominatedNodeName) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodStatusResult) Marshal() (dAtA []byte, err error) { +func (m *EventSeries) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodStatusResult) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EventSeries) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n154, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x1a + { + size, err := m.LastObservedTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n154 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n155, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n155 - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *PodTemplate) Marshal() (dAtA []byte, err error) { +func (m *EventSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodTemplate) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EventSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n156, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n156 + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n157, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n157 - return i, nil + i -= len(m.Component) + copy(dAtA[i:], m.Component) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Component))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodTemplateList) Marshal() (dAtA []byte, err error) { +func (m *ExecAction) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodTemplateList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ExecAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n158, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n158 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.Command) > 0 { + for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + 
return len(dAtA) - i, nil } -func (m *PodTemplateSpec) Marshal() (dAtA []byte, err error) { +func (m *FCVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodTemplateSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n159, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n159 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n160, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n160 - return i, nil -} - -func (m *PortworxVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *FCVolumeSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PortworxVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *FCVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) - i += copy(dAtA[i:], m.VolumeID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x18 - i++ + if len(m.WWIDs) > 0 { + for iNdEx := len(m.WWIDs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.WWIDs[iNdEx]) + copy(dAtA[i:], m.WWIDs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WWIDs[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + i-- if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x20 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x1a + if m.Lun != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Lun)) + i-- + dAtA[i] = 0x10 + } + if len(m.TargetWWNs) > 0 { + for iNdEx := len(m.TargetWWNs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TargetWWNs[iNdEx]) + copy(dAtA[i:], m.TargetWWNs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetWWNs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *Preconditions) Marshal() (dAtA []byte, err error) { +func (m *FlexPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Preconditions) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *FlexPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlexPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.UID != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID))) - i += copy(dAtA[i:], *m.UID) + if len(m.Options) > 0 { + keysForOptions := make([]string, 0, len(m.Options)) + for k := range m.Options { + keysForOptions = append(keysForOptions, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + for iNdEx := len(keysForOptions) - 1; iNdEx >= 0; iNdEx-- { + v := 
m.Options[string(keysForOptions[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForOptions[iNdEx]) + copy(dAtA[i:], keysForOptions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForOptions[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - return i, nil + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PreferAvoidPodsEntry) Marshal() (dAtA []byte, err error) { +func (m *FlexVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PreferAvoidPodsEntry) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *FlexVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlexVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodSignature.Size())) - n161, err := m.PodSignature.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Options) > 0 { + keysForOptions := make([]string, 0, len(m.Options)) + for k := range m.Options { + keysForOptions = append(keysForOptions, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + for iNdEx := len(keysForOptions) - 1; iNdEx >= 0; iNdEx-- { + v := m.Options[string(keysForOptions[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForOptions[iNdEx]) + copy(dAtA[i:], keysForOptions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForOptions[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } } - i += n161 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.EvictionTime.Size())) - n162, err := m.EvictionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n162 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x20 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PreferredSchedulingTerm) Marshal() (dAtA []byte, err error) { +func (m *FlockerVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PreferredSchedulingTerm) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *FlockerVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlockerVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Weight)) + i -= len(m.DatasetUUID) + copy(dAtA[i:], m.DatasetUUID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetUUID))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Preference.Size())) - n163, err := m.Preference.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n163 - return i, nil + i -= len(m.DatasetName) + copy(dAtA[i:], m.DatasetName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Probe) Marshal() (dAtA []byte, err error) { +func (m *GCEPersistentDiskVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Probe) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GCEPersistentDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GCEPersistentDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Handler.Size())) - n164, err := m.Handler.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n164 - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.InitialDelaySeconds)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TimeoutSeconds)) + i-- dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PeriodSeconds)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SuccessThreshold)) - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FailureThreshold)) - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.Partition)) + i-- + dAtA[i] = 0x18 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.PDName) + copy(dAtA[i:], m.PDName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PDName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ProjectedVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *GitRepoVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ProjectedVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GitRepoVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GitRepoVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Sources) > 0 { - for _, msg := range m.Sources { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.DefaultMode != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) - } - return i, nil + i -= len(m.Directory) + copy(dAtA[i:], m.Directory) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Directory))) + i-- + dAtA[i] = 0x1a + i -= len(m.Revision) + copy(dAtA[i:], m.Revision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Revision))) + i-- + dAtA[i] = 0x12 + i -= len(m.Repository) + copy(dAtA[i:], m.Repository) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Repository))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *QuobyteVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *GlusterfsPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *QuobyteVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GlusterfsPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GlusterfsPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Registry))) - i += copy(dAtA[i:], m.Registry) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volume))) - i += copy(dAtA[i:], m.Volume) - dAtA[i] = 0x18 - i++ + if m.EndpointsNamespace != nil { + i -= len(*m.EndpointsNamespace) + copy(dAtA[i:], *m.EndpointsNamespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.EndpointsNamespace))) + i-- + dAtA[i] = 0x22 + } + i-- if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tenant))) - i += copy(dAtA[i:], m.Tenant) - return i, nil + i-- + dAtA[i] = 0x18 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + i -= len(m.EndpointsName) + copy(dAtA[i:], m.EndpointsName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *RBDPersistentVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *GlusterfsVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *RBDPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GlusterfsVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GlusterfsVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if 
len(m.CephMonitors) > 0 { - for _, s := range m.CephMonitors { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDImage))) - i += copy(dAtA[i:], m.RBDImage) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDPool))) - i += copy(dAtA[i:], m.RBDPool) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RadosUser))) - i += copy(dAtA[i:], m.RadosUser) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Keyring))) - i += copy(dAtA[i:], m.Keyring) - if m.SecretRef != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n165, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n165 - } - dAtA[i] = 0x40 - i++ + i-- if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x18 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + i -= len(m.EndpointsName) + copy(dAtA[i:], m.EndpointsName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *RBDVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *HTTPGetAction) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *RBDVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *HTTPGetAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPGetAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.CephMonitors) > 0 { - for _, s := range m.CephMonitors { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if len(m.HTTPHeaders) > 0 { + for iNdEx := len(m.HTTPHeaders) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.HTTPHeaders[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x2a } } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDImage))) - i += copy(dAtA[i:], m.RBDImage) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) + i -= len(m.Scheme) + copy(dAtA[i:], m.Scheme) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scheme))) + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDPool))) - i += copy(dAtA[i:], m.RBDPool) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RadosUser))) - i += copy(dAtA[i:], m.RadosUser) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Keyring))) - i += copy(dAtA[i:], m.Keyring) - if m.SecretRef != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n166, err := m.SecretRef.MarshalTo(dAtA[i:]) + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n166 - } - dAtA[i] = 0x40 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i++ - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *RangeAllocation) Marshal() (dAtA []byte, err error) { +func (m *HTTPHeader) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *RangeAllocation) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *HTTPHeader) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n167, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n167 + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Range))) - i += copy(dAtA[i:], m.Range) - if m.Data != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data))) - i += copy(dAtA[i:], m.Data) - } - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ReplicationController) Marshal() (dAtA []byte, err error) { +func (m *Handler) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ReplicationController) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Handler) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Handler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n168, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.TCPSocket != nil { + { + size, err := m.TCPSocket.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i += n168 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n169, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.HTTPGet != nil { + { + size, err := m.HTTPGet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i += n169 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n170, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Exec != nil { + { + size, err := m.Exec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i 
= encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n170 - return i, nil + return len(dAtA) - i, nil } -func (m *ReplicationControllerCondition) Marshal() (dAtA []byte, err error) { +func (m *HostAlias) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ReplicationControllerCondition) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *HostAlias) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostAlias) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n171, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Hostnames) > 0 { + for iNdEx := len(m.Hostnames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Hostnames[iNdEx]) + copy(dAtA[i:], m.Hostnames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostnames[iNdEx]))) + i-- + dAtA[i] = 0x12 + } } - i += n171 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ReplicationControllerList) Marshal() (dAtA []byte, err error) { +func (m *HostPathVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ReplicationControllerList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *HostPathVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostPathVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n172, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n172 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + if m.Type != nil { + i -= len(*m.Type) + copy(dAtA[i:], *m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ReplicationControllerSpec) Marshal() (dAtA []byte, err error) { +func (m *ISCSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } 
return dAtA[:n], nil } -func (m *ReplicationControllerSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ISCSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ISCSIPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.InitiatorName != nil { + i -= len(*m.InitiatorName) + copy(dAtA[i:], *m.InitiatorName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) + i-- + dAtA[i] = 0x62 } - if len(m.Selector) > 0 { - keysForSelector := make([]string, 0, len(m.Selector)) - for k := range m.Selector { - keysForSelector = append(keysForSelector, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for _, k := range keysForSelector { - dAtA[i] = 0x12 - i++ - v := m.Selector[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) + i-- + if m.SessionCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x52 } - if m.Template != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n173, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + if m.DiscoveryCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + if len(m.Portals) > 0 { + for iNdEx := len(m.Portals) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Portals[iNdEx]) + copy(dAtA[i:], m.Portals[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Portals[iNdEx]))) + i-- + dAtA[i] = 0x3a } - i += n173 } - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - return i, nil + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x2a + i -= len(m.ISCSIInterface) + copy(dAtA[i:], m.ISCSIInterface) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) + i-- + dAtA[i] = 0x18 + i -= len(m.IQN) + copy(dAtA[i:], m.IQN) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) + i-- + dAtA[i] = 0x12 + i -= len(m.TargetPortal) + copy(dAtA[i:], m.TargetPortal) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ReplicationControllerStatus) Marshal() (dAtA []byte, err error) { +func (m *ISCSIVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ReplicationControllerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ISCSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ISCSIVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.InitiatorName != nil { + i -= len(*m.InitiatorName) + copy(dAtA[i:], *m.InitiatorName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) + i-- + dAtA[i] = 0x62 + } + i-- + if m.SessionCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + i-- + if m.DiscoveryCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + if len(m.Portals) > 0 { + for iNdEx := len(m.Portals) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Portals[iNdEx]) + copy(dAtA[i:], m.Portals[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Portals[iNdEx]))) + i-- + dAtA[i] = 0x3a } } - return i, nil + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x2a + i -= len(m.ISCSIInterface) + copy(dAtA[i:], m.ISCSIInterface) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) + i-- + dAtA[i] = 0x18 + i -= len(m.IQN) + copy(dAtA[i:], m.IQN) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) + i-- + dAtA[i] = 0x12 + i -= len(m.TargetPortal) + copy(dAtA[i:], m.TargetPortal) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ResourceFieldSelector) Marshal() (dAtA []byte, err error) { +func (m *KeyToPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ResourceFieldSelector) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *KeyToPath) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KeyToPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) - i += copy(dAtA[i:], m.ContainerName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i += copy(dAtA[i:], m.Resource) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Divisor.Size())) - n174, err := m.Divisor.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Mode != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) + i-- + dAtA[i] = 0x18 } - i += 
n174 - return i, nil + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ResourceQuota) Marshal() (dAtA []byte, err error) { +func (m *Lifecycle) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ResourceQuota) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Lifecycle) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Lifecycle) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n175, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n175 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n176, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n176 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n177, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n177 - return i, nil -} - -func (m *ResourceQuotaList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceQuotaList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n178, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.PreStop != nil { + { + size, err := m.PreStop.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i += n178 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.PostStart != nil { + { + size, err := m.PostStart.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } -func (m *ResourceQuotaSpec) Marshal() (dAtA []byte, err error) { +func (m *LimitRange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ResourceQuotaSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LimitRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Hard) > 0 { - keysForHard := make([]string, 0, len(m.Hard)) - for k := range m.Hard { - keysForHard = append(keysForHard, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForHard) - for _, k := range keysForHard { - dAtA[i] = 0xa - i++ - v := m.Hard[ResourceName(k)] - msgSize := 0 - if (&v) != 
nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n179, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n179 - } - } - if len(m.Scopes) > 0 { - for _, s := range m.Scopes { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.ScopeSelector != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ScopeSelector.Size())) - n180, err := m.ScopeSelector.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n180 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ResourceQuotaStatus) Marshal() (dAtA []byte, err error) { +func (m *LimitRangeItem) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitRangeItem) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Hard) > 0 { - keysForHard := make([]string, 0, len(m.Hard)) - for k := range m.Hard { - keysForHard = append(keysForHard, string(k)) + if len(m.MaxLimitRequestRatio) > 0 { + keysForMaxLimitRequestRatio := make([]string, 0, len(m.MaxLimitRequestRatio)) + for k := range m.MaxLimitRequestRatio { + keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForHard) - for _, k := range keysForHard { + github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio) + for iNdEx := len(keysForMaxLimitRequestRatio) - 1; iNdEx >= 0; iNdEx-- { + v := m.MaxLimitRequestRatio[ResourceName(keysForMaxLimitRequestRatio[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForMaxLimitRequestRatio[iNdEx]) + copy(dAtA[i:], keysForMaxLimitRequestRatio[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMaxLimitRequestRatio[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - v := m.Hard[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.DefaultRequest) > 0 { + keysForDefaultRequest := make([]string, 0, len(m.DefaultRequest)) + for k := range m.DefaultRequest { + keysForDefaultRequest = 
append(keysForDefaultRequest, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest) + for iNdEx := len(keysForDefaultRequest) - 1; iNdEx >= 0; iNdEx-- { + v := m.DefaultRequest[ResourceName(keysForDefaultRequest[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForDefaultRequest[iNdEx]) + copy(dAtA[i:], keysForDefaultRequest[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDefaultRequest[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Default) > 0 { + keysForDefault := make([]string, 0, len(m.Default)) + for k := range m.Default { + keysForDefault = append(keysForDefault, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefault) + for iNdEx := len(keysForDefault) - 1; iNdEx >= 0; iNdEx-- { + v := m.Default[ResourceName(keysForDefault[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n181, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(keysForDefault[iNdEx]) + copy(dAtA[i:], keysForDefault[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDefault[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Min) > 0 { + keysForMin := make([]string, 0, len(m.Min)) + for k := range m.Min { + keysForMin = append(keysForMin, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMin) + for iNdEx := len(keysForMin) - 1; iNdEx >= 0; iNdEx-- { + v := m.Min[ResourceName(keysForMin[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n181 + i-- + dAtA[i] = 0x12 + i -= len(keysForMin[iNdEx]) + copy(dAtA[i:], keysForMin[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMin[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a } } - if len(m.Used) > 0 { - keysForUsed := make([]string, 0, len(m.Used)) - for k := range m.Used { - keysForUsed = append(keysForUsed, string(k)) + if len(m.Max) > 0 { + keysForMax := make([]string, 0, len(m.Max)) + for k := range m.Max { + keysForMax = append(keysForMax, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForUsed) - for _, k := range keysForUsed { + github_com_gogo_protobuf_sortkeys.Strings(keysForMax) + for iNdEx := len(keysForMax) - 1; iNdEx >= 0; iNdEx-- { + v := m.Max[ResourceName(keysForMax[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - v := m.Used[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForMax[iNdEx]) + copy(dAtA[i:], keysForMax[iNdEx]) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(keysForMax[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n182, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n182 } } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ResourceRequirements) Marshal() (dAtA []byte, err error) { +func (m *LimitRangeList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LimitRangeList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitRangeList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Limits) > 0 { - keysForLimits := make([]string, 0, len(m.Limits)) - for k := range m.Limits { - keysForLimits = append(keysForLimits, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) - for _, k := range keysForLimits { - dAtA[i] = 0xa - i++ - v := m.Limits[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n183, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n183 + i-- + dAtA[i] = 0x12 } } - if len(m.Requests) > 0 { - keysForRequests := make([]string, 0, len(m.Requests)) - for k := range m.Requests { - keysForRequests = append(keysForRequests, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) - for _, k := range keysForRequests { - dAtA[i] = 0x12 - i++ - v := m.Requests[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n184, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n184 + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SELinuxOptions) Marshal() (dAtA []byte, err error) { +func (m *LimitRangeSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err 
:= m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SELinuxOptions) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LimitRangeSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitRangeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Role))) - i += copy(dAtA[i:], m.Role) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) - i += copy(dAtA[i:], m.Level) - return i, nil + if len(m.Limits) > 0 { + for iNdEx := len(m.Limits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Limits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *ScaleIOPersistentVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *List) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ScaleIOPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *List) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *List) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Gateway))) - i += copy(dAtA[i:], m.Gateway) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.System))) - i += copy(dAtA[i:], m.System) - if m.SecretRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n185, err := m.SecretRef.MarshalTo(dAtA[i:]) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n185 - } - dAtA[i] = 0x20 - i++ - if m.SSLEnabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProtectionDomain))) - i += copy(dAtA[i:], m.ProtectionDomain) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePool))) - i += copy(dAtA[i:], m.StoragePool) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageMode))) - i += copy(dAtA[i:], m.StorageMode) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) - i += copy(dAtA[i:], m.VolumeName) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x50 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i++ - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m 
*ScaleIOVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *LoadBalancerIngress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ScaleIOVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LoadBalancerIngress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LoadBalancerIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Gateway))) - i += copy(dAtA[i:], m.Gateway) + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.System))) - i += copy(dAtA[i:], m.System) - if m.SecretRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n186, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n186 - } - dAtA[i] = 0x20 - i++ - if m.SSLEnabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProtectionDomain))) - i += copy(dAtA[i:], m.ProtectionDomain) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePool))) - i += copy(dAtA[i:], m.StoragePool) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageMode))) - i += copy(dAtA[i:], m.StorageMode) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) - i += copy(dAtA[i:], m.VolumeName) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x50 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ScopeSelector) Marshal() (dAtA []byte, err error) { +func (m *LoadBalancerStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ScopeSelector) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LoadBalancerStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LoadBalancerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.MatchExpressions) > 0 { - for _, msg := range m.MatchExpressions { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Ingress) > 0 { + for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } -func (m *ScopedResourceSelectorRequirement) Marshal() (dAtA []byte, err error) { +func (m *LocalObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = 
make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ScopedResourceSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LocalObjectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LocalObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ScopeName))) - i += copy(dAtA[i:], m.ScopeName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) - i += copy(dAtA[i:], m.Operator) - if len(m.Values) > 0 { - for _, s := range m.Values { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil + return len(dAtA) - i, nil } -func (m *Secret) Marshal() (dAtA []byte, err error) { +func (m *LocalVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Secret) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LocalVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LocalVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n187, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n187 - if len(m.Data) > 0 { - keysForData := make([]string, 0, len(m.Data)) - for k := range m.Data { - keysForData = append(keysForData, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForData) - for _, k := range keysForData { - dAtA[i] = 0x12 - i++ - v := m.Data[string(k)] - byteSize := 0 - if v != nil { - byteSize = 1 + len(v) + sovGenerated(uint64(len(v))) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + byteSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - if v != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if len(m.StringData) > 0 { - keysForStringData := make([]string, 0, len(m.StringData)) - for k := range m.StringData { - keysForStringData = append(keysForStringData, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForStringData) - for _, k := range keysForStringData { - dAtA[i] = 0x22 - i++ - v := m.StringData[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } + if m.FSType != nil { + i -= len(*m.FSType) + copy(dAtA[i:], *m.FSType) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecretEnvSource) Marshal() (dAtA []byte, err error) { +func (m *NFSVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SecretEnvSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NFSVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n188, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n188 - if m.Optional != nil { - dAtA[i] = 0x10 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x18 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + i -= len(m.Server) + copy(dAtA[i:], m.Server) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Server))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecretKeySelector) Marshal() (dAtA []byte, err error) { +func (m *Namespace) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SecretKeySelector) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Namespace) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Namespace) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n189, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n189 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - if m.Optional != nil { - dAtA[i] = 0x18 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i++ + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecretList) Marshal() (dAtA []byte, err error) { +func (m *NamespaceCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SecretList) 
MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NamespaceCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamespaceCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n190, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n190 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x22 + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecretProjection) Marshal() (dAtA []byte, err error) { +func (m *NamespaceList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SecretProjection) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NamespaceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamespaceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n191, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n191 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if m.Optional != nil { - dAtA[i] = 0x20 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i++ + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecretReference) Marshal() (dAtA []byte, err error) { +func (m *NamespaceSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SecretReference) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NamespaceSpec) MarshalTo(dAtA []byte) (int, error) { + 
size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamespaceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil + if len(m.Finalizers) > 0 { + for iNdEx := len(m.Finalizers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Finalizers[iNdEx]) + copy(dAtA[i:], m.Finalizers[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Finalizers[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *SecretVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *NamespaceStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SecretVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NamespaceStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamespaceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) - i += copy(dAtA[i:], m.SecretName) - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if m.DefaultMode != nil { - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) - } - if m.Optional != nil { - dAtA[i] = 0x20 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecurityContext) Marshal() (dAtA []byte, err error) { +func (m *Node) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Node) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Node) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Capabilities != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Capabilities.Size())) - n192, err := m.Capabilities.MarshalTo(dAtA[i:]) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n192 - } - if m.Privileged != nil { - dAtA[i] = 0x10 - i++ - if *m.Privileged { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.SELinuxOptions != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, 
[generated code: regenerated gogo/protobuf marshalers for Kubernetes core/v1 API types — the old forward-writing Marshal()/MarshalTo() implementations (SecurityContext tail, SerializedReference, Service, ServiceAccount*, ServiceList, ServicePort, ServiceProxyOptions, ServiceSpec, ServiceStatus, SessionAffinityConfig, StorageOS*VolumeSource, Sysctl, TCPSocketAction, Taint, Toleration, TopologySelector*, TypedLocalObjectReference, Volume, VolumeDevice, VolumeMount, VolumeNodeAffinity, VolumeProjection, VolumeSource, VsphereVirtualDiskVolumeSource, WeightedPodAffinityTerm) are removed and replaced by back-to-front MarshalToSizedBuffer() implementations for the Node* and PersistentVolume* families (NodeAddress, NodeAffinity, NodeCondition, NodeConfigSource, NodeConfigStatus, NodeDaemonEndpoints, NodeList, NodeProxyOptions, NodeResources, NodeSelector*, NodeSpec, NodeStatus, NodeSystemInfo, ObjectFieldSelector, ObjectReference, PersistentVolume*, PersistentVolumeClaim*, PhotonPersistentDiskVolumeSource), with encodeVarintGenerated and the affected Size() functions updated accordingly; purely mechanical vendor regeneration, no hand-written changes.]
copy(dAtA[i:], m.PdID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PdID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *CSIPersistentVolumeSource) Size() (n int) { +func (m *Pod) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Pod) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Pod) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeHandle) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.VolumeAttributes) > 0 { - for k, v := range m.VolumeAttributes { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.ControllerPublishSecretRef != nil { - l = m.ControllerPublishSecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.NodeStageSecretRef != nil { - l = m.NodeStageSecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.NodePublishSecretRef != nil { - l = m.NodePublishSecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodAffinity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *CSIVolumeSource) Size() (n int) { +func (m *PodAffinity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - if m.ReadOnly != nil { - n += 2 - } - if m.FSType != nil { - l = len(*m.FSType) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for iNdEx := len(m.PreferredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PreferredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - if len(m.VolumeAttributes) > 0 { - for k, v := range m.VolumeAttributes { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for iNdEx := len(m.RequiredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.RequiredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } } - if m.NodePublishSecretRef != nil { - l = m.NodePublishSecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *PodAffinityTerm) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Capabilities) Size() (n int) { +func (m *PodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodAffinityTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Add) > 0 { - for _, s := range m.Add { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.TopologyKey) + copy(dAtA[i:], m.TopologyKey) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKey))) + i-- + dAtA[i] = 0x1a + if len(m.Namespaces) > 0 { + for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Namespaces[iNdEx]) + copy(dAtA[i:], m.Namespaces[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - if len(m.Drop) > 0 { - for _, s := range m.Drop { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if m.LabelSelector != nil { + { + size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *CephFSPersistentVolumeSource) Size() (n int) { +func (m *PodAntiAffinity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodAntiAffinity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodAntiAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Monitors) > 0 { - for _, s := range m.Monitors { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for iNdEx := len(m.PreferredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PreferredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.User) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SecretFile) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for iNdEx := len(m.RequiredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RequiredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - n += 2 - return n + return len(dAtA) - i, nil } -func (m 
*CephFSVolumeSource) Size() (n int) { - var l int - _ = l - if len(m.Monitors) > 0 { - for _, s := range m.Monitors { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.User) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SecretFile) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *PodAttachOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - n += 2 - return n + return dAtA[:n], nil } -func (m *CinderPersistentVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.VolumeID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n +func (m *PodAttachOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *CinderVolumeSource) Size() (n int) { +func (m *PodAttachOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.VolumeID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x2a + i-- + if m.TTY { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return n + i-- + dAtA[i] = 0x20 + i-- + if m.Stderr { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i-- + if m.Stdout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i-- + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *ClientIPConfig) Size() (n int) { - var l int - _ = l - if m.TimeoutSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) +func (m *PodCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ComponentCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Error) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *PodCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ComponentStatus) Size() (n int) { +func (m *PodCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + 
return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x22 + { + size, err := m.LastProbeTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ComponentStatusList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *PodDNSConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ConfigMap) Size() (n int) { +func (m *PodDNSConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDNSConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Data) > 0 { - for k, v := range m.Data { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } } - if len(m.BinaryData) > 0 { - for k, v := range m.BinaryData { - _ = k - _ = v - l = 0 - if v != nil { - l = 1 + len(v) + sovGenerated(uint64(len(v))) - } - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if len(m.Searches) > 0 { + for iNdEx := len(m.Searches) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Searches[iNdEx]) + copy(dAtA[i:], m.Searches[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Searches[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - return n + if len(m.Nameservers) > 0 { + for iNdEx := len(m.Nameservers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Nameservers[iNdEx]) + copy(dAtA[i:], m.Nameservers[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Nameservers[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *ConfigMapEnvSource) Size() (n int) { - var l int - _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Optional != nil { - n += 2 +func (m *PodDNSConfigOption) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ConfigMapKeySelector) Size() (n int) { +func (m *PodDNSConfigOption) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDNSConfigOption) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + 
sovGenerated(uint64(l)) - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - if m.Optional != nil { - n += 2 + if m.Value != nil { + i -= len(*m.Value) + copy(dAtA[i:], *m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value))) + i-- + dAtA[i] = 0x12 } - return n + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ConfigMapList) Size() (n int) { +func (m *PodExecOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodExecOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodExecOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Command) > 0 { + for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- + dAtA[i] = 0x32 } } - return n + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x2a + i-- + if m.TTY { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + i-- + if m.Stderr { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i-- + if m.Stdout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i-- + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *ConfigMapNodeConfigSource) Size() (n int) { - var l int - _ = l - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ResourceVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.KubeletConfigKey) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *PodIP) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ConfigMapProjection) Size() (n int) { +func (m *PodIP) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodIP) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Optional != nil { - n += 2 + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ConfigMapVolumeSource) Size() (n int) { +func (m *PodList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) 
+} + +func (m *PodList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - if m.DefaultMode != nil { - n += 1 + sovGenerated(uint64(*m.DefaultMode)) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.Optional != nil { - n += 2 + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodLogOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Container) Size() (n int) { +func (m *PodLogOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodLogOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Image) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Command) > 0 { - for _, s := range m.Command { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Args) > 0 { - for _, s := range m.Args { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + if m.LimitBytes != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes)) + i-- + dAtA[i] = 0x40 } - l = len(m.WorkingDir) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + if m.TailLines != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TailLines)) + i-- + dAtA[i] = 0x38 } - if len(m.Env) > 0 { - for _, e := range m.Env { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + i-- + if m.Timestamps { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - l = m.Resources.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.VolumeMounts) > 0 { - for _, e := range m.VolumeMounts { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x30 + if m.SinceTime != nil { + { + size, err := m.SinceTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2a } - if m.LivenessProbe != nil { - l = m.LivenessProbe.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ReadinessProbe != nil { - l = m.ReadinessProbe.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Lifecycle != nil { - l = m.Lifecycle.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.SinceSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SinceSeconds)) + i-- + dAtA[i] = 0x20 } - l = len(m.TerminationMessagePath) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ImagePullPolicy) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecurityContext != nil { - l = m.SecurityContext.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + if m.Previous { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - n += 3 - n += 3 - n += 3 - if len(m.EnvFrom) > 0 { - for _, e := range m.EnvFrom { - l = e.Size() - 
n += 2 + l + sovGenerated(uint64(l)) - } + i-- + dAtA[i] = 0x18 + i-- + if m.Follow { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - l = len(m.TerminationMessagePolicy) - n += 2 + l + sovGenerated(uint64(l)) - if len(m.VolumeDevices) > 0 { - for _, e := range m.VolumeDevices { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } + i-- + dAtA[i] = 0x10 + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodPortForwardOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ContainerImage) Size() (n int) { +func (m *PodPortForwardOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodPortForwardOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Names) > 0 { - for _, s := range m.Names { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + i = encodeVarintGenerated(dAtA, i, uint64(m.Ports[iNdEx])) + i-- + dAtA[i] = 0x8 } } - n += 1 + sovGenerated(uint64(m.SizeBytes)) - return n + return len(dAtA) - i, nil } -func (m *ContainerPort) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.HostPort)) - n += 1 + sovGenerated(uint64(m.ContainerPort)) - l = len(m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.HostIP) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *PodProxyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ContainerState) Size() (n int) { +func (m *PodProxyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodProxyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Waiting != nil { - l = m.Waiting.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Running != nil { - l = m.Running.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Terminated != nil { - l = m.Terminated.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ContainerStateRunning) Size() (n int) { - var l int - _ = l - l = m.StartedAt.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ContainerStateTerminated) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.ExitCode)) - n += 1 + sovGenerated(uint64(m.Signal)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = m.StartedAt.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.FinishedAt.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ContainerID) - n += 1 + l + sovGenerated(uint64(l)) - return n + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ContainerStateWaiting) Size() (n int) { - var l int - _ = l - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + 
sovGenerated(uint64(l)) - return n +func (m *PodReadinessGate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ContainerStatus) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.State.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTerminationState.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - n += 1 + sovGenerated(uint64(m.RestartCount)) - l = len(m.Image) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ImageID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ContainerID) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *PodReadinessGate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *DaemonEndpoint) Size() (n int) { +func (m *PodReadinessGate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - n += 1 + sovGenerated(uint64(m.Port)) - return n + i -= len(m.ConditionType) + copy(dAtA[i:], m.ConditionType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConditionType))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *DownwardAPIProjection) Size() (n int) { - var l int - _ = l - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *PodSecurityContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *DownwardAPIVolumeFile) Size() (n int) { - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - if m.FieldRef != nil { - l = m.FieldRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ResourceFieldRef != nil { - l = m.ResourceFieldRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Mode != nil { - n += 1 + sovGenerated(uint64(*m.Mode)) - } - return n +func (m *PodSecurityContext) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *DownwardAPIVolumeSource) Size() (n int) { +func (m *PodSecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.WindowsOptions != nil { + { + size, err := m.WindowsOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x42 } - if m.DefaultMode != nil { - n += 1 + sovGenerated(uint64(*m.DefaultMode)) - } - return n -} - -func (m *EmptyDirVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Medium) - n += 1 + l + sovGenerated(uint64(l)) - if m.SizeLimit != nil { - l = m.SizeLimit.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Sysctls) > 0 { + for iNdEx := len(m.Sysctls) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Sysctls[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } } - return n -} - -func (m *EndpointAddress) Size() (n int) { - var l int - _ = l - l = len(m.IP) - n += 1 + l + sovGenerated(uint64(l)) - if m.TargetRef != nil { - l = m.TargetRef.Size() - n += 1 + l + 
sovGenerated(uint64(l)) + if m.RunAsGroup != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsGroup)) + i-- + dAtA[i] = 0x30 } - l = len(m.Hostname) - n += 1 + l + sovGenerated(uint64(l)) - if m.NodeName != nil { - l = len(*m.NodeName) - n += 1 + l + sovGenerated(uint64(l)) + if m.FSGroup != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.FSGroup)) + i-- + dAtA[i] = 0x28 } - return n -} - -func (m *EndpointPort) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Port)) - l = len(m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *EndpointSubset) Size() (n int) { - var l int - _ = l - if len(m.Addresses) > 0 { - for _, e := range m.Addresses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.SupplementalGroups) > 0 { + for iNdEx := len(m.SupplementalGroups) - 1; iNdEx >= 0; iNdEx-- { + i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups[iNdEx])) + i-- + dAtA[i] = 0x20 } } - if len(m.NotReadyAddresses) > 0 { - for _, e := range m.NotReadyAddresses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.RunAsNonRoot != nil { + i-- + if *m.RunAsNonRoot { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x18 } - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + if m.RunAsUser != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsUser)) + i-- + dAtA[i] = 0x10 } - return n -} - -func (m *Endpoints) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Subsets) > 0 { - for _, e := range m.Subsets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.SELinuxOptions != nil { + { + size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *EndpointsList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *PodSignature) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *EnvFromSource) Size() (n int) { - var l int - _ = l - l = len(m.Prefix) - n += 1 + l + sovGenerated(uint64(l)) - if m.ConfigMapRef != nil { - l = m.ConfigMapRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n +func (m *PodSignature) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *EnvVar) Size() (n int) { +func (m *PodSignature) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - if m.ValueFrom != nil { - l = m.ValueFrom.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.PodController != nil { + { + size, err := m.PodController.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - 
i, nil } -func (m *EnvVarSource) Size() (n int) { - var l int - _ = l - if m.FieldRef != nil { - l = m.FieldRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ResourceFieldRef != nil { - l = m.ResourceFieldRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ConfigMapKeyRef != nil { - l = m.ConfigMapKeyRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SecretKeyRef != nil { - l = m.SecretKeyRef.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *PodSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Event) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.InvolvedObject.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Source.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.FirstTimestamp.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTimestamp.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Count)) - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = m.EventTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Series != nil { - l = m.Series.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.Action) - n += 1 + l + sovGenerated(uint64(l)) - if m.Related != nil { - l = m.Related.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.ReportingController) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ReportingInstance) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *EventList) Size() (n int) { +func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.EphemeralContainers) > 0 { + for iNdEx := len(m.EphemeralContainers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EphemeralContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 } } - return n -} - -func (m *EventSeries) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Count)) - l = m.LastObservedTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.State) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *EventSource) Size() (n int) { - var l int - _ = l - l = len(m.Component) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Host) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ExecAction) Size() (n int) { - var l int - _ = l - if len(m.Command) > 0 { - for _, s := range m.Command { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.TopologySpreadConstraints) > 0 { + for iNdEx := len(m.TopologySpreadConstraints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TopologySpreadConstraints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a } } - return n -} - -func (m 
*FCVolumeSource) Size() (n int) { - var l int - _ = l - if len(m.TargetWWNs) > 0 { - for _, s := range m.TargetWWNs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Overhead) > 0 { + keysForOverhead := make([]string, 0, len(m.Overhead)) + for k := range m.Overhead { + keysForOverhead = append(keysForOverhead, string(k)) } + github_com_gogo_protobuf_sortkeys.Strings(keysForOverhead) + for iNdEx := len(keysForOverhead) - 1; iNdEx >= 0; iNdEx-- { + v := m.Overhead[ResourceName(keysForOverhead[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForOverhead[iNdEx]) + copy(dAtA[i:], keysForOverhead[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForOverhead[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 + } + } + if m.PreemptionPolicy != nil { + i -= len(*m.PreemptionPolicy) + copy(dAtA[i:], *m.PreemptionPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xfa } - if m.Lun != nil { - n += 1 + sovGenerated(uint64(*m.Lun)) + if m.EnableServiceLinks != nil { + i-- + if *m.EnableServiceLinks { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf0 } - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if len(m.WWIDs) > 0 { - for _, s := range m.WWIDs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if m.RuntimeClassName != nil { + i -= len(*m.RuntimeClassName) + copy(dAtA[i:], *m.RuntimeClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RuntimeClassName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea + } + if len(m.ReadinessGates) > 0 { + for iNdEx := len(m.ReadinessGates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ReadinessGates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 } } - return n -} - -func (m *FlexPersistentVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.ShareProcessNamespace != nil { + i-- + if *m.ShareProcessNamespace { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd8 } - n += 2 - if len(m.Options) > 0 { - for k, v := range m.Options { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if m.DNSConfig != nil { + { + size, err := m.DNSConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 } - return n -} - -func (m *FlexVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Priority != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc8 } - n += 2 - if 
len(m.Options) > 0 { - for k, v := range m.Options { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + i -= len(m.PriorityClassName) + copy(dAtA[i:], m.PriorityClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PriorityClassName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + if len(m.HostAliases) > 0 { + for iNdEx := len(m.HostAliases) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.HostAliases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba } } - return n -} - -func (m *FlockerVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.DatasetName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DatasetUUID) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *GCEPersistentDiskVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.PDName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Partition)) - n += 2 - return n -} - -func (m *GitRepoVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Repository) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Revision) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Directory) - n += 1 + l + sovGenerated(uint64(l)) - return n + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + } + if m.AutomountServiceAccountToken != nil { + i-- + if *m.AutomountServiceAccountToken { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if len(m.InitContainers) > 0 { + for iNdEx := len(m.InitContainers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.InitContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + } + i -= len(m.SchedulerName) + copy(dAtA[i:], m.SchedulerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SchedulerName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + if m.Affinity != nil { + { + size, err := m.Affinity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + i -= len(m.Subdomain) + copy(dAtA[i:], m.Subdomain) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subdomain))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + if len(m.ImagePullSecrets) > 0 { + for iNdEx := len(m.ImagePullSecrets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ImagePullSecrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + } + if m.SecurityContext != nil { + { + size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + i-- + if m.HostIPC { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x68 + i-- + if m.HostPID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + i-- + if m.HostNetwork { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i-- + dAtA[i] = 0x52 + i -= len(m.DeprecatedServiceAccount) + copy(dAtA[i:], m.DeprecatedServiceAccount) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedServiceAccount))) + i-- + dAtA[i] = 0x4a + i -= len(m.ServiceAccountName) + copy(dAtA[i:], m.ServiceAccountName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) + i-- + dAtA[i] = 0x42 + if len(m.NodeSelector) > 0 { + keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) + for k := range m.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.NodeSelector[string(keysForNodeSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForNodeSelector[iNdEx]) + copy(dAtA[i:], keysForNodeSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } + } + i -= len(m.DNSPolicy) + copy(dAtA[i:], m.DNSPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DNSPolicy))) + i-- + dAtA[i] = 0x32 + if m.ActiveDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) + i-- + dAtA[i] = 0x28 + } + if m.TerminationGracePeriodSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminationGracePeriodSeconds)) + i-- + dAtA[i] = 0x20 + } + i -= len(m.RestartPolicy) + copy(dAtA[i:], m.RestartPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RestartPolicy))) + i-- + dAtA[i] = 0x1a + if len(m.Containers) > 0 { + for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *GlusterfsPersistentVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.EndpointsName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.EndpointsNamespace != nil { - l = len(*m.EndpointsNamespace) - n += 1 + l + sovGenerated(uint64(l)) +func (m *PodStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *GlusterfsVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.EndpointsName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - 
[vendored generated-code hunk elided for readability: this part of the diff re-vendors the regenerated gogo/protobuf marshalling code for the k8s.io/api core/v1 types. The old forward-writing Size()/MarshalTo implementations (HTTPGetAction, HTTPHeader, Handler, HostAlias, HostPathVolumeSource, ISCSIPersistentVolumeSource, ISCSIVolumeSource, KeyToPath, Lifecycle, LimitRange*, List, LoadBalancer*, LocalObjectReference, LocalVolumeSource, NFSVolumeSource, Namespace*, Node*, ObjectFieldSelector, ObjectReference, PersistentVolume*, PhotonPersistentDiskVolumeSource, Pod*) are removed, and reverse-writing Marshal/MarshalTo/MarshalToSizedBuffer implementations are added for PodStatus, PodStatusResult, PodTemplate, PodTemplateList, PodTemplateSpec, PortworxVolumeSource, Preconditions, PreferAvoidPodsEntry, PreferredSchedulingTerm, Probe, ProjectedVolumeSource, QuobyteVolumeSource, RBDPersistentVolumeSource, RBDVolumeSource, RangeAllocation, ReplicationController*, ResourceFieldSelector, ResourceQuota*, ResourceRequirements, SELinuxOptions, ScaleIOPersistentVolumeSource, ScaleIOVolumeSource, ScopeSelector, ScopedResourceSelectorRequirement, Secret, SecretEnvSource, SecretKeySelector, SecretList, SecretProjection and SecretReference. This is mechanical output of the protobuf code generator; a minimal sketch of the new reverse-marshalling pattern follows below.]
*PodSecurityContext) Size() (n int) { +func (m *SecretVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.SELinuxOptions != nil { - l = m.SELinuxOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.RunAsUser != nil { - n += 1 + sovGenerated(uint64(*m.RunAsUser)) - } - if m.RunAsNonRoot != nil { - n += 2 - } - if len(m.SupplementalGroups) > 0 { - for _, e := range m.SupplementalGroups { - n += 1 + sovGenerated(uint64(e)) + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x20 } - if m.FSGroup != nil { - n += 1 + sovGenerated(uint64(*m.FSGroup)) - } - if m.RunAsGroup != nil { - n += 1 + sovGenerated(uint64(*m.RunAsGroup)) + if m.DefaultMode != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) + i-- + dAtA[i] = 0x18 } - if len(m.Sysctls) > 0 { - for _, e := range m.Sysctls { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - return n + i -= len(m.SecretName) + copy(dAtA[i:], m.SecretName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodSignature) Size() (n int) { - var l int - _ = l - if m.PodController != nil { - l = m.PodController.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *SecurityContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PodSpec) Size() (n int) { +func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Volumes) > 0 { - for _, e := range m.Volumes { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Containers) > 0 { - for _, e := range m.Containers { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.WindowsOptions != nil { + { + size, err := m.WindowsOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x52 } - l = len(m.RestartPolicy) - n += 1 + l + sovGenerated(uint64(l)) - if m.TerminationGracePeriodSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TerminationGracePeriodSeconds)) + if m.ProcMount != nil { + i -= len(*m.ProcMount) + copy(dAtA[i:], *m.ProcMount) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ProcMount))) + i-- + dAtA[i] = 0x4a } - if m.ActiveDeadlineSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) + if m.RunAsGroup != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsGroup)) + i-- + dAtA[i] = 0x40 } - l = len(m.DNSPolicy) - n += 1 + l + 
sovGenerated(uint64(l)) - if len(m.NodeSelector) > 0 { - for k, v := range m.NodeSelector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if m.AllowPrivilegeEscalation != nil { + i-- + if *m.AllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x38 } - l = len(m.ServiceAccountName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DeprecatedServiceAccount) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.NodeName) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - n += 2 - n += 2 - if m.SecurityContext != nil { - l = m.SecurityContext.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.ReadOnlyRootFilesystem != nil { + i-- + if *m.ReadOnlyRootFilesystem { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 } - if len(m.ImagePullSecrets) > 0 { - for _, e := range m.ImagePullSecrets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.RunAsNonRoot != nil { + i-- + if *m.RunAsNonRoot { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x28 } - l = len(m.Hostname) - n += 2 + l + sovGenerated(uint64(l)) - l = len(m.Subdomain) - n += 2 + l + sovGenerated(uint64(l)) - if m.Affinity != nil { - l = m.Affinity.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.RunAsUser != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsUser)) + i-- + dAtA[i] = 0x20 } - l = len(m.SchedulerName) - n += 2 + l + sovGenerated(uint64(l)) - if len(m.InitContainers) > 0 { - for _, e := range m.InitContainers { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.SELinuxOptions != nil { + { + size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - if m.AutomountServiceAccountToken != nil { - n += 3 - } - if len(m.Tolerations) > 0 { - for _, e := range m.Tolerations { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.Privileged != nil { + i-- + if *m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x10 } - if len(m.HostAliases) > 0 { - for _, e := range m.HostAliases { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.Capabilities != nil { + { + size, err := m.Capabilities.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - l = len(m.PriorityClassName) - n += 2 + l + sovGenerated(uint64(l)) - if m.Priority != nil { - n += 2 + sovGenerated(uint64(*m.Priority)) + return len(dAtA) - i, nil +} + +func (m *SerializedReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.DNSConfig != nil { - l = m.DNSConfig.Size() - n += 2 + l + sovGenerated(uint64(l)) + return dAtA[:n], nil +} + +func (m *SerializedReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SerializedReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Reference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.ShareProcessNamespace != nil { - n += 3 + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil 
+} + +func (m *Service) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.ReadinessGates) > 0 { - for _, e := range m.ReadinessGates { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) + return dAtA[:n], nil +} + +func (m *Service) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Service) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.RuntimeClassName != nil { - l = len(*m.RuntimeClassName) - n += 2 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.EnableServiceLinks != nil { - n += 3 + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodStatus) Size() (n int) { +func (m *ServiceAccount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceAccount) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceAccount) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.AutomountServiceAccountToken != nil { + i-- + if *m.AutomountServiceAccountToken { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x20 } - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.HostIP) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.PodIP) - n += 1 + l + sovGenerated(uint64(l)) - if m.StartTime != nil { - l = m.StartTime.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.ImagePullSecrets) > 0 { + for iNdEx := len(m.ImagePullSecrets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ImagePullSecrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } - if len(m.ContainerStatuses) > 0 { - for _, e := range m.ContainerStatuses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Secrets) > 0 { + for iNdEx := len(m.Secrets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Secrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - l = len(m.QOSClass) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.InitContainerStatuses) > 0 { - for _, e := range m.InitContainerStatuses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) } - l = len(m.NominatedNodeName) - n += 1 + l + sovGenerated(uint64(l)) - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodStatusResult) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ServiceAccountList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PodTemplate) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ServiceAccountList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodTemplateList) Size() (n int) { +func (m *ServiceAccountList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - return n + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodTemplateSpec) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ServiceAccountTokenProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PortworxVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.VolumeID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n +func (m *ServiceAccountTokenProjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Preconditions) Size() (n int) { +func (m *ServiceAccountTokenProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.UID != nil { - l = len(*m.UID) - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x1a + if m.ExpirationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + i-- + dAtA[i] = 0x10 } - return n -} - -func (m *PreferAvoidPodsEntry) Size() (n int) { - var l int - _ = l - l = m.PodSignature.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.EvictionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n + i -= len(m.Audience) + copy(dAtA[i:], m.Audience) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audience))) + i-- + dAtA[i] = 0xa + return len(dAtA) - 
i, nil } -func (m *PreferredSchedulingTerm) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Weight)) - l = m.Preference.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ServiceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *Probe) Size() (n int) { - var l int - _ = l - l = m.Handler.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.InitialDelaySeconds)) - n += 1 + sovGenerated(uint64(m.TimeoutSeconds)) - n += 1 + sovGenerated(uint64(m.PeriodSeconds)) - n += 1 + sovGenerated(uint64(m.SuccessThreshold)) - n += 1 + sovGenerated(uint64(m.FailureThreshold)) - return n +func (m *ServiceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ProjectedVolumeSource) Size() (n int) { +func (m *ServiceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Sources) > 0 { - for _, e := range m.Sources { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - if m.DefaultMode != nil { - n += 1 + sovGenerated(uint64(*m.DefaultMode)) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *QuobyteVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Registry) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Volume) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = len(m.User) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Group) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Tenant) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ServicePort) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *RBDPersistentVolumeSource) Size() (n int) { +func (m *ServicePort) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServicePort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.CephMonitors) > 0 { - for _, s := range m.CephMonitors { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + i = encodeVarintGenerated(dAtA, i, uint64(m.NodePort)) + i-- + dAtA[i] = 0x28 + { + size, err := m.TargetPort.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - l = len(m.RBDImage) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.RBDPool) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.RadosUser) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Keyring) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - return n + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i-- + 
dAtA[i] = 0x18 + i -= len(m.Protocol) + copy(dAtA[i:], m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *RBDVolumeSource) Size() (n int) { - var l int - _ = l - if len(m.CephMonitors) > 0 { - for _, s := range m.CephMonitors { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.RBDImage) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.RBDPool) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.RadosUser) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Keyring) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *ServiceProxyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - n += 2 - return n + return dAtA[:n], nil } -func (m *RangeAllocation) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Range) - n += 1 + l + sovGenerated(uint64(l)) - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovGenerated(uint64(l)) - } - return n +func (m *ServiceProxyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReplicationController) Size() (n int) { +func (m *ServiceProxyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ReplicationControllerCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ServiceSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ReplicationControllerList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n +func (m *ServiceSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReplicationControllerSpec) Size() (n int) { +func (m *ServiceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) + if m.IPFamily != nil { + i -= len(*m.IPFamily) + copy(dAtA[i:], *m.IPFamily) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IPFamily))) + i-- + dAtA[i] = 0x7a + } + if m.SessionAffinityConfig != nil { + { + size, err := 
m.SessionAffinityConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + i-- + if m.PublishNotReadyAddresses { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x68 + i = encodeVarintGenerated(dAtA, i, uint64(m.HealthCheckNodePort)) + i-- + dAtA[i] = 0x60 + i -= len(m.ExternalTrafficPolicy) + copy(dAtA[i:], m.ExternalTrafficPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalTrafficPolicy))) + i-- + dAtA[i] = 0x5a + i -= len(m.ExternalName) + copy(dAtA[i:], m.ExternalName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalName))) + i-- + dAtA[i] = 0x52 + if len(m.LoadBalancerSourceRanges) > 0 { + for iNdEx := len(m.LoadBalancerSourceRanges) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.LoadBalancerSourceRanges[iNdEx]) + copy(dAtA[i:], m.LoadBalancerSourceRanges[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LoadBalancerSourceRanges[iNdEx]))) + i-- + dAtA[i] = 0x4a + } + } + i -= len(m.LoadBalancerIP) + copy(dAtA[i:], m.LoadBalancerIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LoadBalancerIP))) + i-- + dAtA[i] = 0x42 + i -= len(m.SessionAffinity) + copy(dAtA[i:], m.SessionAffinity) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SessionAffinity))) + i-- + dAtA[i] = 0x3a + if len(m.ExternalIPs) > 0 { + for iNdEx := len(m.ExternalIPs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExternalIPs[iNdEx]) + copy(dAtA[i:], m.ExternalIPs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalIPs[iNdEx]))) + i-- + dAtA[i] = 0x2a + } } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x22 + i -= len(m.ClusterIP) + copy(dAtA[i:], m.ClusterIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterIP))) + i-- + dAtA[i] = 0x1a if len(m.Selector) > 0 { - for k, v := range m.Selector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + keysForSelector := make([]string, 0, len(m.Selector)) + for k := range m.Selector { + keysForSelector = append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 } } - if m.Template != nil { - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - return n + return len(dAtA) - i, nil } -func (m *ReplicationControllerStatus) Size() (n int) { +func (m *ServiceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + 
+func (m *ServiceStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + sovGenerated(uint64(m.ReadyReplicas)) - n += 1 + sovGenerated(uint64(m.AvailableReplicas)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.LoadBalancer.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ResourceFieldSelector) Size() (n int) { - var l int - _ = l - l = len(m.ContainerName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Resource) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Divisor.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *SessionAffinityConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ResourceQuota) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *SessionAffinityConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourceQuotaList) Size() (n int) { +func (m *SessionAffinityConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.ClientIP != nil { + { + size, err := m.ClientIP.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *ResourceQuotaSpec) Size() (n int) { +func (m *StorageOSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageOSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageOSPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Hard) > 0 { - for k, v := range m.Hard { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2a } - if len(m.Scopes) > 0 { - for _, s := range m.Scopes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if 
m.ScopeSelector != nil { - l = m.ScopeSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x20 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x1a + i -= len(m.VolumeNamespace) + copy(dAtA[i:], m.VolumeNamespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeNamespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumeName) + copy(dAtA[i:], m.VolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StorageOSVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ResourceQuotaStatus) Size() (n int) { +func (m *StorageOSVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageOSVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Hard) > 0 { - for k, v := range m.Hard { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2a } - if len(m.Used) > 0 { - for k, v := range m.Used { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return n + i-- + dAtA[i] = 0x20 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x1a + i -= len(m.VolumeNamespace) + copy(dAtA[i:], m.VolumeNamespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeNamespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumeName) + copy(dAtA[i:], m.VolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ResourceRequirements) Size() (n int) { - var l int - _ = l - if len(m.Limits) > 0 { - for k, v := range m.Limits { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Requests) > 0 { - for k, v := range m.Requests { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } +func (m *Sysctl) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *SELinuxOptions) Size() (n int) { - var l int - _ = l - l = len(m.User) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Role) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Level) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *Sysctl) MarshalTo(dAtA []byte) (int, error) { + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ScaleIOPersistentVolumeSource) Size() (n int) { +func (m *Sysctl) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Gateway) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.System) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - l = len(m.ProtectionDomain) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StoragePool) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StorageMode) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ScaleIOVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Gateway) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.System) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *TCPSocketAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - n += 2 - l = len(m.ProtectionDomain) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StoragePool) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StorageMode) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n + return dAtA[:n], nil } -func (m *ScopeSelector) Size() (n int) { +func (m *TCPSocketAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TCPSocketAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.MatchExpressions) > 0 { - for _, e := range m.MatchExpressions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ScopedResourceSelectorRequirement) Size() (n int) { - var l int - _ = l - l = len(m.ScopeName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Operator) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Values) > 0 { - for _, s := range m.Values { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *Taint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Secret) Size() (n int) { +func (m *Taint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Taint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - 
if len(m.Data) > 0 { - for k, v := range m.Data { - _ = k - _ = v - l = 0 - if v != nil { - l = 1 + len(v) + sovGenerated(uint64(len(v))) + if m.TimeAdded != nil { + { + size, err := m.TimeAdded.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.StringData) > 0 { - for k, v := range m.StringData { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 } - return n + i -= len(m.Effect) + copy(dAtA[i:], m.Effect) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) + i-- + dAtA[i] = 0x1a + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecretEnvSource) Size() (n int) { - var l int - _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Optional != nil { - n += 2 +func (m *Toleration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *SecretKeySelector) Size() (n int) { - var l int - _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - if m.Optional != nil { - n += 2 - } - return n +func (m *Toleration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SecretList) Size() (n int) { +func (m *Toleration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + if m.TolerationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TolerationSeconds)) + i-- + dAtA[i] = 0x28 } - return n + i -= len(m.Effect) + copy(dAtA[i:], m.Effect) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) + i-- + dAtA[i] = 0x22 + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x1a + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecretProjection) Size() (n int) { - var l int - _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Optional != nil { - n += 2 +func (m *TopologySelectorLabelRequirement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m 
*SecretReference) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *TopologySelectorLabelRequirement) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SecretVolumeSource) Size() (n int) { +func (m *TopologySelectorLabelRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.SecretName) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - if m.DefaultMode != nil { - n += 1 + sovGenerated(uint64(*m.DefaultMode)) - } - if m.Optional != nil { - n += 2 - } - return n + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecurityContext) Size() (n int) { - var l int - _ = l - if m.Capabilities != nil { - l = m.Capabilities.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Privileged != nil { - n += 2 - } - if m.SELinuxOptions != nil { - l = m.SELinuxOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.RunAsUser != nil { - n += 1 + sovGenerated(uint64(*m.RunAsUser)) - } - if m.RunAsNonRoot != nil { - n += 2 - } - if m.ReadOnlyRootFilesystem != nil { - n += 2 - } - if m.AllowPrivilegeEscalation != nil { - n += 2 - } - if m.RunAsGroup != nil { - n += 1 + sovGenerated(uint64(*m.RunAsGroup)) - } - if m.ProcMount != nil { - l = len(*m.ProcMount) - n += 1 + l + sovGenerated(uint64(l)) +func (m *TopologySelectorTerm) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n -} - -func (m *SerializedReference) Size() (n int) { - var l int - _ = l - l = m.Reference.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *Service) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *TopologySelectorTerm) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ServiceAccount) Size() (n int) { +func (m *TopologySelectorTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Secrets) > 0 { - for _, e := range m.Secrets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.ImagePullSecrets) > 0 { - for _, e := range m.ImagePullSecrets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.MatchLabelExpressions) > 0 { + for iNdEx := len(m.MatchLabelExpressions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchLabelExpressions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } } - if m.AutomountServiceAccountToken != nil { - n += 2 - } - return n + 
return len(dAtA) - i, nil } -func (m *ServiceAccountList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *TopologySpreadConstraint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ServiceAccountTokenProjection) Size() (n int) { - var l int - _ = l - l = len(m.Audience) - n += 1 + l + sovGenerated(uint64(l)) - if m.ExpirationSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ExpirationSeconds)) - } - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *TopologySpreadConstraint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ServiceList) Size() (n int) { +func (m *TopologySpreadConstraint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.LabelSelector != nil { + { + size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 } - return n + i -= len(m.WhenUnsatisfiable) + copy(dAtA[i:], m.WhenUnsatisfiable) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WhenUnsatisfiable))) + i-- + dAtA[i] = 0x1a + i -= len(m.TopologyKey) + copy(dAtA[i:], m.TopologyKey) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKey))) + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSkew)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *ServicePort) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Port)) - l = m.TargetPort.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.NodePort)) - return n +func (m *TypedLocalObjectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ServiceProxyOptions) Size() (n int) { - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *TypedLocalObjectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ServiceSpec) Size() (n int) { +func (m *TypedLocalObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Selector) > 0 { - for k, v := range m.Selector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.ClusterIP) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.ExternalIPs) > 0 { - for _, s := range m.ExternalIPs { - l = len(s) - n += 1 + l + 
sovGenerated(uint64(l)) - } - } - l = len(m.SessionAffinity) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.LoadBalancerIP) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.LoadBalancerSourceRanges) > 0 { - for _, s := range m.LoadBalancerSourceRanges { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.ExternalName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ExternalTrafficPolicy) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.HealthCheckNodePort)) - n += 2 - if m.SessionAffinityConfig != nil { - l = m.SessionAffinityConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + if m.APIGroup != nil { + i -= len(*m.APIGroup) + copy(dAtA[i:], *m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.APIGroup))) + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *ServiceStatus) Size() (n int) { - var l int - _ = l - l = m.LoadBalancer.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *Volume) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *SessionAffinityConfig) Size() (n int) { - var l int - _ = l - if m.ClientIP != nil { - l = m.ClientIP.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n +func (m *Volume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *StorageOSPersistentVolumeSource) Size() (n int) { +func (m *Volume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.VolumeName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeNamespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.VolumeSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *StorageOSVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.VolumeName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeNamespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *VolumeDevice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Sysctl) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *VolumeDevice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *TCPSocketAction) Size() (n int) { +func (m *VolumeDevice) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.Port.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Host) - n += 1 + l + sovGenerated(uint64(l)) - return n + i -= len(m.DevicePath) + copy(dAtA[i:], m.DevicePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Taint) Size() (n int) { - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Effect) - n += 1 + l + sovGenerated(uint64(l)) - if m.TimeAdded != nil { - l = m.TimeAdded.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *VolumeMount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Toleration) Size() (n int) { - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Operator) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Effect) - n += 1 + l + sovGenerated(uint64(l)) - if m.TolerationSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TolerationSeconds)) - } - return n +func (m *VolumeMount) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *TopologySelectorLabelRequirement) Size() (n int) { +func (m *VolumeMount) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Values) > 0 { - for _, s := range m.Values { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + i -= len(m.SubPathExpr) + copy(dAtA[i:], m.SubPathExpr) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPathExpr))) + i-- + dAtA[i] = 0x32 + if m.MountPropagation != nil { + i -= len(*m.MountPropagation) + copy(dAtA[i:], *m.MountPropagation) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MountPropagation))) + i-- + dAtA[i] = 0x2a } - return n + i -= len(m.SubPath) + copy(dAtA[i:], m.SubPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPath))) + i-- + dAtA[i] = 0x22 + i -= len(m.MountPath) + copy(dAtA[i:], m.MountPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountPath))) + i-- + dAtA[i] = 0x1a + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *TopologySelectorTerm) Size() (n int) { - var l int - _ = l - if len(m.MatchLabelExpressions) > 0 { - for _, e := range m.MatchLabelExpressions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *VolumeNodeAffinity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *TypedLocalObjectReference) Size() (n int) { +func (m *VolumeNodeAffinity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeNodeAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.APIGroup != 
nil { - l = len(*m.APIGroup) - n += 1 + l + sovGenerated(uint64(l)) + if m.Required != nil { + { + size, err := m.Required.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n + return len(dAtA) - i, nil } -func (m *Volume) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.VolumeSource.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *VolumeProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *VolumeDevice) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DevicePath) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *VolumeProjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *VolumeMount) Size() (n int) { +func (m *VolumeProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = len(m.MountPath) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SubPath) - n += 1 + l + sovGenerated(uint64(l)) - if m.MountPropagation != nil { - l = len(*m.MountPropagation) - n += 1 + l + sovGenerated(uint64(l)) + if m.ServiceAccountToken != nil { + { + size, err := m.ServiceAccountToken.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - l = len(m.SubPathExpr) - n += 1 + l + sovGenerated(uint64(l)) - return n + if m.ConfigMap != nil { + { + size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.DownwardAPI != nil { + { + size, err := m.DownwardAPI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Secret != nil { + { + size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *VolumeNodeAffinity) Size() (n int) { - var l int - _ = l - if m.Required != nil { - l = m.Required.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *VolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *VolumeProjection) Size() (n int) { +func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Secret != nil { - l = m.Secret.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.CSI != nil { + { + size, err := m.CSI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 } - if m.DownwardAPI != nil { - l = m.DownwardAPI.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.StorageOS != nil { + { + size, err := m.StorageOS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda } - if m.ConfigMap != nil { - l = m.ConfigMap.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Projected != nil { + { + size, err := m.Projected.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 } - if m.ServiceAccountToken != nil { - l = m.ServiceAccountToken.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.ScaleIO != nil { + { + size, err := m.ScaleIO.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca } - return n -} - -func (m *VolumeSource) Size() (n int) { - var l int - _ = l - if m.HostPath != nil { - l = m.HostPath.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.PortworxVolume != nil { + { + size, err := m.PortworxVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 } - if m.EmptyDir != nil { - l = m.EmptyDir.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.PhotonPersistentDisk != nil { + { + size, err := m.PhotonPersistentDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba } - if m.GCEPersistentDisk != nil { - l = m.GCEPersistentDisk.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.AzureDisk != nil { + { + size, err := m.AzureDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 } - if m.AWSElasticBlockStore != nil { - l = m.AWSElasticBlockStore.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Quobyte != nil { + { + size, err := m.Quobyte.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa } - if m.GitRepo != nil { - l = m.GitRepo.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.VsphereVolume != nil { + { + size, err := m.VsphereVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 } - if m.Secret != nil { - l = m.Secret.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.ConfigMap != nil { + { + size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a } - if m.NFS != nil { - l = m.NFS.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.AzureFile != nil { + { + size, err := m.AzureFile.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 } - if m.ISCSI != nil { - l = m.ISCSI.Size() - 
n += 1 + l + sovGenerated(uint64(l)) + if m.FC != nil { + { + size, err := m.FC.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a } - if m.Glusterfs != nil { - l = m.Glusterfs.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.DownwardAPI != nil { + { + size, err := m.DownwardAPI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 } - if m.PersistentVolumeClaim != nil { - l = m.PersistentVolumeClaim.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Flocker != nil { + { + size, err := m.Flocker.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a } - if m.RBD != nil { - l = m.RBD.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.CephFS != nil { + { + size, err := m.CephFS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + if m.Cinder != nil { + { + size, err := m.Cinder.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a } if m.FlexVolume != nil { - l = m.FlexVolume.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.FlexVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 } - if m.Cinder != nil { - l = m.Cinder.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.RBD != nil { + { + size, err := m.RBD.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a } - if m.CephFS != nil { - l = m.CephFS.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.PersistentVolumeClaim != nil { + { + size, err := m.PersistentVolumeClaim.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 } - if m.Flocker != nil { - l = m.Flocker.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Glusterfs != nil { + { + size, err := m.Glusterfs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a } - if m.DownwardAPI != nil { - l = m.DownwardAPI.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.ISCSI != nil { + { + size, err := m.ISCSI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 } - if m.FC != nil { - l = m.FC.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.NFS != nil { + { + size, err := m.NFS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a } - if m.AzureFile != nil { - l = m.AzureFile.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.Secret != nil { + { + size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } - if m.ConfigMap != nil { - l = 
m.ConfigMap.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.GitRepo != nil { + { + size, err := m.GitRepo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - if m.VsphereVolume != nil { - l = m.VsphereVolume.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.AWSElasticBlockStore != nil { + { + size, err := m.AWSElasticBlockStore.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - if m.Quobyte != nil { - l = m.Quobyte.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.GCEPersistentDisk != nil { + { + size, err := m.GCEPersistentDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - if m.AzureDisk != nil { - l = m.AzureDisk.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.EmptyDir != nil { + { + size, err := m.EmptyDir.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - if m.PhotonPersistentDisk != nil { - l = m.PhotonPersistentDisk.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.HostPath != nil { + { + size, err := m.HostPath.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - if m.PortworxVolume != nil { - l = m.PortworxVolume.Size() - n += 2 + l + sovGenerated(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *VsphereVirtualDiskVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.ScaleIO != nil { - l = m.ScaleIO.Size() - n += 2 + l + sovGenerated(uint64(l)) + return dAtA[:n], nil +} + +func (m *VsphereVirtualDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VsphereVirtualDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.StoragePolicyID) + copy(dAtA[i:], m.StoragePolicyID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePolicyID))) + i-- + dAtA[i] = 0x22 + i -= len(m.StoragePolicyName) + copy(dAtA[i:], m.StoragePolicyName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePolicyName))) + i-- + dAtA[i] = 0x1a + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumePath) + copy(dAtA[i:], m.VolumePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumePath))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WeightedPodAffinityTerm) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.Projected != nil { - l = m.Projected.Size() - n += 2 + l + sovGenerated(uint64(l)) + return dAtA[:n], nil +} + +func (m *WeightedPodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WeightedPodAffinityTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := 
m.PodAffinityTerm.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.StorageOS != nil { - l = m.StorageOS.Size() - n += 2 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Weight)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *WindowsSecurityContextOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.CSI != nil { - l = m.CSI.Size() - n += 2 + l + sovGenerated(uint64(l)) + return dAtA[:n], nil +} + +func (m *WindowsSecurityContextOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WindowsSecurityContextOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RunAsUserName != nil { + i -= len(*m.RunAsUserName) + copy(dAtA[i:], *m.RunAsUserName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RunAsUserName))) + i-- + dAtA[i] = 0x1a } - return n + if m.GMSACredentialSpec != nil { + i -= len(*m.GMSACredentialSpec) + copy(dAtA[i:], *m.GMSACredentialSpec) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GMSACredentialSpec))) + i-- + dAtA[i] = 0x12 + } + if m.GMSACredentialSpecName != nil { + i -= len(*m.GMSACredentialSpecName) + copy(dAtA[i:], *m.GMSACredentialSpecName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GMSACredentialSpecName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *VsphereVirtualDiskVolumeSource) Size() (n int) { +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AWSElasticBlockStoreVolumeSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - l = len(m.VolumePath) + l = len(m.VolumeID) n += 1 + l + sovGenerated(uint64(l)) l = len(m.FSType) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StoragePolicyName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StoragePolicyID) - n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Partition)) + n += 2 return n } -func (m *WeightedPodAffinityTerm) Size() (n int) { +func (m *Affinity) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - n += 1 + sovGenerated(uint64(m.Weight)) - l = m.PodAffinityTerm.Size() + if m.NodeAffinity != nil { + l = m.NodeAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PodAffinity != nil { + l = m.PodAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PodAntiAffinity != nil { + l = m.PodAntiAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *AttachedVolume) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DevicePath) n += 1 + l + sovGenerated(uint64(l)) return n } -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break +func (m *AvoidPods) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.PreferAvoidPods) > 0 { + for _, e := range m.PreferAvoidPods { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } return n } -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 
63)))) -} -func (this *AWSElasticBlockStoreVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *AzureDiskVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&AWSElasticBlockStoreVolumeSource{`, - `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *Affinity) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.DiskName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DataDiskURI) + n += 1 + l + sovGenerated(uint64(l)) + if m.CachingMode != nil { + l = len(*m.CachingMode) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&Affinity{`, - `NodeAffinity:` + strings.Replace(fmt.Sprintf("%v", this.NodeAffinity), "NodeAffinity", "NodeAffinity", 1) + `,`, - `PodAffinity:` + strings.Replace(fmt.Sprintf("%v", this.PodAffinity), "PodAffinity", "PodAffinity", 1) + `,`, - `PodAntiAffinity:` + strings.Replace(fmt.Sprintf("%v", this.PodAntiAffinity), "PodAntiAffinity", "PodAntiAffinity", 1) + `,`, - `}`, - }, "") - return s + if m.FSType != nil { + l = len(*m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReadOnly != nil { + n += 2 + } + if m.Kind != nil { + l = len(*m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *AttachedVolume) String() string { - if this == nil { - return "nil" + +func (m *AzureFilePersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&AttachedVolume{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, - `}`, - }, "") - return s -} -func (this *AvoidPods) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ShareName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretNamespace != nil { + l = len(*m.SecretNamespace) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&AvoidPods{`, - `PreferAvoidPods:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferAvoidPods), "PreferAvoidPodsEntry", "PreferAvoidPodsEntry", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *AzureDiskVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *AzureFileVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&AzureDiskVolumeSource{`, - `DiskName:` + fmt.Sprintf("%v", this.DiskName) + `,`, - `DataDiskURI:` + fmt.Sprintf("%v", this.DataDiskURI) + `,`, - `CachingMode:` + valueToStringGenerated(this.CachingMode) + `,`, - `FSType:` + valueToStringGenerated(this.FSType) + `,`, - `ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`, - `Kind:` + valueToStringGenerated(this.Kind) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ShareName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n } -func (this *AzureFilePersistentVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *Binding) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&AzureFilePersistentVolumeSource{`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`, - `ReadOnly:` + 
fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretNamespace:` + valueToStringGenerated(this.SecretNamespace) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *AzureFileVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *CSIPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&AzureFileVolumeSource{`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *Binding) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeHandle) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeAttributes) > 0 { + for k, v := range m.VolumeAttributes { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&Binding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Target:` + strings.Replace(strings.Replace(this.Target.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CSIPersistentVolumeSource) String() string { - if this == nil { - return "nil" + if m.ControllerPublishSecretRef != nil { + l = m.ControllerPublishSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - keysForVolumeAttributes := make([]string, 0, len(this.VolumeAttributes)) - for k := range this.VolumeAttributes { - keysForVolumeAttributes = append(keysForVolumeAttributes, k) + if m.NodeStageSecretRef != nil { + l = m.NodeStageSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) - mapStringForVolumeAttributes := "map[string]string{" - for _, k := range keysForVolumeAttributes { - mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k]) + if m.NodePublishSecretRef != nil { + l = m.NodePublishSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForVolumeAttributes += "}" - s := strings.Join([]string{`&CSIPersistentVolumeSource{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `VolumeHandle:` + fmt.Sprintf("%v", this.VolumeHandle) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `VolumeAttributes:` + mapStringForVolumeAttributes + `,`, - `ControllerPublishSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.ControllerPublishSecretRef), "SecretReference", "SecretReference", 1) + `,`, - `NodeStageSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.NodeStageSecretRef), "SecretReference", "SecretReference", 1) + `,`, - `NodePublishSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.NodePublishSecretRef), "SecretReference", "SecretReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *CSIVolumeSource) String() string { - if this == nil { - return "nil" + if m.ControllerExpandSecretRef != nil { + l = m.ControllerExpandSecretRef.Size() + n += 1 + l + 
sovGenerated(uint64(l)) } - keysForVolumeAttributes := make([]string, 0, len(this.VolumeAttributes)) - for k := range this.VolumeAttributes { - keysForVolumeAttributes = append(keysForVolumeAttributes, k) + return n +} + +func (m *CSIVolumeSource) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) - mapStringForVolumeAttributes := "map[string]string{" - for _, k := range keysForVolumeAttributes { - mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k]) + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + if m.ReadOnly != nil { + n += 2 } - mapStringForVolumeAttributes += "}" - s := strings.Join([]string{`&CSIVolumeSource{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`, - `FSType:` + valueToStringGenerated(this.FSType) + `,`, - `VolumeAttributes:` + mapStringForVolumeAttributes + `,`, - `NodePublishSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.NodePublishSecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Capabilities) String() string { - if this == nil { - return "nil" + if m.FSType != nil { + l = len(*m.FSType) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&Capabilities{`, - `Add:` + fmt.Sprintf("%v", this.Add) + `,`, - `Drop:` + fmt.Sprintf("%v", this.Drop) + `,`, - `}`, - }, "") - return s -} -func (this *CephFSPersistentVolumeSource) String() string { - if this == nil { - return "nil" + if len(m.VolumeAttributes) > 0 { + for k, v := range m.VolumeAttributes { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&CephFSPersistentVolumeSource{`, - `Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *CephFSVolumeSource) String() string { - if this == nil { - return "nil" + if m.NodePublishSecretRef != nil { + l = m.NodePublishSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&CephFSVolumeSource{`, - `Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s + return n } -func (this *CinderPersistentVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *Capabilities) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&CinderPersistentVolumeSource{`, - `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, - `}`, - }, "") - return s 
-} -func (this *CinderVolumeSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.Add) > 0 { + for _, s := range m.Add { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&CinderVolumeSource{`, - `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClientIPConfig) String() string { - if this == nil { - return "nil" + if len(m.Drop) > 0 { + for _, s := range m.Drop { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ClientIPConfig{`, - `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `}`, - }, "") - return s + return n } -func (this *ComponentCondition) String() string { - if this == nil { - return "nil" + +func (m *CephFSPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ComponentCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `}`, - }, "") - return s -} -func (this *ComponentStatus) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.Monitors) > 0 { + for _, s := range m.Monitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ComponentStatus{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ComponentCondition", "ComponentCondition", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ComponentStatusList) String() string { - if this == nil { - return "nil" + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SecretFile) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&ComponentStatusList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ComponentStatus", "ComponentStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + n += 2 + return n } -func (this *ConfigMap) String() string { - if this == nil { - return "nil" + +func (m *CephFSVolumeSource) Size() (n int) { + if m == nil { + return 0 } - keysForData := make([]string, 0, len(this.Data)) - for k := range this.Data { - keysForData = append(keysForData, k) + var l int + _ = l + if len(m.Monitors) > 0 { + for _, s := range m.Monitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForData) - mapStringForData := "map[string]string{" - for _, k := range keysForData { - mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SecretFile) + n += 1 + l + sovGenerated(uint64(l)) 
+ if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForData += "}" - keysForBinaryData := make([]string, 0, len(this.BinaryData)) - for k := range this.BinaryData { - keysForBinaryData = append(keysForBinaryData, k) + n += 2 + return n +} + +func (m *CinderPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForBinaryData) - mapStringForBinaryData := "map[string][]byte{" - for _, k := range keysForBinaryData { - mapStringForBinaryData += fmt.Sprintf("%v: %v,", k, this.BinaryData[k]) + var l int + _ = l + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForBinaryData += "}" - s := strings.Join([]string{`&ConfigMap{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + mapStringForData + `,`, - `BinaryData:` + mapStringForBinaryData + `,`, - `}`, - }, "") - return s + return n } -func (this *ConfigMapEnvSource) String() string { - if this == nil { - return "nil" + +func (m *CinderVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ConfigMapEnvSource{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *ConfigMapKeySelector) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&ConfigMapKeySelector{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s + return n } -func (this *ConfigMapList) String() string { - if this == nil { - return "nil" + +func (m *ClientIPConfig) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ConfigMapList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ConfigMap", "ConfigMap", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ConfigMapNodeConfigSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) } - s := strings.Join([]string{`&ConfigMapNodeConfigSource{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, - `KubeletConfigKey:` + fmt.Sprintf("%v", this.KubeletConfigKey) + `,`, - `}`, - }, "") - return s + return n } -func (this *ConfigMapProjection) String() string { - if this == nil { 
- return "nil" + +func (m *ComponentCondition) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ConfigMapProjection{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Error) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ConfigMapVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *ComponentStatus) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ConfigMapVolumeSource{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, - `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *Container) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&Container{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Image:` + fmt.Sprintf("%v", this.Image) + `,`, - `Command:` + fmt.Sprintf("%v", this.Command) + `,`, - `Args:` + fmt.Sprintf("%v", this.Args) + `,`, - `WorkingDir:` + fmt.Sprintf("%v", this.WorkingDir) + `,`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "ContainerPort", "ContainerPort", 1), `&`, ``, 1) + `,`, - `Env:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Env), "EnvVar", "EnvVar", 1), `&`, ``, 1) + `,`, - `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, - `VolumeMounts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeMounts), "VolumeMount", "VolumeMount", 1), `&`, ``, 1) + `,`, - `LivenessProbe:` + strings.Replace(fmt.Sprintf("%v", this.LivenessProbe), "Probe", "Probe", 1) + `,`, - `ReadinessProbe:` + strings.Replace(fmt.Sprintf("%v", this.ReadinessProbe), "Probe", "Probe", 1) + `,`, - `Lifecycle:` + strings.Replace(fmt.Sprintf("%v", this.Lifecycle), "Lifecycle", "Lifecycle", 1) + `,`, - `TerminationMessagePath:` + fmt.Sprintf("%v", this.TerminationMessagePath) + `,`, - `ImagePullPolicy:` + fmt.Sprintf("%v", this.ImagePullPolicy) + `,`, - `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "SecurityContext", "SecurityContext", 1) + `,`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `StdinOnce:` + fmt.Sprintf("%v", this.StdinOnce) + `,`, - `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, - `EnvFrom:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EnvFrom), "EnvFromSource", "EnvFromSource", 1), `&`, ``, 1) + `,`, - `TerminationMessagePolicy:` + fmt.Sprintf("%v", 
this.TerminationMessagePolicy) + `,`, - `VolumeDevices:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeDevices), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *ContainerImage) String() string { - if this == nil { - return "nil" + +func (m *ComponentStatusList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ContainerImage{`, - `Names:` + fmt.Sprintf("%v", this.Names) + `,`, - `SizeBytes:` + fmt.Sprintf("%v", this.SizeBytes) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *ContainerPort) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ContainerPort{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `HostPort:` + fmt.Sprintf("%v", this.HostPort) + `,`, - `ContainerPort:` + fmt.Sprintf("%v", this.ContainerPort) + `,`, - `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, - `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerState) String() string { - if this == nil { - return "nil" + +func (m *ConfigMap) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ContainerState{`, - `Waiting:` + strings.Replace(fmt.Sprintf("%v", this.Waiting), "ContainerStateWaiting", "ContainerStateWaiting", 1) + `,`, - `Running:` + strings.Replace(fmt.Sprintf("%v", this.Running), "ContainerStateRunning", "ContainerStateRunning", 1) + `,`, - `Terminated:` + strings.Replace(fmt.Sprintf("%v", this.Terminated), "ContainerStateTerminated", "ContainerStateTerminated", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerStateRunning) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Data) > 0 { + for k, v := range m.Data { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&ContainerStateRunning{`, - `StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerStateTerminated) String() string { - if this == nil { - return "nil" + if len(m.BinaryData) > 0 { + for k, v := range m.BinaryData { + _ = k + _ = v + l = 0 + if v != nil { + l = 1 + len(v) + sovGenerated(uint64(len(v))) + } + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&ContainerStateTerminated{`, - `ExitCode:` + fmt.Sprintf("%v", this.ExitCode) + `,`, - `Signal:` + fmt.Sprintf("%v", this.Signal) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `FinishedAt:` + strings.Replace(strings.Replace(this.FinishedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `}`, - }, "") - return s + return n } -func (this 
*ContainerStateWaiting) String() string { - if this == nil { - return "nil" + +func (m *ConfigMapEnvSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ContainerStateWaiting{`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerStatus) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 } - s := strings.Join([]string{`&ContainerStatus{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `State:` + strings.Replace(strings.Replace(this.State.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, - `LastTerminationState:` + strings.Replace(strings.Replace(this.LastTerminationState.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, - `Ready:` + fmt.Sprintf("%v", this.Ready) + `,`, - `RestartCount:` + fmt.Sprintf("%v", this.RestartCount) + `,`, - `Image:` + fmt.Sprintf("%v", this.Image) + `,`, - `ImageID:` + fmt.Sprintf("%v", this.ImageID) + `,`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `}`, - }, "") - return s + return n } -func (this *DaemonEndpoint) String() string { - if this == nil { - return "nil" + +func (m *ConfigMapKeySelector) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&DaemonEndpoint{`, - `Port:` + fmt.Sprintf("%v", this.Port) + `,`, - `}`, - }, "") - return s -} -func (this *DownwardAPIProjection) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 } - s := strings.Join([]string{`&DownwardAPIProjection{`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *DownwardAPIVolumeFile) String() string { - if this == nil { - return "nil" + +func (m *ConfigMapList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&DownwardAPIVolumeFile{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `FieldRef:` + strings.Replace(fmt.Sprintf("%v", this.FieldRef), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`, - `ResourceFieldRef:` + strings.Replace(fmt.Sprintf("%v", this.ResourceFieldRef), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, - `Mode:` + valueToStringGenerated(this.Mode) + `,`, - `}`, - }, "") - return s -} -func (this *DownwardAPIVolumeSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&DownwardAPIVolumeSource{`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + `,`, - `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, - `}`, - }, "") - return s + return n } -func (this *EmptyDirVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *ConfigMapNodeConfigSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&EmptyDirVolumeSource{`, - `Medium:` + fmt.Sprintf("%v", this.Medium) + 
`,`, - `SizeLimit:` + strings.Replace(fmt.Sprintf("%v", this.SizeLimit), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KubeletConfigKey) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *EndpointAddress) String() string { - if this == nil { - return "nil" + +func (m *ConfigMapProjection) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&EndpointAddress{`, - `IP:` + fmt.Sprintf("%v", this.IP) + `,`, - `TargetRef:` + strings.Replace(fmt.Sprintf("%v", this.TargetRef), "ObjectReference", "ObjectReference", 1) + `,`, - `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, - `NodeName:` + valueToStringGenerated(this.NodeName) + `,`, - `}`, - }, "") - return s -} -func (this *EndpointPort) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&EndpointPort{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Port:` + fmt.Sprintf("%v", this.Port) + `,`, - `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, - `}`, - }, "") - return s -} -func (this *EndpointSubset) String() string { - if this == nil { - return "nil" + if m.Optional != nil { + n += 2 } - s := strings.Join([]string{`&EndpointSubset{`, - `Addresses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Addresses), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + `,`, - `NotReadyAddresses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NotReadyAddresses), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + `,`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "EndpointPort", "EndpointPort", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *Endpoints) String() string { - if this == nil { - return "nil" + +func (m *ConfigMapVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Endpoints{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subsets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subsets), "EndpointSubset", "EndpointSubset", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *EndpointsList) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&EndpointsList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Endpoints", "Endpoints", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *EnvFromSource) String() string { - if this == nil { - return "nil" + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) } - s := 
strings.Join([]string{`&EnvFromSource{`, - `Prefix:` + fmt.Sprintf("%v", this.Prefix) + `,`, - `ConfigMapRef:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMapRef), "ConfigMapEnvSource", "ConfigMapEnvSource", 1) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretEnvSource", "SecretEnvSource", 1) + `,`, - `}`, - }, "") - return s -} -func (this *EnvVar) String() string { - if this == nil { - return "nil" + if m.Optional != nil { + n += 2 } - s := strings.Join([]string{`&EnvVar{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `ValueFrom:` + strings.Replace(fmt.Sprintf("%v", this.ValueFrom), "EnvVarSource", "EnvVarSource", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *EnvVarSource) String() string { - if this == nil { - return "nil" + +func (m *Container) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&EnvVarSource{`, - `FieldRef:` + strings.Replace(fmt.Sprintf("%v", this.FieldRef), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`, - `ResourceFieldRef:` + strings.Replace(fmt.Sprintf("%v", this.ResourceFieldRef), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, - `ConfigMapKeyRef:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMapKeyRef), "ConfigMapKeySelector", "ConfigMapKeySelector", 1) + `,`, - `SecretKeyRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretKeyRef), "SecretKeySelector", "SecretKeySelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Event) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&Event{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `InvolvedObject:` + strings.Replace(strings.Replace(this.InvolvedObject.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Source:` + strings.Replace(strings.Replace(this.Source.String(), "EventSource", "EventSource", 1), `&`, ``, 1) + `,`, - `FirstTimestamp:` + strings.Replace(strings.Replace(this.FirstTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTimestamp:` + strings.Replace(strings.Replace(this.LastTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Count:` + fmt.Sprintf("%v", this.Count) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `EventTime:` + strings.Replace(strings.Replace(this.EventTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, - `Series:` + strings.Replace(fmt.Sprintf("%v", this.Series), "EventSeries", "EventSeries", 1) + `,`, - `Action:` + fmt.Sprintf("%v", this.Action) + `,`, - `Related:` + strings.Replace(fmt.Sprintf("%v", this.Related), "ObjectReference", "ObjectReference", 1) + `,`, - `ReportingController:` + fmt.Sprintf("%v", this.ReportingController) + `,`, - `ReportingInstance:` + fmt.Sprintf("%v", this.ReportingInstance) + `,`, - `}`, - }, "") - return s -} -func (this *EventList) String() string { - if this == nil { - return "nil" + if len(m.Args) > 0 { + for _, s := 
range m.Args { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&EventList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Event", "Event", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *EventSeries) String() string { - if this == nil { - return "nil" + l = len(m.WorkingDir) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&EventSeries{`, - `Count:` + fmt.Sprintf("%v", this.Count) + `,`, - `LastObservedTime:` + strings.Replace(strings.Replace(this.LastObservedTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, - `State:` + fmt.Sprintf("%v", this.State) + `,`, - `}`, - }, "") - return s -} -func (this *EventSource) String() string { - if this == nil { - return "nil" + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&EventSource{`, - `Component:` + fmt.Sprintf("%v", this.Component) + `,`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `}`, - }, "") - return s -} -func (this *ExecAction) String() string { - if this == nil { - return "nil" + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeMounts) > 0 { + for _, e := range m.VolumeMounts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ExecAction{`, - `Command:` + fmt.Sprintf("%v", this.Command) + `,`, - `}`, - }, "") - return s -} -func (this *FCVolumeSource) String() string { - if this == nil { - return "nil" + if m.LivenessProbe != nil { + l = m.LivenessProbe.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&FCVolumeSource{`, - `TargetWWNs:` + fmt.Sprintf("%v", this.TargetWWNs) + `,`, - `Lun:` + valueToStringGenerated(this.Lun) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `WWIDs:` + fmt.Sprintf("%v", this.WWIDs) + `,`, - `}`, - }, "") - return s -} -func (this *FlexPersistentVolumeSource) String() string { - if this == nil { - return "nil" + if m.ReadinessProbe != nil { + l = m.ReadinessProbe.Size() + n += 1 + l + sovGenerated(uint64(l)) } - keysForOptions := make([]string, 0, len(this.Options)) - for k := range this.Options { - keysForOptions = append(keysForOptions, k) + if m.Lifecycle != nil { + l = m.Lifecycle.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) - mapStringForOptions := "map[string]string{" - for _, k := range keysForOptions { - mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + l = len(m.TerminationMessagePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ImagePullPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForOptions += "}" - s := strings.Join([]string{`&FlexPersistentVolumeSource{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", 
[Flattened, machine-generated diff hunk elided: this stretch of the patch is the regenerated gogo-protobuf code for k8s.io/api/core/v1 (generated.pb.go, evidently refreshed as part of the vendored dependency update). In this portion of the hunk the previous String() implementations (roughly FlexVolumeSource through ReplicationControllerList) are replaced by regenerated Size() implementations (roughly Container/ContainerImage through LimitRangeList); the generated code itself is not reproduced here.]
strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicationController", "ReplicationController", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *ReplicationControllerSpec) String() string { - if this == nil { - return "nil" - } - keysForSelector := make([]string, 0, len(this.Selector)) - for k := range this.Selector { - keysForSelector = append(keysForSelector, k) + +func (m *LimitRangeSpec) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - mapStringForSelector := "map[string]string{" - for _, k := range keysForSelector { - mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + var l int + _ = l + if len(m.Limits) > 0 { + for _, e := range m.Limits { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForSelector += "}" - s := strings.Join([]string{`&ReplicationControllerSpec{`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + mapStringForSelector + `,`, - `Template:` + strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "PodTemplateSpec", 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - `}`, - }, "") - return s + return n } -func (this *ReplicationControllerStatus) String() string { - if this == nil { - return "nil" + +func (m *List) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ReplicationControllerStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, - `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicationControllerCondition", "ReplicationControllerCondition", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceFieldSelector) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ResourceFieldSelector{`, - `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, - `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, - `Divisor:` + strings.Replace(strings.Replace(this.Divisor.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *ResourceQuota) String() string { - if this == nil { - return "nil" + +func (m *LoadBalancerIngress) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ResourceQuota{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceQuotaSpec", "ResourceQuotaSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceQuotaStatus", "ResourceQuotaStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.IP) + n += 1 
+ l + sovGenerated(uint64(l)) + l = len(m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ResourceQuotaList) String() string { - if this == nil { - return "nil" + +func (m *LoadBalancerStatus) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ResourceQuotaList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ResourceQuota", "ResourceQuota", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *ResourceQuotaSpec) String() string { - if this == nil { - return "nil" + +func (m *LocalObjectReference) Size() (n int) { + if m == nil { + return 0 } - keysForHard := make([]string, 0, len(this.Hard)) - for k := range this.Hard { - keysForHard = append(keysForHard, string(k)) + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LocalVolumeSource) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForHard) - mapStringForHard := "ResourceList{" - for _, k := range keysForHard { - mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + if m.FSType != nil { + l = len(*m.FSType) + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForHard += "}" - s := strings.Join([]string{`&ResourceQuotaSpec{`, - `Hard:` + mapStringForHard + `,`, - `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, - `ScopeSelector:` + strings.Replace(fmt.Sprintf("%v", this.ScopeSelector), "ScopeSelector", "ScopeSelector", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *ResourceQuotaStatus) String() string { - if this == nil { - return "nil" + +func (m *NFSVolumeSource) Size() (n int) { + if m == nil { + return 0 } - keysForHard := make([]string, 0, len(this.Hard)) - for k := range this.Hard { - keysForHard = append(keysForHard, string(k)) + var l int + _ = l + l = len(m.Server) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *Namespace) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForHard) - mapStringForHard := "ResourceList{" - for _, k := range keysForHard { - mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NamespaceCondition) Size() (n int) { + if m == nil { + return 0 } - mapStringForHard += "}" - keysForUsed := make([]string, 0, len(this.Used)) - for k := range this.Used { - keysForUsed = append(keysForUsed, string(k)) + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NamespaceList) Size() (n int) { + if m == nil { + return 0 } - 
github_com_gogo_protobuf_sortkeys.Strings(keysForUsed) - mapStringForUsed := "ResourceList{" - for _, k := range keysForUsed { - mapStringForUsed += fmt.Sprintf("%v: %v,", k, this.Used[ResourceName(k)]) + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForUsed += "}" - s := strings.Join([]string{`&ResourceQuotaStatus{`, - `Hard:` + mapStringForHard + `,`, - `Used:` + mapStringForUsed + `,`, - `}`, - }, "") - return s + return n } -func (this *ResourceRequirements) String() string { - if this == nil { - return "nil" - } - keysForLimits := make([]string, 0, len(this.Limits)) - for k := range this.Limits { - keysForLimits = append(keysForLimits, string(k)) + +func (m *NamespaceSpec) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) - mapStringForLimits := "ResourceList{" - for _, k := range keysForLimits { - mapStringForLimits += fmt.Sprintf("%v: %v,", k, this.Limits[ResourceName(k)]) + var l int + _ = l + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForLimits += "}" - keysForRequests := make([]string, 0, len(this.Requests)) - for k := range this.Requests { - keysForRequests = append(keysForRequests, string(k)) + return n +} + +func (m *NamespaceStatus) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) - mapStringForRequests := "ResourceList{" - for _, k := range keysForRequests { - mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[ResourceName(k)]) + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForRequests += "}" - s := strings.Join([]string{`&ResourceRequirements{`, - `Limits:` + mapStringForLimits + `,`, - `Requests:` + mapStringForRequests + `,`, - `}`, - }, "") - return s + return n } -func (this *SELinuxOptions) String() string { - if this == nil { - return "nil" + +func (m *Node) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&SELinuxOptions{`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `Role:` + fmt.Sprintf("%v", this.Role) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Level:` + fmt.Sprintf("%v", this.Level) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ScaleIOPersistentVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *NodeAddress) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ScaleIOPersistentVolumeSource{`, - `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, - `System:` + fmt.Sprintf("%v", this.System) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, - `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, - `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, - `StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`, - `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, - `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, - 
`FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Address) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ScaleIOVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *NodeAffinity) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ScaleIOVolumeSource{`, - `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, - `System:` + fmt.Sprintf("%v", this.System) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, - `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, - `StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`, - `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, - `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *ScopeSelector) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { + l = m.RequiredDuringSchedulingIgnoredDuringExecution.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&ScopeSelector{`, - `MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchExpressions), "ScopedResourceSelectorRequirement", "ScopedResourceSelectorRequirement", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *ScopedResourceSelectorRequirement) String() string { - if this == nil { - return "nil" + +func (m *NodeCondition) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ScopedResourceSelectorRequirement{`, - `ScopeName:` + fmt.Sprintf("%v", this.ScopeName) + `,`, - `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, - `Values:` + fmt.Sprintf("%v", this.Values) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastHeartbeatTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *Secret) String() string { - if this == nil { - return "nil" + +func (m *NodeConfigSource) Size() (n int) { + if m == nil { + return 0 } - keysForData := make([]string, 0, len(this.Data)) - for k := range this.Data { - keysForData = append(keysForData, k) + var l int + _ = l + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForData) - mapStringForData := "map[string][]byte{" - for _, k := range keysForData { - mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) + return n +} + +func (m *NodeConfigStatus) Size() (n int) { + if m == nil { + return 0 } - mapStringForData += "}" - keysForStringData := make([]string, 0, len(this.StringData)) - for k := range 
this.StringData { - keysForStringData = append(keysForStringData, k) + var l int + _ = l + if m.Assigned != nil { + l = m.Assigned.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForStringData) - mapStringForStringData := "map[string]string{" - for _, k := range keysForStringData { - mapStringForStringData += fmt.Sprintf("%v: %v,", k, this.StringData[k]) + if m.Active != nil { + l = m.Active.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForStringData += "}" - s := strings.Join([]string{`&Secret{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + mapStringForData + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `StringData:` + mapStringForStringData + `,`, - `}`, - }, "") - return s -} -func (this *SecretEnvSource) String() string { - if this == nil { - return "nil" + if m.LastKnownGood != nil { + l = m.LastKnownGood.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&SecretEnvSource{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s + l = len(m.Error) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *SecretKeySelector) String() string { - if this == nil { - return "nil" + +func (m *NodeDaemonEndpoints) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&SecretKeySelector{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.KubeletEndpoint.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *SecretList) String() string { - if this == nil { - return "nil" + +func (m *NodeList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&SecretList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Secret", "Secret", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *SecretProjection) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&SecretProjection{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s + return n } -func (this *SecretReference) String() string { - if this == nil { - return "nil" + +func (m *NodeProxyOptions) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&SecretReference{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - 
`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *SecretVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *NodeResources) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&SecretVolumeSource{`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, - `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *SecurityContext) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&SecurityContext{`, - `Capabilities:` + strings.Replace(fmt.Sprintf("%v", this.Capabilities), "Capabilities", "Capabilities", 1) + `,`, - `Privileged:` + valueToStringGenerated(this.Privileged) + `,`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "SELinuxOptions", 1) + `,`, - `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, - `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, - `ReadOnlyRootFilesystem:` + valueToStringGenerated(this.ReadOnlyRootFilesystem) + `,`, - `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, - `RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`, - `ProcMount:` + valueToStringGenerated(this.ProcMount) + `,`, - `}`, - }, "") - return s + return n } -func (this *SerializedReference) String() string { - if this == nil { - return "nil" + +func (m *NodeSelector) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&SerializedReference{`, - `Reference:` + strings.Replace(strings.Replace(this.Reference.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *Service) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.NodeSelectorTerms) > 0 { + for _, e := range m.NodeSelectorTerms { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&Service{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceStatus", "ServiceStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *ServiceAccount) String() string { - if this == nil { - return "nil" + +func (m *NodeSelectorRequirement) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ServiceAccount{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Secrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Secrets), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, - `ImagePullSecrets:` 
+ strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ImagePullSecrets), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceAccountList) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ServiceAccountList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ServiceAccount", "ServiceAccount", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *ServiceAccountTokenProjection) String() string { - if this == nil { - return "nil" + +func (m *NodeSelectorTerm) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ServiceAccountTokenProjection{`, - `Audience:` + fmt.Sprintf("%v", this.Audience) + `,`, - `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceList) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.MatchExpressions) > 0 { + for _, e := range m.MatchExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ServiceList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Service", "Service", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ServicePort) String() string { - if this == nil { - return "nil" + if len(m.MatchFields) > 0 { + for _, e := range m.MatchFields { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ServicePort{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, - `Port:` + fmt.Sprintf("%v", this.Port) + `,`, - `TargetPort:` + strings.Replace(strings.Replace(this.TargetPort.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, - `NodePort:` + fmt.Sprintf("%v", this.NodePort) + `,`, - `}`, - }, "") - return s + return n } -func (this *ServiceProxyOptions) String() string { - if this == nil { - return "nil" + +func (m *NodeSpec) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ServiceProxyOptions{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceSpec) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.PodCIDR) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DoNotUse_ExternalID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ProviderID) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.Taints) > 0 { + for _, e := range m.Taints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - keysForSelector := make([]string, 0, len(this.Selector)) - for k := range this.Selector { - keysForSelector = append(keysForSelector, k) + if 
m.ConfigSource != nil { + l = m.ConfigSource.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - mapStringForSelector := "map[string]string{" - for _, k := range keysForSelector { - mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + if len(m.PodCIDRs) > 0 { + for _, s := range m.PodCIDRs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForSelector += "}" - s := strings.Join([]string{`&ServiceSpec{`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "ServicePort", "ServicePort", 1), `&`, ``, 1) + `,`, - `Selector:` + mapStringForSelector + `,`, - `ClusterIP:` + fmt.Sprintf("%v", this.ClusterIP) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `ExternalIPs:` + fmt.Sprintf("%v", this.ExternalIPs) + `,`, - `SessionAffinity:` + fmt.Sprintf("%v", this.SessionAffinity) + `,`, - `LoadBalancerIP:` + fmt.Sprintf("%v", this.LoadBalancerIP) + `,`, - `LoadBalancerSourceRanges:` + fmt.Sprintf("%v", this.LoadBalancerSourceRanges) + `,`, - `ExternalName:` + fmt.Sprintf("%v", this.ExternalName) + `,`, - `ExternalTrafficPolicy:` + fmt.Sprintf("%v", this.ExternalTrafficPolicy) + `,`, - `HealthCheckNodePort:` + fmt.Sprintf("%v", this.HealthCheckNodePort) + `,`, - `PublishNotReadyAddresses:` + fmt.Sprintf("%v", this.PublishNotReadyAddresses) + `,`, - `SessionAffinityConfig:` + strings.Replace(fmt.Sprintf("%v", this.SessionAffinityConfig), "SessionAffinityConfig", "SessionAffinityConfig", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *ServiceStatus) String() string { - if this == nil { - return "nil" + +func (m *NodeStatus) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ServiceStatus{`, - `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "LoadBalancerStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *SessionAffinityConfig) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&SessionAffinityConfig{`, - `ClientIP:` + strings.Replace(fmt.Sprintf("%v", this.ClientIP), "ClientIPConfig", "ClientIPConfig", 1) + `,`, - `}`, - }, "") - return s -} -func (this *StorageOSPersistentVolumeSource) String() string { - if this == nil { - return "nil" + if len(m.Allocatable) > 0 { + for k, v := range m.Allocatable { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&StorageOSPersistentVolumeSource{`, - `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, - `VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "ObjectReference", "ObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *StorageOSVolumeSource) String() string { - if this == nil { - return "nil" + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&StorageOSVolumeSource{`, - `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, - `VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Sysctl) String() string { - if this == nil { - return "nil" + if len(m.Addresses) > 0 { + for _, e := range m.Addresses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&Sysctl{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `}`, - }, "") - return s -} -func (this *TCPSocketAction) String() string { - if this == nil { - return "nil" + l = m.DaemonEndpoints.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.NodeInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Images) > 0 { + for _, e := range m.Images { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&TCPSocketAction{`, - `Port:` + strings.Replace(strings.Replace(this.Port.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `}`, - }, "") - return s -} -func (this *Taint) String() string { - if this == nil { - return "nil" + if len(m.VolumesInUse) > 0 { + for _, s := range m.VolumesInUse { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&Taint{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, - `TimeAdded:` + strings.Replace(fmt.Sprintf("%v", this.TimeAdded), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Toleration) String() string { - if this == nil { - return "nil" + if len(m.VolumesAttached) > 0 { + for _, e := range m.VolumesAttached { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&Toleration{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, - `TolerationSeconds:` + valueToStringGenerated(this.TolerationSeconds) + `,`, - `}`, - }, "") - return s + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *TopologySelectorLabelRequirement) String() string { - if this == nil { - return "nil" + +func (m *NodeSystemInfo) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&TopologySelectorLabelRequirement{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Values:` + fmt.Sprintf("%v", this.Values) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.MachineID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SystemUUID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.BootID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KernelVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.OSImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContainerRuntimeVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KubeletVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KubeProxyVersion) + n += 1 + l + 
sovGenerated(uint64(l)) + l = len(m.OperatingSystem) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Architecture) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *TopologySelectorTerm) String() string { - if this == nil { - return "nil" + +func (m *ObjectFieldSelector) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&TopologySelectorTerm{`, - `MatchLabelExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchLabelExpressions), "TopologySelectorLabelRequirement", "TopologySelectorLabelRequirement", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldPath) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *TypedLocalObjectReference) String() string { - if this == nil { - return "nil" + +func (m *ObjectReference) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&TypedLocalObjectReference{`, - `APIGroup:` + valueToStringGenerated(this.APIGroup) + `,`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldPath) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *Volume) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolume) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Volume{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `VolumeSource:` + strings.Replace(strings.Replace(this.VolumeSource.String(), "VolumeSource", "VolumeSource", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *VolumeDevice) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeClaim) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&VolumeDevice{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *VolumeMount) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeClaimCondition) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&VolumeMount{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`, - `SubPath:` + fmt.Sprintf("%v", this.SubPath) + `,`, - `MountPropagation:` + valueToStringGenerated(this.MountPropagation) + `,`, - `SubPathExpr:` + fmt.Sprintf("%v", this.SubPathExpr) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + 
sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *VolumeNodeAffinity) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeClaimList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&VolumeNodeAffinity{`, - `Required:` + strings.Replace(fmt.Sprintf("%v", this.Required), "NodeSelector", "NodeSelector", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *VolumeProjection) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeClaimSpec) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&VolumeProjection{`, - `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "SecretProjection", "SecretProjection", 1) + `,`, - `DownwardAPI:` + strings.Replace(fmt.Sprintf("%v", this.DownwardAPI), "DownwardAPIProjection", "DownwardAPIProjection", 1) + `,`, - `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapProjection", "ConfigMapProjection", 1) + `,`, - `ServiceAccountToken:` + strings.Replace(fmt.Sprintf("%v", this.ServiceAccountToken), "ServiceAccountTokenProjection", "ServiceAccountTokenProjection", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.StorageClassName != nil { + l = len(*m.StorageClassName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.VolumeMode != nil { + l = len(*m.VolumeMode) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DataSource != nil { + l = m.DataSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *VolumeSource) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeClaimStatus) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&VolumeSource{`, - `HostPath:` + strings.Replace(fmt.Sprintf("%v", this.HostPath), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, - `EmptyDir:` + strings.Replace(fmt.Sprintf("%v", this.EmptyDir), "EmptyDirVolumeSource", "EmptyDirVolumeSource", 1) + `,`, - `GCEPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.GCEPersistentDisk), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, - `AWSElasticBlockStore:` + strings.Replace(fmt.Sprintf("%v", this.AWSElasticBlockStore), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, - `GitRepo:` + strings.Replace(fmt.Sprintf("%v", this.GitRepo), "GitRepoVolumeSource", "GitRepoVolumeSource", 1) + `,`, - `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "SecretVolumeSource", "SecretVolumeSource", 1) + `,`, - `NFS:` + strings.Replace(fmt.Sprintf("%v", this.NFS), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, - `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), "ISCSIVolumeSource", 
"ISCSIVolumeSource", 1) + `,`, - `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`, - `PersistentVolumeClaim:` + strings.Replace(fmt.Sprintf("%v", this.PersistentVolumeClaim), "PersistentVolumeClaimVolumeSource", "PersistentVolumeClaimVolumeSource", 1) + `,`, - `RBD:` + strings.Replace(fmt.Sprintf("%v", this.RBD), "RBDVolumeSource", "RBDVolumeSource", 1) + `,`, - `FlexVolume:` + strings.Replace(fmt.Sprintf("%v", this.FlexVolume), "FlexVolumeSource", "FlexVolumeSource", 1) + `,`, - `Cinder:` + strings.Replace(fmt.Sprintf("%v", this.Cinder), "CinderVolumeSource", "CinderVolumeSource", 1) + `,`, - `CephFS:` + strings.Replace(fmt.Sprintf("%v", this.CephFS), "CephFSVolumeSource", "CephFSVolumeSource", 1) + `,`, - `Flocker:` + strings.Replace(fmt.Sprintf("%v", this.Flocker), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, - `DownwardAPI:` + strings.Replace(fmt.Sprintf("%v", this.DownwardAPI), "DownwardAPIVolumeSource", "DownwardAPIVolumeSource", 1) + `,`, - `FC:` + strings.Replace(fmt.Sprintf("%v", this.FC), "FCVolumeSource", "FCVolumeSource", 1) + `,`, - `AzureFile:` + strings.Replace(fmt.Sprintf("%v", this.AzureFile), "AzureFileVolumeSource", "AzureFileVolumeSource", 1) + `,`, - `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapVolumeSource", "ConfigMapVolumeSource", 1) + `,`, - `VsphereVolume:` + strings.Replace(fmt.Sprintf("%v", this.VsphereVolume), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`, - `Quobyte:` + strings.Replace(fmt.Sprintf("%v", this.Quobyte), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, - `AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, - `PhotonPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.PhotonPersistentDisk), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, - `PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, - `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`, - `Projected:` + strings.Replace(fmt.Sprintf("%v", this.Projected), "ProjectedVolumeSource", "ProjectedVolumeSource", 1) + `,`, - `StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSVolumeSource", "StorageOSVolumeSource", 1) + `,`, - `CSI:` + strings.Replace(fmt.Sprintf("%v", this.CSI), "CSIVolumeSource", "CSIVolumeSource", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *VsphereVirtualDiskVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeClaimVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&VsphereVirtualDiskVolumeSource{`, - `VolumePath:` + fmt.Sprintf("%v", this.VolumePath) + `,`, - `FSType:` + 
fmt.Sprintf("%v", this.FSType) + `,`, - `StoragePolicyName:` + fmt.Sprintf("%v", this.StoragePolicyName) + `,`, - `StoragePolicyID:` + fmt.Sprintf("%v", this.StoragePolicyID) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.ClaimName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n } -func (this *WeightedPodAffinityTerm) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&WeightedPodAffinityTerm{`, - `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, - `PodAffinityTerm:` + strings.Replace(strings.Replace(this.PodAffinityTerm.String(), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + +func (m *PersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + var l int + _ = l + if m.GCEPersistentDisk != nil { + l = m.GCEPersistentDisk.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AWSElasticBlockStore != nil { + l = m.AWSElasticBlockStore.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.HostPath != nil { + l = m.HostPath.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Glusterfs != nil { + l = m.Glusterfs.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NFS != nil { + l = m.NFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RBD != nil { + l = m.RBD.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ISCSI != nil { + l = m.ISCSI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Cinder != nil { + l = m.Cinder.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CephFS != nil { + l = m.CephFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FC != nil { + l = m.FC.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Flocker != nil { + l = m.Flocker.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FlexVolume != nil { + l = m.FlexVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AzureFile != nil { + l = m.AzureFile.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.VsphereVolume != nil { + l = m.VsphereVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Quobyte != nil { + l = m.Quobyte.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AzureDisk != nil { + l = m.AzureDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PhotonPersistentDisk != nil { + l = m.PhotonPersistentDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PortworxVolume != nil { + l = m.PortworxVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ScaleIO != nil { + l = m.ScaleIO.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Local != nil { + l = m.Local.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.StorageOS != nil { + l = m.StorageOS.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.CSI != nil { + l = m.CSI.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PersistentVolumeSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + 
len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = m.PersistentVolumeSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ClaimRef != nil { + l = m.ClaimRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.PersistentVolumeReclaimPolicy) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageClassName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.MountOptions) > 0 { + for _, s := range m.MountOptions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.VolumeMode != nil { + l = len(*m.VolumeMode) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NodeAffinity != nil { + l = m.NodeAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PersistentVolumeStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PhotonPersistentDiskVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PdID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Pod) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodAffinity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodAffinityTerm) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.TopologyKey) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodAntiAffinity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodAttachOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + n += 2 + n += 2 + n += 2 + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + 
l + sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodDNSConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Nameservers) > 0 { + for _, s := range m.Nameservers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Searches) > 0 { + for _, s := range m.Searches { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodDNSConfigOption) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Value != nil { + l = len(*m.Value) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodExecOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + n += 2 + n += 2 + n += 2 + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodIP) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodLogOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + if m.SinceSeconds != nil { + n += 1 + sovGenerated(uint64(*m.SinceSeconds)) + } + if m.SinceTime != nil { + l = m.SinceTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if m.TailLines != nil { + n += 1 + sovGenerated(uint64(*m.TailLines)) + } + if m.LimitBytes != nil { + n += 1 + sovGenerated(uint64(*m.LimitBytes)) + } + return n +} + +func (m *PodPortForwardOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + n += 1 + sovGenerated(uint64(e)) + } + } + return n +} + +func (m *PodProxyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodReadinessGate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ConditionType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodSecurityContext) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RunAsUser != nil { + n += 1 + sovGenerated(uint64(*m.RunAsUser)) + } + if m.RunAsNonRoot != nil { + n += 2 + } + if len(m.SupplementalGroups) > 0 { + for _, e := range m.SupplementalGroups { + n += 1 + sovGenerated(uint64(e)) + } + } + if m.FSGroup != nil { + n += 1 + sovGenerated(uint64(*m.FSGroup)) + } + if m.RunAsGroup != nil { + n += 1 + sovGenerated(uint64(*m.RunAsGroup)) + } + if len(m.Sysctls) > 0 { + for _, e := range m.Sysctls { + l = e.Size() + 
n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.WindowsOptions != nil { + l = m.WindowsOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodSignature) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PodController != nil { + l = m.PodController.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Containers) > 0 { + for _, e := range m.Containers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RestartPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.TerminationGracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TerminationGracePeriodSeconds)) + } + if m.ActiveDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) + } + l = len(m.DNSPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.ServiceAccountName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DeprecatedServiceAccount) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + n += 2 + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ImagePullSecrets) > 0 { + for _, e := range m.ImagePullSecrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Hostname) + n += 2 + l + sovGenerated(uint64(l)) + l = len(m.Subdomain) + n += 2 + l + sovGenerated(uint64(l)) + if m.Affinity != nil { + l = m.Affinity.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + l = len(m.SchedulerName) + n += 2 + l + sovGenerated(uint64(l)) + if len(m.InitContainers) > 0 { + for _, e := range m.InitContainers { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.AutomountServiceAccountToken != nil { + n += 3 + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.HostAliases) > 0 { + for _, e := range m.HostAliases { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + l = len(m.PriorityClassName) + n += 2 + l + sovGenerated(uint64(l)) + if m.Priority != nil { + n += 2 + sovGenerated(uint64(*m.Priority)) + } + if m.DNSConfig != nil { + l = m.DNSConfig.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ShareProcessNamespace != nil { + n += 3 + } + if len(m.ReadinessGates) > 0 { + for _, e := range m.ReadinessGates { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.RuntimeClassName != nil { + l = len(*m.RuntimeClassName) + n += 2 + l + sovGenerated(uint64(l)) + } + if m.EnableServiceLinks != nil { + n += 3 + } + if m.PreemptionPolicy != nil { + l = len(*m.PreemptionPolicy) + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.Overhead) > 0 { + for k, v := range m.Overhead { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.TopologySpreadConstraints) > 0 { + for _, e := range m.TopologySpreadConstraints { + l 
= e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.EphemeralContainers) > 0 { + for _, e := range m.EphemeralContainers { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.HostIP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PodIP) + n += 1 + l + sovGenerated(uint64(l)) + if m.StartTime != nil { + l = m.StartTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ContainerStatuses) > 0 { + for _, e := range m.ContainerStatuses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.QOSClass) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.InitContainerStatuses) > 0 { + for _, e := range m.InitContainerStatuses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.NominatedNodeName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.PodIPs) > 0 { + for _, e := range m.PodIPs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.EphemeralContainerStatuses) > 0 { + for _, e := range m.EphemeralContainerStatuses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodStatusResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodTemplate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodTemplateList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodTemplateSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PortworxVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *Preconditions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UID != nil { + l = len(*m.UID) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PreferAvoidPodsEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.PodSignature.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.EvictionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PreferredSchedulingTerm) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Weight)) + l = m.Preference.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n 
+} + +func (m *Probe) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Handler.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.InitialDelaySeconds)) + n += 1 + sovGenerated(uint64(m.TimeoutSeconds)) + n += 1 + sovGenerated(uint64(m.PeriodSeconds)) + n += 1 + sovGenerated(uint64(m.SuccessThreshold)) + n += 1 + sovGenerated(uint64(m.FailureThreshold)) + return n +} + +func (m *ProjectedVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Sources) > 0 { + for _, e := range m.Sources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) + } + return n +} + +func (m *QuobyteVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Registry) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Volume) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Tenant) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RBDPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.CephMonitors) > 0 { + for _, s := range m.CephMonitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RBDImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RBDPool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RadosUser) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Keyring) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *RBDVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.CephMonitors) > 0 { + for _, s := range m.CephMonitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RBDImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RBDPool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RadosUser) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Keyring) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *RangeAllocation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Range) + n += 1 + l + sovGenerated(uint64(l)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ReplicationController) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicationControllerCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicationControllerList) Size() 
(n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ReplicationControllerSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Template != nil { + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + return n +} + +func (m *ReplicationControllerStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceFieldSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Divisor.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceQuota) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceQuotaList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceQuotaSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Hard) > 0 { + for k, v := range m.Hard { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ScopeSelector != nil { + l = m.ScopeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ResourceQuotaStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Hard) > 0 { + for k, v := range m.Hard { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Used) > 0 { + for k, v := range m.Used { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ResourceRequirements) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Limits) > 0 { + for 
k, v := range m.Limits { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Requests) > 0 { + for k, v := range m.Requests { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *SELinuxOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Role) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Level) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScaleIOPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Gateway) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.System) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + l = len(m.ProtectionDomain) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageMode) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *ScaleIOVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Gateway) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.System) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + l = len(m.ProtectionDomain) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageMode) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *ScopeSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.MatchExpressions) > 0 { + for _, e := range m.MatchExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ScopedResourceSelectorRequirement) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ScopeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Secret) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Data) > 0 { + for k, v := range m.Data { + _ = k + _ = v + l = 0 + if v != nil { + l = 1 + len(v) + sovGenerated(uint64(len(v))) + } + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.StringData) > 0 { + for k, v := range m.StringData { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m 
*SecretEnvSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecretKeySelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecretList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SecretProjection) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecretReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SecretVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) + } + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecurityContext) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Capabilities != nil { + l = m.Capabilities.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Privileged != nil { + n += 2 + } + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RunAsUser != nil { + n += 1 + sovGenerated(uint64(*m.RunAsUser)) + } + if m.RunAsNonRoot != nil { + n += 2 + } + if m.ReadOnlyRootFilesystem != nil { + n += 2 + } + if m.AllowPrivilegeEscalation != nil { + n += 2 + } + if m.RunAsGroup != nil { + n += 1 + sovGenerated(uint64(*m.RunAsGroup)) + } + if m.ProcMount != nil { + l = len(*m.ProcMount) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.WindowsOptions != nil { + l = m.WindowsOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SerializedReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Reference.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Service) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceAccount) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ImagePullSecrets) > 0 { + for _, e := range m.ImagePullSecrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AutomountServiceAccountToken != nil { + n += 
2 + } + return n +} + +func (m *ServiceAccountList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceAccountTokenProjection) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Audience) + n += 1 + l + sovGenerated(uint64(l)) + if m.ExpirationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ExpirationSeconds)) + } + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServicePort) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Port)) + l = m.TargetPort.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.NodePort)) + return n +} + +func (m *ServiceProxyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.ClusterIP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ExternalIPs) > 0 { + for _, s := range m.ExternalIPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.SessionAffinity) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.LoadBalancerIP) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.LoadBalancerSourceRanges) > 0 { + for _, s := range m.LoadBalancerSourceRanges { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ExternalName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ExternalTrafficPolicy) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.HealthCheckNodePort)) + n += 2 + if m.SessionAffinityConfig != nil { + l = m.SessionAffinityConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.IPFamily != nil { + l = len(*m.IPFamily) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ServiceStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.LoadBalancer.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SessionAffinityConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ClientIP != nil { + l = m.ClientIP.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *StorageOSPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeNamespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 
+ l + sovGenerated(uint64(l)) + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *StorageOSVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeNamespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Sysctl) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TCPSocketAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Taint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Effect) + n += 1 + l + sovGenerated(uint64(l)) + if m.TimeAdded != nil { + l = m.TimeAdded.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Toleration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Effect) + n += 1 + l + sovGenerated(uint64(l)) + if m.TolerationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TolerationSeconds)) + } + return n +} + +func (m *TopologySelectorLabelRequirement) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TopologySelectorTerm) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.MatchLabelExpressions) > 0 { + for _, e := range m.MatchLabelExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TopologySpreadConstraint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.MaxSkew)) + l = len(m.TopologyKey) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.WhenUnsatisfiable) + n += 1 + l + sovGenerated(uint64(l)) + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *TypedLocalObjectReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.APIGroup != nil { + l = len(*m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Volume) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.VolumeSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeDevice) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DevicePath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m 
*VolumeMount) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.MountPath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SubPath) + n += 1 + l + sovGenerated(uint64(l)) + if m.MountPropagation != nil { + l = len(*m.MountPropagation) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.SubPathExpr) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeNodeAffinity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Required != nil { + l = m.Required.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeProjection) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DownwardAPI != nil { + l = m.DownwardAPI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ServiceAccountToken != nil { + l = m.ServiceAccountToken.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HostPath != nil { + l = m.HostPath.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.EmptyDir != nil { + l = m.EmptyDir.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GCEPersistentDisk != nil { + l = m.GCEPersistentDisk.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AWSElasticBlockStore != nil { + l = m.AWSElasticBlockStore.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GitRepo != nil { + l = m.GitRepo.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NFS != nil { + l = m.NFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ISCSI != nil { + l = m.ISCSI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Glusterfs != nil { + l = m.Glusterfs.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PersistentVolumeClaim != nil { + l = m.PersistentVolumeClaim.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RBD != nil { + l = m.RBD.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FlexVolume != nil { + l = m.FlexVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Cinder != nil { + l = m.Cinder.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CephFS != nil { + l = m.CephFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Flocker != nil { + l = m.Flocker.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DownwardAPI != nil { + l = m.DownwardAPI.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.FC != nil { + l = m.FC.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.AzureFile != nil { + l = m.AzureFile.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.VsphereVolume != nil { + l = m.VsphereVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Quobyte != nil { + l = m.Quobyte.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.AzureDisk != nil { + l = m.AzureDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PhotonPersistentDisk != nil { + l = m.PhotonPersistentDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PortworxVolume != nil { + l = m.PortworxVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if 
m.ScaleIO != nil { + l = m.ScaleIO.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Projected != nil { + l = m.Projected.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.StorageOS != nil { + l = m.StorageOS.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.CSI != nil { + l = m.CSI.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VsphereVirtualDiskVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VolumePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePolicyName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePolicyID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *WeightedPodAffinityTerm) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Weight)) + l = m.PodAffinityTerm.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *WindowsSecurityContextOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GMSACredentialSpecName != nil { + l = len(*m.GMSACredentialSpecName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GMSACredentialSpec != nil { + l = len(*m.GMSACredentialSpec) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RunAsUserName != nil { + l = len(*m.RunAsUserName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AWSElasticBlockStoreVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AWSElasticBlockStoreVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Affinity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Affinity{`, + `NodeAffinity:` + strings.Replace(this.NodeAffinity.String(), "NodeAffinity", "NodeAffinity", 1) + `,`, + `PodAffinity:` + strings.Replace(this.PodAffinity.String(), "PodAffinity", "PodAffinity", 1) + `,`, + `PodAntiAffinity:` + strings.Replace(this.PodAntiAffinity.String(), "PodAntiAffinity", "PodAntiAffinity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AttachedVolume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AttachedVolume{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, + `}`, + }, "") + return s +} +func (this *AvoidPods) String() string { + if this == nil { + return "nil" + } + repeatedStringForPreferAvoidPods := "[]PreferAvoidPodsEntry{" + for _, f := range this.PreferAvoidPods { + repeatedStringForPreferAvoidPods += strings.Replace(strings.Replace(f.String(), "PreferAvoidPodsEntry", "PreferAvoidPodsEntry", 1), `&`, ``, 1) + "," + } + repeatedStringForPreferAvoidPods += "}" + s := strings.Join([]string{`&AvoidPods{`, + `PreferAvoidPods:` + repeatedStringForPreferAvoidPods + `,`, + `}`, + }, "") + return s +} +func (this *AzureDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AzureDiskVolumeSource{`, + `DiskName:` + fmt.Sprintf("%v", this.DiskName) + `,`, + `DataDiskURI:` + 
fmt.Sprintf("%v", this.DataDiskURI) + `,`, + `CachingMode:` + valueToStringGenerated(this.CachingMode) + `,`, + `FSType:` + valueToStringGenerated(this.FSType) + `,`, + `ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`, + `Kind:` + valueToStringGenerated(this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *AzureFilePersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AzureFilePersistentVolumeSource{`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretNamespace:` + valueToStringGenerated(this.SecretNamespace) + `,`, + `}`, + }, "") + return s +} +func (this *AzureFileVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AzureFileVolumeSource{`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Binding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Binding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CSIPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + keysForVolumeAttributes := make([]string, 0, len(this.VolumeAttributes)) + for k := range this.VolumeAttributes { + keysForVolumeAttributes = append(keysForVolumeAttributes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) + mapStringForVolumeAttributes := "map[string]string{" + for _, k := range keysForVolumeAttributes { + mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k]) + } + mapStringForVolumeAttributes += "}" + s := strings.Join([]string{`&CSIPersistentVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `VolumeHandle:` + fmt.Sprintf("%v", this.VolumeHandle) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `VolumeAttributes:` + mapStringForVolumeAttributes + `,`, + `ControllerPublishSecretRef:` + strings.Replace(this.ControllerPublishSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `NodeStageSecretRef:` + strings.Replace(this.NodeStageSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `NodePublishSecretRef:` + strings.Replace(this.NodePublishSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `ControllerExpandSecretRef:` + strings.Replace(this.ControllerExpandSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CSIVolumeSource) String() string { + if this == nil { + return "nil" + } + keysForVolumeAttributes := make([]string, 0, len(this.VolumeAttributes)) + for k := range this.VolumeAttributes { + keysForVolumeAttributes = append(keysForVolumeAttributes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) + mapStringForVolumeAttributes := "map[string]string{" + for _, k := range keysForVolumeAttributes { + mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k]) + } + 
mapStringForVolumeAttributes += "}" + s := strings.Join([]string{`&CSIVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`, + `FSType:` + valueToStringGenerated(this.FSType) + `,`, + `VolumeAttributes:` + mapStringForVolumeAttributes + `,`, + `NodePublishSecretRef:` + strings.Replace(this.NodePublishSecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Capabilities) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Capabilities{`, + `Add:` + fmt.Sprintf("%v", this.Add) + `,`, + `Drop:` + fmt.Sprintf("%v", this.Drop) + `,`, + `}`, + }, "") + return s +} +func (this *CephFSPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CephFSPersistentVolumeSource{`, + `Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *CephFSVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CephFSVolumeSource{`, + `Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *CinderPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CinderPersistentVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CinderVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CinderVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClientIPConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClientIPConfig{`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ComponentCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ComponentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} +func (this *ComponentStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]ComponentCondition{" + for _, f := range 
this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ComponentCondition", "ComponentCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ComponentStatus{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *ComponentStatusList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ComponentStatus{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ComponentStatus", "ComponentStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ComponentStatusList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMap) String() string { + if this == nil { + return "nil" + } + keysForData := make([]string, 0, len(this.Data)) + for k := range this.Data { + keysForData = append(keysForData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + mapStringForData := "map[string]string{" + for _, k := range keysForData { + mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) + } + mapStringForData += "}" + keysForBinaryData := make([]string, 0, len(this.BinaryData)) + for k := range this.BinaryData { + keysForBinaryData = append(keysForBinaryData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForBinaryData) + mapStringForBinaryData := "map[string][]byte{" + for _, k := range keysForBinaryData { + mapStringForBinaryData += fmt.Sprintf("%v: %v,", k, this.BinaryData[k]) + } + mapStringForBinaryData += "}" + s := strings.Join([]string{`&ConfigMap{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + mapStringForData + `,`, + `BinaryData:` + mapStringForBinaryData + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapEnvSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapEnvSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapKeySelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapKeySelector{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ConfigMap{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ConfigMap", "ConfigMap", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ConfigMapList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) 
+ `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapNodeConfigSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapNodeConfigSource{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `KubeletConfigKey:` + fmt.Sprintf("%v", this.KubeletConfigKey) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapProjection) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]KeyToPath{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ConfigMapProjection{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapVolumeSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]KeyToPath{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ConfigMapVolumeSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *Container) String() string { + if this == nil { + return "nil" + } + repeatedStringForPorts := "[]ContainerPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "ContainerPort", "ContainerPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += strings.Replace(strings.Replace(f.String(), "EnvVar", "EnvVar", 1), `&`, ``, 1) + "," + } + repeatedStringForEnv += "}" + repeatedStringForVolumeMounts := "[]VolumeMount{" + for _, f := range this.VolumeMounts { + repeatedStringForVolumeMounts += strings.Replace(strings.Replace(f.String(), "VolumeMount", "VolumeMount", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumeMounts += "}" + repeatedStringForEnvFrom := "[]EnvFromSource{" + for _, f := range this.EnvFrom { + repeatedStringForEnvFrom += strings.Replace(strings.Replace(f.String(), "EnvFromSource", "EnvFromSource", 1), `&`, ``, 1) + "," + } + repeatedStringForEnvFrom += "}" + repeatedStringForVolumeDevices := "[]VolumeDevice{" + for _, f := range this.VolumeDevices { + repeatedStringForVolumeDevices += strings.Replace(strings.Replace(f.String(), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumeDevices += "}" + s := strings.Join([]string{`&Container{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + 
`,`, + `WorkingDir:` + fmt.Sprintf("%v", this.WorkingDir) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `Env:` + repeatedStringForEnv + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, + `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, + `LivenessProbe:` + strings.Replace(this.LivenessProbe.String(), "Probe", "Probe", 1) + `,`, + `ReadinessProbe:` + strings.Replace(this.ReadinessProbe.String(), "Probe", "Probe", 1) + `,`, + `Lifecycle:` + strings.Replace(this.Lifecycle.String(), "Lifecycle", "Lifecycle", 1) + `,`, + `TerminationMessagePath:` + fmt.Sprintf("%v", this.TerminationMessagePath) + `,`, + `ImagePullPolicy:` + fmt.Sprintf("%v", this.ImagePullPolicy) + `,`, + `SecurityContext:` + strings.Replace(this.SecurityContext.String(), "SecurityContext", "SecurityContext", 1) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `StdinOnce:` + fmt.Sprintf("%v", this.StdinOnce) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `EnvFrom:` + repeatedStringForEnvFrom + `,`, + `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, + `VolumeDevices:` + repeatedStringForVolumeDevices + `,`, + `StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerImage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerImage{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `SizeBytes:` + fmt.Sprintf("%v", this.SizeBytes) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerPort{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `HostPort:` + fmt.Sprintf("%v", this.HostPort) + `,`, + `ContainerPort:` + fmt.Sprintf("%v", this.ContainerPort) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerState) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerState{`, + `Waiting:` + strings.Replace(this.Waiting.String(), "ContainerStateWaiting", "ContainerStateWaiting", 1) + `,`, + `Running:` + strings.Replace(this.Running.String(), "ContainerStateRunning", "ContainerStateRunning", 1) + `,`, + `Terminated:` + strings.Replace(this.Terminated.String(), "ContainerStateTerminated", "ContainerStateTerminated", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStateRunning) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStateRunning{`, + `StartedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStateTerminated) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStateTerminated{`, + `ExitCode:` + fmt.Sprintf("%v", this.ExitCode) + `,`, + `Signal:` + fmt.Sprintf("%v", this.Signal) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `StartedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `FinishedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.FinishedAt), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `ContainerID:` + 
fmt.Sprintf("%v", this.ContainerID) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStateWaiting) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStateWaiting{`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `State:` + strings.Replace(strings.Replace(this.State.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, + `LastTerminationState:` + strings.Replace(strings.Replace(this.LastTerminationState.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, + `Ready:` + fmt.Sprintf("%v", this.Ready) + `,`, + `RestartCount:` + fmt.Sprintf("%v", this.RestartCount) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `ImageID:` + fmt.Sprintf("%v", this.ImageID) + `,`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `Started:` + valueToStringGenerated(this.Started) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonEndpoint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonEndpoint{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *DownwardAPIProjection) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]DownwardAPIVolumeFile{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&DownwardAPIProjection{`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *DownwardAPIVolumeFile) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DownwardAPIVolumeFile{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `FieldRef:` + strings.Replace(this.FieldRef.String(), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`, + `ResourceFieldRef:` + strings.Replace(this.ResourceFieldRef.String(), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, + `Mode:` + valueToStringGenerated(this.Mode) + `,`, + `}`, + }, "") + return s +} +func (this *DownwardAPIVolumeSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]DownwardAPIVolumeFile{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&DownwardAPIVolumeSource{`, + `Items:` + repeatedStringForItems + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `}`, + }, "") + return s +} +func (this *EmptyDirVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EmptyDirVolumeSource{`, + `Medium:` + fmt.Sprintf("%v", this.Medium) + `,`, + `SizeLimit:` + strings.Replace(fmt.Sprintf("%v", this.SizeLimit), "Quantity", "resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointAddress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointAddress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `TargetRef:` + strings.Replace(this.TargetRef.String(), 
"ObjectReference", "ObjectReference", 1) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `NodeName:` + valueToStringGenerated(this.NodeName) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointPort{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointSubset) String() string { + if this == nil { + return "nil" + } + repeatedStringForAddresses := "[]EndpointAddress{" + for _, f := range this.Addresses { + repeatedStringForAddresses += strings.Replace(strings.Replace(f.String(), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + "," + } + repeatedStringForAddresses += "}" + repeatedStringForNotReadyAddresses := "[]EndpointAddress{" + for _, f := range this.NotReadyAddresses { + repeatedStringForNotReadyAddresses += strings.Replace(strings.Replace(f.String(), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + "," + } + repeatedStringForNotReadyAddresses += "}" + repeatedStringForPorts := "[]EndpointPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "EndpointPort", "EndpointPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + s := strings.Join([]string{`&EndpointSubset{`, + `Addresses:` + repeatedStringForAddresses + `,`, + `NotReadyAddresses:` + repeatedStringForNotReadyAddresses + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `}`, + }, "") + return s +} +func (this *Endpoints) String() string { + if this == nil { + return "nil" + } + repeatedStringForSubsets := "[]EndpointSubset{" + for _, f := range this.Subsets { + repeatedStringForSubsets += strings.Replace(strings.Replace(f.String(), "EndpointSubset", "EndpointSubset", 1), `&`, ``, 1) + "," + } + repeatedStringForSubsets += "}" + s := strings.Join([]string{`&Endpoints{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subsets:` + repeatedStringForSubsets + `,`, + `}`, + }, "") + return s +} +func (this *EndpointsList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Endpoints{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Endpoints", "Endpoints", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&EndpointsList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *EnvFromSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnvFromSource{`, + `Prefix:` + fmt.Sprintf("%v", this.Prefix) + `,`, + `ConfigMapRef:` + strings.Replace(this.ConfigMapRef.String(), "ConfigMapEnvSource", "ConfigMapEnvSource", 1) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretEnvSource", "SecretEnvSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EnvVar) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnvVar{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `ValueFrom:` + strings.Replace(this.ValueFrom.String(), "EnvVarSource", "EnvVarSource", 1) + 
`,`, + `}`, + }, "") + return s +} +func (this *EnvVarSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnvVarSource{`, + `FieldRef:` + strings.Replace(this.FieldRef.String(), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`, + `ResourceFieldRef:` + strings.Replace(this.ResourceFieldRef.String(), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, + `ConfigMapKeyRef:` + strings.Replace(this.ConfigMapKeyRef.String(), "ConfigMapKeySelector", "ConfigMapKeySelector", 1) + `,`, + `SecretKeyRef:` + strings.Replace(this.SecretKeyRef.String(), "SecretKeySelector", "SecretKeySelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EphemeralContainer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EphemeralContainer{`, + `EphemeralContainerCommon:` + strings.Replace(strings.Replace(this.EphemeralContainerCommon.String(), "EphemeralContainerCommon", "EphemeralContainerCommon", 1), `&`, ``, 1) + `,`, + `TargetContainerName:` + fmt.Sprintf("%v", this.TargetContainerName) + `,`, + `}`, + }, "") + return s +} +func (this *EphemeralContainerCommon) String() string { + if this == nil { + return "nil" + } + repeatedStringForPorts := "[]ContainerPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "ContainerPort", "ContainerPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += strings.Replace(strings.Replace(f.String(), "EnvVar", "EnvVar", 1), `&`, ``, 1) + "," + } + repeatedStringForEnv += "}" + repeatedStringForVolumeMounts := "[]VolumeMount{" + for _, f := range this.VolumeMounts { + repeatedStringForVolumeMounts += strings.Replace(strings.Replace(f.String(), "VolumeMount", "VolumeMount", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumeMounts += "}" + repeatedStringForEnvFrom := "[]EnvFromSource{" + for _, f := range this.EnvFrom { + repeatedStringForEnvFrom += strings.Replace(strings.Replace(f.String(), "EnvFromSource", "EnvFromSource", 1), `&`, ``, 1) + "," + } + repeatedStringForEnvFrom += "}" + repeatedStringForVolumeDevices := "[]VolumeDevice{" + for _, f := range this.VolumeDevices { + repeatedStringForVolumeDevices += strings.Replace(strings.Replace(f.String(), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumeDevices += "}" + s := strings.Join([]string{`&EphemeralContainerCommon{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + `,`, + `WorkingDir:` + fmt.Sprintf("%v", this.WorkingDir) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `Env:` + repeatedStringForEnv + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, + `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, + `LivenessProbe:` + strings.Replace(this.LivenessProbe.String(), "Probe", "Probe", 1) + `,`, + `ReadinessProbe:` + strings.Replace(this.ReadinessProbe.String(), "Probe", "Probe", 1) + `,`, + `Lifecycle:` + strings.Replace(this.Lifecycle.String(), "Lifecycle", "Lifecycle", 1) + `,`, + `TerminationMessagePath:` + fmt.Sprintf("%v", this.TerminationMessagePath) + `,`, + `ImagePullPolicy:` + fmt.Sprintf("%v", this.ImagePullPolicy) + `,`, + `SecurityContext:` + 
strings.Replace(this.SecurityContext.String(), "SecurityContext", "SecurityContext", 1) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `StdinOnce:` + fmt.Sprintf("%v", this.StdinOnce) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `EnvFrom:` + repeatedStringForEnvFrom + `,`, + `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, + `VolumeDevices:` + repeatedStringForVolumeDevices + `,`, + `StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EphemeralContainers) String() string { + if this == nil { + return "nil" + } + repeatedStringForEphemeralContainers := "[]EphemeralContainer{" + for _, f := range this.EphemeralContainers { + repeatedStringForEphemeralContainers += strings.Replace(strings.Replace(f.String(), "EphemeralContainer", "EphemeralContainer", 1), `&`, ``, 1) + "," + } + repeatedStringForEphemeralContainers += "}" + s := strings.Join([]string{`&EphemeralContainers{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `EphemeralContainers:` + repeatedStringForEphemeralContainers + `,`, + `}`, + }, "") + return s +} +func (this *Event) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Event{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `InvolvedObject:` + strings.Replace(strings.Replace(this.InvolvedObject.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "EventSource", "EventSource", 1), `&`, ``, 1) + `,`, + `FirstTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.FirstTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `EventTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EventTime), "MicroTime", "v1.MicroTime", 1), `&`, ``, 1) + `,`, + `Series:` + strings.Replace(this.Series.String(), "EventSeries", "EventSeries", 1) + `,`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `Related:` + strings.Replace(this.Related.String(), "ObjectReference", "ObjectReference", 1) + `,`, + `ReportingController:` + fmt.Sprintf("%v", this.ReportingController) + `,`, + `ReportingInstance:` + fmt.Sprintf("%v", this.ReportingInstance) + `,`, + `}`, + }, "") + return s +} +func (this *EventList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Event{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Event", "Event", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&EventList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *EventSeries) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventSeries{`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + 
`LastObservedTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastObservedTime), "MicroTime", "v1.MicroTime", 1), `&`, ``, 1) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `}`, + }, "") + return s +} +func (this *EventSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventSource{`, + `Component:` + fmt.Sprintf("%v", this.Component) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `}`, + }, "") + return s +} +func (this *ExecAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExecAction{`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `}`, + }, "") + return s +} +func (this *FCVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FCVolumeSource{`, + `TargetWWNs:` + fmt.Sprintf("%v", this.TargetWWNs) + `,`, + `Lun:` + valueToStringGenerated(this.Lun) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `WWIDs:` + fmt.Sprintf("%v", this.WWIDs) + `,`, + `}`, + }, "") + return s +} +func (this *FlexPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + keysForOptions := make([]string, 0, len(this.Options)) + for k := range this.Options { + keysForOptions = append(keysForOptions, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + mapStringForOptions := "map[string]string{" + for _, k := range keysForOptions { + mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + } + mapStringForOptions += "}" + s := strings.Join([]string{`&FlexPersistentVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Options:` + mapStringForOptions + `,`, + `}`, + }, "") + return s +} +func (this *FlexVolumeSource) String() string { + if this == nil { + return "nil" + } + keysForOptions := make([]string, 0, len(this.Options)) + for k := range this.Options { + keysForOptions = append(keysForOptions, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + mapStringForOptions := "map[string]string{" + for _, k := range keysForOptions { + mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + } + mapStringForOptions += "}" + s := strings.Join([]string{`&FlexVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Options:` + mapStringForOptions + `,`, + `}`, + }, "") + return s +} +func (this *FlockerVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlockerVolumeSource{`, + `DatasetName:` + fmt.Sprintf("%v", this.DatasetName) + `,`, + `DatasetUUID:` + fmt.Sprintf("%v", this.DatasetUUID) + `,`, + `}`, + }, "") + return s +} +func (this *GCEPersistentDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GCEPersistentDiskVolumeSource{`, + `PDName:` + fmt.Sprintf("%v", this.PDName) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) 
+ `,`, + `}`, + }, "") + return s +} +func (this *GitRepoVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitRepoVolumeSource{`, + `Repository:` + fmt.Sprintf("%v", this.Repository) + `,`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `Directory:` + fmt.Sprintf("%v", this.Directory) + `,`, + `}`, + }, "") + return s +} +func (this *GlusterfsPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GlusterfsPersistentVolumeSource{`, + `EndpointsName:` + fmt.Sprintf("%v", this.EndpointsName) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `EndpointsNamespace:` + valueToStringGenerated(this.EndpointsNamespace) + `,`, + `}`, + }, "") + return s +} +func (this *GlusterfsVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GlusterfsVolumeSource{`, + `EndpointsName:` + fmt.Sprintf("%v", this.EndpointsName) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPGetAction) String() string { + if this == nil { + return "nil" + } + repeatedStringForHTTPHeaders := "[]HTTPHeader{" + for _, f := range this.HTTPHeaders { + repeatedStringForHTTPHeaders += strings.Replace(strings.Replace(f.String(), "HTTPHeader", "HTTPHeader", 1), `&`, ``, 1) + "," + } + repeatedStringForHTTPHeaders += "}" + s := strings.Join([]string{`&HTTPGetAction{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Port:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `Scheme:` + fmt.Sprintf("%v", this.Scheme) + `,`, + `HTTPHeaders:` + repeatedStringForHTTPHeaders + `,`, + `}`, + }, "") + return s +} +func (this *HTTPHeader) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPHeader{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *Handler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Handler{`, + `Exec:` + strings.Replace(this.Exec.String(), "ExecAction", "ExecAction", 1) + `,`, + `HTTPGet:` + strings.Replace(this.HTTPGet.String(), "HTTPGetAction", "HTTPGetAction", 1) + `,`, + `TCPSocket:` + strings.Replace(this.TCPSocket.String(), "TCPSocketAction", "TCPSocketAction", 1) + `,`, + `}`, + }, "") + return s +} +func (this *HostAlias) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HostAlias{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Hostnames:` + fmt.Sprintf("%v", this.Hostnames) + `,`, + `}`, + }, "") + return s +} +func (this *HostPathVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HostPathVolumeSource{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Type:` + valueToStringGenerated(this.Type) + `,`, + `}`, + }, "") + return s +} +func (this *ISCSIPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ISCSIPersistentVolumeSource{`, + `TargetPortal:` + fmt.Sprintf("%v", this.TargetPortal) + `,`, + `IQN:` + fmt.Sprintf("%v", this.IQN) + `,`, + `Lun:` + fmt.Sprintf("%v", this.Lun) + `,`, + `ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`, + 
`FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, + `DiscoveryCHAPAuth:` + fmt.Sprintf("%v", this.DiscoveryCHAPAuth) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `SessionCHAPAuth:` + fmt.Sprintf("%v", this.SessionCHAPAuth) + `,`, + `InitiatorName:` + valueToStringGenerated(this.InitiatorName) + `,`, + `}`, + }, "") + return s +} +func (this *ISCSIVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ISCSIVolumeSource{`, + `TargetPortal:` + fmt.Sprintf("%v", this.TargetPortal) + `,`, + `IQN:` + fmt.Sprintf("%v", this.IQN) + `,`, + `Lun:` + fmt.Sprintf("%v", this.Lun) + `,`, + `ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, + `DiscoveryCHAPAuth:` + fmt.Sprintf("%v", this.DiscoveryCHAPAuth) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `SessionCHAPAuth:` + fmt.Sprintf("%v", this.SessionCHAPAuth) + `,`, + `InitiatorName:` + valueToStringGenerated(this.InitiatorName) + `,`, + `}`, + }, "") + return s +} +func (this *KeyToPath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KeyToPath{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Mode:` + valueToStringGenerated(this.Mode) + `,`, + `}`, + }, "") + return s +} +func (this *Lifecycle) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Lifecycle{`, + `PostStart:` + strings.Replace(this.PostStart.String(), "Handler", "Handler", 1) + `,`, + `PreStop:` + strings.Replace(this.PreStop.String(), "Handler", "Handler", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LimitRange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitRange{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LimitRangeSpec", "LimitRangeSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LimitRangeItem) String() string { + if this == nil { + return "nil" + } + keysForMax := make([]string, 0, len(this.Max)) + for k := range this.Max { + keysForMax = append(keysForMax, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMax) + mapStringForMax := "ResourceList{" + for _, k := range keysForMax { + mapStringForMax += fmt.Sprintf("%v: %v,", k, this.Max[ResourceName(k)]) + } + mapStringForMax += "}" + keysForMin := make([]string, 0, len(this.Min)) + for k := range this.Min { + keysForMin = append(keysForMin, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMin) + mapStringForMin := "ResourceList{" + for _, k := range keysForMin { + mapStringForMin += fmt.Sprintf("%v: %v,", k, this.Min[ResourceName(k)]) + } + mapStringForMin += "}" + keysForDefault := make([]string, 0, len(this.Default)) + for k := range this.Default { + keysForDefault = append(keysForDefault, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefault) + mapStringForDefault := "ResourceList{" + for _, k := range keysForDefault { + mapStringForDefault += 
fmt.Sprintf("%v: %v,", k, this.Default[ResourceName(k)]) + } + mapStringForDefault += "}" + keysForDefaultRequest := make([]string, 0, len(this.DefaultRequest)) + for k := range this.DefaultRequest { + keysForDefaultRequest = append(keysForDefaultRequest, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest) + mapStringForDefaultRequest := "ResourceList{" + for _, k := range keysForDefaultRequest { + mapStringForDefaultRequest += fmt.Sprintf("%v: %v,", k, this.DefaultRequest[ResourceName(k)]) + } + mapStringForDefaultRequest += "}" + keysForMaxLimitRequestRatio := make([]string, 0, len(this.MaxLimitRequestRatio)) + for k := range this.MaxLimitRequestRatio { + keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio) + mapStringForMaxLimitRequestRatio := "ResourceList{" + for _, k := range keysForMaxLimitRequestRatio { + mapStringForMaxLimitRequestRatio += fmt.Sprintf("%v: %v,", k, this.MaxLimitRequestRatio[ResourceName(k)]) + } + mapStringForMaxLimitRequestRatio += "}" + s := strings.Join([]string{`&LimitRangeItem{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Max:` + mapStringForMax + `,`, + `Min:` + mapStringForMin + `,`, + `Default:` + mapStringForDefault + `,`, + `DefaultRequest:` + mapStringForDefaultRequest + `,`, + `MaxLimitRequestRatio:` + mapStringForMaxLimitRequestRatio + `,`, + `}`, + }, "") + return s +} +func (this *LimitRangeList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]LimitRange{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "LimitRange", "LimitRange", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&LimitRangeList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *LimitRangeSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForLimits := "[]LimitRangeItem{" + for _, f := range this.Limits { + repeatedStringForLimits += strings.Replace(strings.Replace(f.String(), "LimitRangeItem", "LimitRangeItem", 1), `&`, ``, 1) + "," + } + repeatedStringForLimits += "}" + s := strings.Join([]string{`&LimitRangeSpec{`, + `Limits:` + repeatedStringForLimits + `,`, + `}`, + }, "") + return s +} +func (this *List) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]RawExtension{" + for _, f := range this.Items { + repeatedStringForItems += fmt.Sprintf("%v", f) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&List{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *LoadBalancerIngress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LoadBalancerIngress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `}`, + }, "") + return s +} +func (this *LoadBalancerStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForIngress := "[]LoadBalancerIngress{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "LoadBalancerIngress", 
"LoadBalancerIngress", 1), `&`, ``, 1) + "," + } + repeatedStringForIngress += "}" + s := strings.Join([]string{`&LoadBalancerStatus{`, + `Ingress:` + repeatedStringForIngress + `,`, + `}`, + }, "") + return s +} +func (this *LocalObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalObjectReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *LocalVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalVolumeSource{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `FSType:` + valueToStringGenerated(this.FSType) + `,`, + `}`, + }, "") + return s +} +func (this *NFSVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NFSVolumeSource{`, + `Server:` + fmt.Sprintf("%v", this.Server) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Namespace) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Namespace{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NamespaceSpec", "NamespaceSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NamespaceStatus", "NamespaceStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamespaceCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Namespace{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&NamespaceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamespaceSpec{`, + `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]NamespaceCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "NamespaceCondition", "NamespaceCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&NamespaceStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *Node) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Node{`, + 
`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NodeSpec", "NodeSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NodeStatus", "NodeStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeAddress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeAddress{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Address:` + fmt.Sprintf("%v", this.Address) + `,`, + `}`, + }, "") + return s +} +func (this *NodeAffinity) String() string { + if this == nil { + return "nil" + } + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution := "[]PreferredSchedulingTerm{" + for _, f := range this.PreferredDuringSchedulingIgnoredDuringExecution { + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), "PreferredSchedulingTerm", "PreferredSchedulingTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += "}" + s := strings.Join([]string{`&NodeAffinity{`, + `RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(this.RequiredDuringSchedulingIgnoredDuringExecution.String(), "NodeSelector", "NodeSelector", 1) + `,`, + `PreferredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution + `,`, + `}`, + }, "") + return s +} +func (this *NodeCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastHeartbeatTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastHeartbeatTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *NodeConfigSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeConfigSource{`, + `ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapNodeConfigSource", "ConfigMapNodeConfigSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeConfigStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeConfigStatus{`, + `Assigned:` + strings.Replace(this.Assigned.String(), "NodeConfigSource", "NodeConfigSource", 1) + `,`, + `Active:` + strings.Replace(this.Active.String(), "NodeConfigSource", "NodeConfigSource", 1) + `,`, + `LastKnownGood:` + strings.Replace(this.LastKnownGood.String(), "NodeConfigSource", "NodeConfigSource", 1) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} +func (this *NodeDaemonEndpoints) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeDaemonEndpoints{`, + `KubeletEndpoint:` + strings.Replace(strings.Replace(this.KubeletEndpoint.String(), "DaemonEndpoint", "DaemonEndpoint", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Node{" + for _, f := range this.Items { + repeatedStringForItems += 
strings.Replace(strings.Replace(f.String(), "Node", "Node", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&NodeList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *NodeProxyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeProxyOptions{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *NodeResources) String() string { + if this == nil { + return "nil" + } + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&NodeResources{`, + `Capacity:` + mapStringForCapacity + `,`, + `}`, + }, "") + return s +} +func (this *NodeSelector) String() string { + if this == nil { + return "nil" + } + repeatedStringForNodeSelectorTerms := "[]NodeSelectorTerm{" + for _, f := range this.NodeSelectorTerms { + repeatedStringForNodeSelectorTerms += strings.Replace(strings.Replace(f.String(), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForNodeSelectorTerms += "}" + s := strings.Join([]string{`&NodeSelector{`, + `NodeSelectorTerms:` + repeatedStringForNodeSelectorTerms + `,`, + `}`, + }, "") + return s +} +func (this *NodeSelectorRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeSelectorRequirement{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *NodeSelectorTerm) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchExpressions := "[]NodeSelectorRequirement{" + for _, f := range this.MatchExpressions { + repeatedStringForMatchExpressions += strings.Replace(strings.Replace(f.String(), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchExpressions += "}" + repeatedStringForMatchFields := "[]NodeSelectorRequirement{" + for _, f := range this.MatchFields { + repeatedStringForMatchFields += strings.Replace(strings.Replace(f.String(), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchFields += "}" + s := strings.Join([]string{`&NodeSelectorTerm{`, + `MatchExpressions:` + repeatedStringForMatchExpressions + `,`, + `MatchFields:` + repeatedStringForMatchFields + `,`, + `}`, + }, "") + return s +} +func (this *NodeSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForTaints := "[]Taint{" + for _, f := range this.Taints { + repeatedStringForTaints += strings.Replace(strings.Replace(f.String(), "Taint", "Taint", 1), `&`, ``, 1) + "," + } + repeatedStringForTaints += "}" + s := strings.Join([]string{`&NodeSpec{`, + `PodCIDR:` + fmt.Sprintf("%v", this.PodCIDR) + `,`, + `DoNotUse_ExternalID:` + fmt.Sprintf("%v", this.DoNotUse_ExternalID) + `,`, + `ProviderID:` + fmt.Sprintf("%v", this.ProviderID) + `,`, + `Unschedulable:` + fmt.Sprintf("%v", 
this.Unschedulable) + `,`, + `Taints:` + repeatedStringForTaints + `,`, + `ConfigSource:` + strings.Replace(this.ConfigSource.String(), "NodeConfigSource", "NodeConfigSource", 1) + `,`, + `PodCIDRs:` + fmt.Sprintf("%v", this.PodCIDRs) + `,`, + `}`, + }, "") + return s +} +func (this *NodeStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]NodeCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "NodeCondition", "NodeCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + repeatedStringForAddresses := "[]NodeAddress{" + for _, f := range this.Addresses { + repeatedStringForAddresses += strings.Replace(strings.Replace(f.String(), "NodeAddress", "NodeAddress", 1), `&`, ``, 1) + "," + } + repeatedStringForAddresses += "}" + repeatedStringForImages := "[]ContainerImage{" + for _, f := range this.Images { + repeatedStringForImages += strings.Replace(strings.Replace(f.String(), "ContainerImage", "ContainerImage", 1), `&`, ``, 1) + "," + } + repeatedStringForImages += "}" + repeatedStringForVolumesAttached := "[]AttachedVolume{" + for _, f := range this.VolumesAttached { + repeatedStringForVolumesAttached += strings.Replace(strings.Replace(f.String(), "AttachedVolume", "AttachedVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumesAttached += "}" + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + keysForAllocatable := make([]string, 0, len(this.Allocatable)) + for k := range this.Allocatable { + keysForAllocatable = append(keysForAllocatable, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable) + mapStringForAllocatable := "ResourceList{" + for _, k := range keysForAllocatable { + mapStringForAllocatable += fmt.Sprintf("%v: %v,", k, this.Allocatable[ResourceName(k)]) + } + mapStringForAllocatable += "}" + s := strings.Join([]string{`&NodeStatus{`, + `Capacity:` + mapStringForCapacity + `,`, + `Allocatable:` + mapStringForAllocatable + `,`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `Addresses:` + repeatedStringForAddresses + `,`, + `DaemonEndpoints:` + strings.Replace(strings.Replace(this.DaemonEndpoints.String(), "NodeDaemonEndpoints", "NodeDaemonEndpoints", 1), `&`, ``, 1) + `,`, + `NodeInfo:` + strings.Replace(strings.Replace(this.NodeInfo.String(), "NodeSystemInfo", "NodeSystemInfo", 1), `&`, ``, 1) + `,`, + `Images:` + repeatedStringForImages + `,`, + `VolumesInUse:` + fmt.Sprintf("%v", this.VolumesInUse) + `,`, + `VolumesAttached:` + repeatedStringForVolumesAttached + `,`, + `Config:` + strings.Replace(this.Config.String(), "NodeConfigStatus", "NodeConfigStatus", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeSystemInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeSystemInfo{`, + `MachineID:` + fmt.Sprintf("%v", this.MachineID) + `,`, + `SystemUUID:` + fmt.Sprintf("%v", this.SystemUUID) + `,`, + `BootID:` + fmt.Sprintf("%v", this.BootID) + `,`, + `KernelVersion:` + fmt.Sprintf("%v", this.KernelVersion) + `,`, + `OSImage:` + fmt.Sprintf("%v", 
this.OSImage) + `,`, + `ContainerRuntimeVersion:` + fmt.Sprintf("%v", this.ContainerRuntimeVersion) + `,`, + `KubeletVersion:` + fmt.Sprintf("%v", this.KubeletVersion) + `,`, + `KubeProxyVersion:` + fmt.Sprintf("%v", this.KubeProxyVersion) + `,`, + `OperatingSystem:` + fmt.Sprintf("%v", this.OperatingSystem) + `,`, + `Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectFieldSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectFieldSelector{`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolume{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeSpec", "PersistentVolumeSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeStatus", "PersistentVolumeStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaim) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaim{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeClaimSpec", "PersistentVolumeClaimSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeClaimStatus", "PersistentVolumeClaimStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaimCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastProbeTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PersistentVolumeClaim{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PersistentVolumeClaim", "PersistentVolumeClaim", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := 
strings.Join([]string{`&PersistentVolumeClaimList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaimSpec{`, + `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`, + `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, + `DataSource:` + strings.Replace(this.DataSource.String(), "TypedLocalObjectReference", "TypedLocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]PersistentVolumeClaimCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "PersistentVolumeClaimCondition", "PersistentVolumeClaimCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&PersistentVolumeClaimStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, + `Capacity:` + mapStringForCapacity + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaimVolumeSource{`, + `ClaimName:` + fmt.Sprintf("%v", this.ClaimName) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PersistentVolume{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PersistentVolume", "PersistentVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PersistentVolumeList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeSource{`, + `GCEPersistentDisk:` + strings.Replace(this.GCEPersistentDisk.String(), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, + `AWSElasticBlockStore:` + strings.Replace(this.AWSElasticBlockStore.String(), "AWSElasticBlockStoreVolumeSource", 
"AWSElasticBlockStoreVolumeSource", 1) + `,`, + `HostPath:` + strings.Replace(this.HostPath.String(), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, + `Glusterfs:` + strings.Replace(this.Glusterfs.String(), "GlusterfsPersistentVolumeSource", "GlusterfsPersistentVolumeSource", 1) + `,`, + `NFS:` + strings.Replace(this.NFS.String(), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, + `RBD:` + strings.Replace(this.RBD.String(), "RBDPersistentVolumeSource", "RBDPersistentVolumeSource", 1) + `,`, + `ISCSI:` + strings.Replace(this.ISCSI.String(), "ISCSIPersistentVolumeSource", "ISCSIPersistentVolumeSource", 1) + `,`, + `Cinder:` + strings.Replace(this.Cinder.String(), "CinderPersistentVolumeSource", "CinderPersistentVolumeSource", 1) + `,`, + `CephFS:` + strings.Replace(this.CephFS.String(), "CephFSPersistentVolumeSource", "CephFSPersistentVolumeSource", 1) + `,`, + `FC:` + strings.Replace(this.FC.String(), "FCVolumeSource", "FCVolumeSource", 1) + `,`, + `Flocker:` + strings.Replace(this.Flocker.String(), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, + `FlexVolume:` + strings.Replace(this.FlexVolume.String(), "FlexPersistentVolumeSource", "FlexPersistentVolumeSource", 1) + `,`, + `AzureFile:` + strings.Replace(this.AzureFile.String(), "AzureFilePersistentVolumeSource", "AzureFilePersistentVolumeSource", 1) + `,`, + `VsphereVolume:` + strings.Replace(this.VsphereVolume.String(), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`, + `Quobyte:` + strings.Replace(this.Quobyte.String(), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, + `AzureDisk:` + strings.Replace(this.AzureDisk.String(), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, + `PhotonPersistentDisk:` + strings.Replace(this.PhotonPersistentDisk.String(), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, + `PortworxVolume:` + strings.Replace(this.PortworxVolume.String(), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, + `ScaleIO:` + strings.Replace(this.ScaleIO.String(), "ScaleIOPersistentVolumeSource", "ScaleIOPersistentVolumeSource", 1) + `,`, + `Local:` + strings.Replace(this.Local.String(), "LocalVolumeSource", "LocalVolumeSource", 1) + `,`, + `StorageOS:` + strings.Replace(this.StorageOS.String(), "StorageOSPersistentVolumeSource", "StorageOSPersistentVolumeSource", 1) + `,`, + `CSI:` + strings.Replace(this.CSI.String(), "CSIPersistentVolumeSource", "CSIPersistentVolumeSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeSpec) String() string { + if this == nil { + return "nil" + } + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&PersistentVolumeSpec{`, + `Capacity:` + mapStringForCapacity + `,`, + `PersistentVolumeSource:` + strings.Replace(strings.Replace(this.PersistentVolumeSource.String(), "PersistentVolumeSource", "PersistentVolumeSource", 1), `&`, ``, 1) + `,`, + `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, + `ClaimRef:` + strings.Replace(this.ClaimRef.String(), "ObjectReference", "ObjectReference", 1) + `,`, + `PersistentVolumeReclaimPolicy:` + fmt.Sprintf("%v", this.PersistentVolumeReclaimPolicy) 
+ `,`, + `StorageClassName:` + fmt.Sprintf("%v", this.StorageClassName) + `,`, + `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, + `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, + `NodeAffinity:` + strings.Replace(this.NodeAffinity.String(), "VolumeNodeAffinity", "VolumeNodeAffinity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `}`, + }, "") + return s +} +func (this *PhotonPersistentDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PhotonPersistentDiskVolumeSource{`, + `PdID:` + fmt.Sprintf("%v", this.PdID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `}`, + }, "") + return s +} +func (this *Pod) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Pod{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodAffinity) String() string { + if this == nil { + return "nil" + } + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution := "[]PodAffinityTerm{" + for _, f := range this.RequiredDuringSchedulingIgnoredDuringExecution { + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution += "}" + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution := "[]WeightedPodAffinityTerm{" + for _, f := range this.PreferredDuringSchedulingIgnoredDuringExecution { + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), "WeightedPodAffinityTerm", "WeightedPodAffinityTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += "}" + s := strings.Join([]string{`&PodAffinity{`, + `RequiredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution + `,`, + `PreferredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution + `,`, + `}`, + }, "") + return s +} +func (this *PodAffinityTerm) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodAffinityTerm{`, + `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, + `TopologyKey:` + fmt.Sprintf("%v", this.TopologyKey) + `,`, + `}`, + }, "") + return s +} +func (this *PodAntiAffinity) String() string { + if this == nil { + return "nil" + } + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution := "[]PodAffinityTerm{" + for _, f := range this.RequiredDuringSchedulingIgnoredDuringExecution { + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), 
"PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution += "}" + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution := "[]WeightedPodAffinityTerm{" + for _, f := range this.PreferredDuringSchedulingIgnoredDuringExecution { + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), "WeightedPodAffinityTerm", "WeightedPodAffinityTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += "}" + s := strings.Join([]string{`&PodAntiAffinity{`, + `RequiredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution + `,`, + `PreferredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution + `,`, + `}`, + }, "") + return s +} +func (this *PodAttachOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodAttachOptions{`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} +func (this *PodCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastProbeTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PodDNSConfig) String() string { + if this == nil { + return "nil" + } + repeatedStringForOptions := "[]PodDNSConfigOption{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(strings.Replace(f.String(), "PodDNSConfigOption", "PodDNSConfigOption", 1), `&`, ``, 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&PodDNSConfig{`, + `Nameservers:` + fmt.Sprintf("%v", this.Nameservers) + `,`, + `Searches:` + fmt.Sprintf("%v", this.Searches) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `}`, + }, "") + return s +} +func (this *PodDNSConfigOption) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodDNSConfigOption{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + valueToStringGenerated(this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *PodExecOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodExecOptions{`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `}`, + }, "") + return s +} +func (this *PodIP) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodIP{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `}`, + }, "") + return s +} +func (this *PodList) String() string { 
+ if this == nil { + return "nil" + } + repeatedStringForItems := "[]Pod{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Pod", "Pod", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PodLogOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodLogOptions{`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, + `Previous:` + fmt.Sprintf("%v", this.Previous) + `,`, + `SinceSeconds:` + valueToStringGenerated(this.SinceSeconds) + `,`, + `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "v1.Time", 1) + `,`, + `Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`, + `TailLines:` + valueToStringGenerated(this.TailLines) + `,`, + `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`, + `}`, + }, "") + return s +} +func (this *PodPortForwardOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodPortForwardOptions{`, + `Ports:` + fmt.Sprintf("%v", this.Ports) + `,`, + `}`, + }, "") + return s +} +func (this *PodProxyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodProxyOptions{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *PodReadinessGate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodReadinessGate{`, + `ConditionType:` + fmt.Sprintf("%v", this.ConditionType) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityContext) String() string { + if this == nil { + return "nil" + } + repeatedStringForSysctls := "[]Sysctl{" + for _, f := range this.Sysctls { + repeatedStringForSysctls += strings.Replace(strings.Replace(f.String(), "Sysctl", "Sysctl", 1), `&`, ``, 1) + "," + } + repeatedStringForSysctls += "}" + s := strings.Join([]string{`&PodSecurityContext{`, + `SELinuxOptions:` + strings.Replace(this.SELinuxOptions.String(), "SELinuxOptions", "SELinuxOptions", 1) + `,`, + `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, + `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, + `SupplementalGroups:` + fmt.Sprintf("%v", this.SupplementalGroups) + `,`, + `FSGroup:` + valueToStringGenerated(this.FSGroup) + `,`, + `RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`, + `Sysctls:` + repeatedStringForSysctls + `,`, + `WindowsOptions:` + strings.Replace(this.WindowsOptions.String(), "WindowsSecurityContextOptions", "WindowsSecurityContextOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSignature) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSignature{`, + `PodController:` + strings.Replace(fmt.Sprintf("%v", this.PodController), "OwnerReference", "v1.OwnerReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForVolumes := "[]Volume{" + for _, f := range this.Volumes { + repeatedStringForVolumes += strings.Replace(strings.Replace(f.String(), "Volume", "Volume", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumes += "}" + repeatedStringForContainers := "[]Container{" + for _, f := 
range this.Containers { + repeatedStringForContainers += strings.Replace(strings.Replace(f.String(), "Container", "Container", 1), `&`, ``, 1) + "," + } + repeatedStringForContainers += "}" + repeatedStringForImagePullSecrets := "[]LocalObjectReference{" + for _, f := range this.ImagePullSecrets { + repeatedStringForImagePullSecrets += strings.Replace(strings.Replace(f.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + "," + } + repeatedStringForImagePullSecrets += "}" + repeatedStringForInitContainers := "[]Container{" + for _, f := range this.InitContainers { + repeatedStringForInitContainers += strings.Replace(strings.Replace(f.String(), "Container", "Container", 1), `&`, ``, 1) + "," + } + repeatedStringForInitContainers += "}" + repeatedStringForTolerations := "[]Toleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "Toleration", "Toleration", 1), `&`, ``, 1) + "," + } + repeatedStringForTolerations += "}" + repeatedStringForHostAliases := "[]HostAlias{" + for _, f := range this.HostAliases { + repeatedStringForHostAliases += strings.Replace(strings.Replace(f.String(), "HostAlias", "HostAlias", 1), `&`, ``, 1) + "," + } + repeatedStringForHostAliases += "}" + repeatedStringForReadinessGates := "[]PodReadinessGate{" + for _, f := range this.ReadinessGates { + repeatedStringForReadinessGates += strings.Replace(strings.Replace(f.String(), "PodReadinessGate", "PodReadinessGate", 1), `&`, ``, 1) + "," + } + repeatedStringForReadinessGates += "}" + repeatedStringForTopologySpreadConstraints := "[]TopologySpreadConstraint{" + for _, f := range this.TopologySpreadConstraints { + repeatedStringForTopologySpreadConstraints += strings.Replace(strings.Replace(f.String(), "TopologySpreadConstraint", "TopologySpreadConstraint", 1), `&`, ``, 1) + "," + } + repeatedStringForTopologySpreadConstraints += "}" + repeatedStringForEphemeralContainers := "[]EphemeralContainer{" + for _, f := range this.EphemeralContainers { + repeatedStringForEphemeralContainers += strings.Replace(strings.Replace(f.String(), "EphemeralContainer", "EphemeralContainer", 1), `&`, ``, 1) + "," + } + repeatedStringForEphemeralContainers += "}" + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + keysForOverhead := make([]string, 0, len(this.Overhead)) + for k := range this.Overhead { + keysForOverhead = append(keysForOverhead, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOverhead) + mapStringForOverhead := "ResourceList{" + for _, k := range keysForOverhead { + mapStringForOverhead += fmt.Sprintf("%v: %v,", k, this.Overhead[ResourceName(k)]) + } + mapStringForOverhead += "}" + s := strings.Join([]string{`&PodSpec{`, + `Volumes:` + repeatedStringForVolumes + `,`, + `Containers:` + repeatedStringForContainers + `,`, + `RestartPolicy:` + fmt.Sprintf("%v", this.RestartPolicy) + `,`, + `TerminationGracePeriodSeconds:` + valueToStringGenerated(this.TerminationGracePeriodSeconds) + `,`, + `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, + `DNSPolicy:` + fmt.Sprintf("%v", this.DNSPolicy) + `,`, + 
`NodeSelector:` + mapStringForNodeSelector + `,`, + `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, + `DeprecatedServiceAccount:` + fmt.Sprintf("%v", this.DeprecatedServiceAccount) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, + `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, + `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, + `SecurityContext:` + strings.Replace(this.SecurityContext.String(), "PodSecurityContext", "PodSecurityContext", 1) + `,`, + `ImagePullSecrets:` + repeatedStringForImagePullSecrets + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Subdomain:` + fmt.Sprintf("%v", this.Subdomain) + `,`, + `Affinity:` + strings.Replace(this.Affinity.String(), "Affinity", "Affinity", 1) + `,`, + `SchedulerName:` + fmt.Sprintf("%v", this.SchedulerName) + `,`, + `InitContainers:` + repeatedStringForInitContainers + `,`, + `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, + `HostAliases:` + repeatedStringForHostAliases + `,`, + `PriorityClassName:` + fmt.Sprintf("%v", this.PriorityClassName) + `,`, + `Priority:` + valueToStringGenerated(this.Priority) + `,`, + `DNSConfig:` + strings.Replace(this.DNSConfig.String(), "PodDNSConfig", "PodDNSConfig", 1) + `,`, + `ShareProcessNamespace:` + valueToStringGenerated(this.ShareProcessNamespace) + `,`, + `ReadinessGates:` + repeatedStringForReadinessGates + `,`, + `RuntimeClassName:` + valueToStringGenerated(this.RuntimeClassName) + `,`, + `EnableServiceLinks:` + valueToStringGenerated(this.EnableServiceLinks) + `,`, + `PreemptionPolicy:` + valueToStringGenerated(this.PreemptionPolicy) + `,`, + `Overhead:` + mapStringForOverhead + `,`, + `TopologySpreadConstraints:` + repeatedStringForTopologySpreadConstraints + `,`, + `EphemeralContainers:` + repeatedStringForEphemeralContainers + `,`, + `}`, + }, "") + return s +} +func (this *PodStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]PodCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "PodCondition", "PodCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + repeatedStringForContainerStatuses := "[]ContainerStatus{" + for _, f := range this.ContainerStatuses { + repeatedStringForContainerStatuses += strings.Replace(strings.Replace(f.String(), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForContainerStatuses += "}" + repeatedStringForInitContainerStatuses := "[]ContainerStatus{" + for _, f := range this.InitContainerStatuses { + repeatedStringForInitContainerStatuses += strings.Replace(strings.Replace(f.String(), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForInitContainerStatuses += "}" + repeatedStringForPodIPs := "[]PodIP{" + for _, f := range this.PodIPs { + repeatedStringForPodIPs += strings.Replace(strings.Replace(f.String(), "PodIP", "PodIP", 1), `&`, ``, 1) + "," + } + repeatedStringForPodIPs += "}" + repeatedStringForEphemeralContainerStatuses := "[]ContainerStatus{" + for _, f := range this.EphemeralContainerStatuses { + repeatedStringForEphemeralContainerStatuses += strings.Replace(strings.Replace(f.String(), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForEphemeralContainerStatuses += "}" + s := 
strings.Join([]string{`&PodStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, + `PodIP:` + fmt.Sprintf("%v", this.PodIP) + `,`, + `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "v1.Time", 1) + `,`, + `ContainerStatuses:` + repeatedStringForContainerStatuses + `,`, + `QOSClass:` + fmt.Sprintf("%v", this.QOSClass) + `,`, + `InitContainerStatuses:` + repeatedStringForInitContainerStatuses + `,`, + `NominatedNodeName:` + fmt.Sprintf("%v", this.NominatedNodeName) + `,`, + `PodIPs:` + repeatedStringForPodIPs + `,`, + `EphemeralContainerStatuses:` + repeatedStringForEphemeralContainerStatuses + `,`, + `}`, + }, "") + return s +} +func (this *PodStatusResult) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodStatusResult{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodTemplate{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodTemplateList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PodTemplate{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodTemplate", "PodTemplate", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodTemplateList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PodTemplateSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodTemplateSpec{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PortworxVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PortworxVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Preconditions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Preconditions{`, + `UID:` + valueToStringGenerated(this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *PreferAvoidPodsEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PreferAvoidPodsEntry{`, + `PodSignature:` + strings.Replace(strings.Replace(this.PodSignature.String(), "PodSignature", "PodSignature", 1), `&`, ``, 1) + 
`,`, + `EvictionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EvictionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PreferredSchedulingTerm) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PreferredSchedulingTerm{`, + `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, + `Preference:` + strings.Replace(strings.Replace(this.Preference.String(), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Probe) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Probe{`, + `Handler:` + strings.Replace(strings.Replace(this.Handler.String(), "Handler", "Handler", 1), `&`, ``, 1) + `,`, + `InitialDelaySeconds:` + fmt.Sprintf("%v", this.InitialDelaySeconds) + `,`, + `TimeoutSeconds:` + fmt.Sprintf("%v", this.TimeoutSeconds) + `,`, + `PeriodSeconds:` + fmt.Sprintf("%v", this.PeriodSeconds) + `,`, + `SuccessThreshold:` + fmt.Sprintf("%v", this.SuccessThreshold) + `,`, + `FailureThreshold:` + fmt.Sprintf("%v", this.FailureThreshold) + `,`, + `}`, + }, "") + return s +} +func (this *ProjectedVolumeSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForSources := "[]VolumeProjection{" + for _, f := range this.Sources { + repeatedStringForSources += strings.Replace(strings.Replace(f.String(), "VolumeProjection", "VolumeProjection", 1), `&`, ``, 1) + "," + } + repeatedStringForSources += "}" + s := strings.Join([]string{`&ProjectedVolumeSource{`, + `Sources:` + repeatedStringForSources + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `}`, + }, "") + return s +} +func (this *QuobyteVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QuobyteVolumeSource{`, + `Registry:` + fmt.Sprintf("%v", this.Registry) + `,`, + `Volume:` + fmt.Sprintf("%v", this.Volume) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Tenant:` + fmt.Sprintf("%v", this.Tenant) + `,`, + `}`, + }, "") + return s +} +func (this *RBDPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RBDPersistentVolumeSource{`, + `CephMonitors:` + fmt.Sprintf("%v", this.CephMonitors) + `,`, + `RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`, + `RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`, + `Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *RBDVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RBDVolumeSource{`, + `CephMonitors:` + fmt.Sprintf("%v", this.CephMonitors) + `,`, + `RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`, + `RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`, + `Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", 
"LocalObjectReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *RangeAllocation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RangeAllocation{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Range:` + fmt.Sprintf("%v", this.Range) + `,`, + `Data:` + valueToStringGenerated(this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationController) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicationController{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicationControllerSpec", "ReplicationControllerSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicationControllerStatus", "ReplicationControllerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicationControllerCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ReplicationController{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicationController", "ReplicationController", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ReplicationControllerList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerSpec) String() string { + if this == nil { + return "nil" + } + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ReplicationControllerSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + mapStringForSelector + `,`, + `Template:` + strings.Replace(this.Template.String(), "PodTemplateSpec", "PodTemplateSpec", 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]ReplicationControllerCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicationControllerCondition", "ReplicationControllerCondition", 1), `&`, ``, 1) + "," + } 
+ repeatedStringForConditions += "}" + s := strings.Join([]string{`&ReplicationControllerStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *ResourceFieldSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceFieldSelector{`, + `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Divisor:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Divisor), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuota) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceQuota{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceQuotaSpec", "ResourceQuotaSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceQuotaStatus", "ResourceQuotaStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ResourceQuota{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceQuota", "ResourceQuota", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ResourceQuotaList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaSpec) String() string { + if this == nil { + return "nil" + } + keysForHard := make([]string, 0, len(this.Hard)) + for k := range this.Hard { + keysForHard = append(keysForHard, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + mapStringForHard := "ResourceList{" + for _, k := range keysForHard { + mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) + } + mapStringForHard += "}" + s := strings.Join([]string{`&ResourceQuotaSpec{`, + `Hard:` + mapStringForHard + `,`, + `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, + `ScopeSelector:` + strings.Replace(this.ScopeSelector.String(), "ScopeSelector", "ScopeSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaStatus) String() string { + if this == nil { + return "nil" + } + keysForHard := make([]string, 0, len(this.Hard)) + for k := range this.Hard { + keysForHard = append(keysForHard, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + mapStringForHard := "ResourceList{" + for _, k := range keysForHard { + mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) + } + mapStringForHard += "}" + keysForUsed := make([]string, 0, len(this.Used)) + for k := range this.Used { + keysForUsed = append(keysForUsed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsed) + mapStringForUsed := "ResourceList{" + for _, k 
:= range keysForUsed { + mapStringForUsed += fmt.Sprintf("%v: %v,", k, this.Used[ResourceName(k)]) + } + mapStringForUsed += "}" + s := strings.Join([]string{`&ResourceQuotaStatus{`, + `Hard:` + mapStringForHard + `,`, + `Used:` + mapStringForUsed + `,`, + `}`, + }, "") + return s +} +func (this *ResourceRequirements) String() string { + if this == nil { + return "nil" + } + keysForLimits := make([]string, 0, len(this.Limits)) + for k := range this.Limits { + keysForLimits = append(keysForLimits, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) + mapStringForLimits := "ResourceList{" + for _, k := range keysForLimits { + mapStringForLimits += fmt.Sprintf("%v: %v,", k, this.Limits[ResourceName(k)]) + } + mapStringForLimits += "}" + keysForRequests := make([]string, 0, len(this.Requests)) + for k := range this.Requests { + keysForRequests = append(keysForRequests, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) + mapStringForRequests := "ResourceList{" + for _, k := range keysForRequests { + mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[ResourceName(k)]) + } + mapStringForRequests += "}" + s := strings.Join([]string{`&ResourceRequirements{`, + `Limits:` + mapStringForLimits + `,`, + `Requests:` + mapStringForRequests + `,`, + `}`, + }, "") + return s +} +func (this *SELinuxOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SELinuxOptions{`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Level:` + fmt.Sprintf("%v", this.Level) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleIOPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleIOPersistentVolumeSource{`, + `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, + `System:` + fmt.Sprintf("%v", this.System) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, + `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, + `StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`, + `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleIOVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleIOVolumeSource{`, + `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, + `System:` + fmt.Sprintf("%v", this.System) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, + `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, + `StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`, + `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *ScopeSelector) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchExpressions := "[]ScopedResourceSelectorRequirement{" + for _, f := range this.MatchExpressions 
{ + repeatedStringForMatchExpressions += strings.Replace(strings.Replace(f.String(), "ScopedResourceSelectorRequirement", "ScopedResourceSelectorRequirement", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchExpressions += "}" + s := strings.Join([]string{`&ScopeSelector{`, + `MatchExpressions:` + repeatedStringForMatchExpressions + `,`, + `}`, + }, "") + return s +} +func (this *ScopedResourceSelectorRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScopedResourceSelectorRequirement{`, + `ScopeName:` + fmt.Sprintf("%v", this.ScopeName) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *Secret) String() string { + if this == nil { + return "nil" + } + keysForData := make([]string, 0, len(this.Data)) + for k := range this.Data { + keysForData = append(keysForData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + mapStringForData := "map[string][]byte{" + for _, k := range keysForData { + mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) + } + mapStringForData += "}" + keysForStringData := make([]string, 0, len(this.StringData)) + for k := range this.StringData { + keysForStringData = append(keysForStringData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForStringData) + mapStringForStringData := "map[string]string{" + for _, k := range keysForStringData { + mapStringForStringData += fmt.Sprintf("%v: %v,", k, this.StringData[k]) + } + mapStringForStringData += "}" + s := strings.Join([]string{`&Secret{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + mapStringForData + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `StringData:` + mapStringForStringData + `,`, + `}`, + }, "") + return s +} +func (this *SecretEnvSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretEnvSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecretKeySelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretKeySelector{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecretList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Secret{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Secret", "Secret", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&SecretList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *SecretProjection) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]KeyToPath{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), 
"KeyToPath", "KeyToPath", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&SecretProjection{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecretReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func (this *SecretVolumeSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]KeyToPath{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&SecretVolumeSource{`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `Items:` + repeatedStringForItems + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecurityContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecurityContext{`, + `Capabilities:` + strings.Replace(this.Capabilities.String(), "Capabilities", "Capabilities", 1) + `,`, + `Privileged:` + valueToStringGenerated(this.Privileged) + `,`, + `SELinuxOptions:` + strings.Replace(this.SELinuxOptions.String(), "SELinuxOptions", "SELinuxOptions", 1) + `,`, + `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, + `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, + `ReadOnlyRootFilesystem:` + valueToStringGenerated(this.ReadOnlyRootFilesystem) + `,`, + `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, + `RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`, + `ProcMount:` + valueToStringGenerated(this.ProcMount) + `,`, + `WindowsOptions:` + strings.Replace(this.WindowsOptions.String(), "WindowsSecurityContextOptions", "WindowsSecurityContextOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SerializedReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SerializedReference{`, + `Reference:` + strings.Replace(strings.Replace(this.Reference.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Service) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Service{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceStatus", "ServiceStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccount) String() string { + if this == nil { + return "nil" + } + repeatedStringForSecrets := "[]ObjectReference{" + for _, f := range this.Secrets { + repeatedStringForSecrets += strings.Replace(strings.Replace(f.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + "," + } + 
repeatedStringForSecrets += "}" + repeatedStringForImagePullSecrets := "[]LocalObjectReference{" + for _, f := range this.ImagePullSecrets { + repeatedStringForImagePullSecrets += strings.Replace(strings.Replace(f.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + "," + } + repeatedStringForImagePullSecrets += "}" + s := strings.Join([]string{`&ServiceAccount{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Secrets:` + repeatedStringForSecrets + `,`, + `ImagePullSecrets:` + repeatedStringForImagePullSecrets + `,`, + `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ServiceAccount{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ServiceAccount", "ServiceAccount", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ServiceAccountList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountTokenProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceAccountTokenProjection{`, + `Audience:` + fmt.Sprintf("%v", this.Audience) + `,`, + `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Service{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Service", "Service", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ServiceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ServicePort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServicePort{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `TargetPort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetPort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `NodePort:` + fmt.Sprintf("%v", this.NodePort) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceProxyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceProxyOptions{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForPorts := "[]ServicePort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "ServicePort", "ServicePort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ServiceSpec{`, + `Ports:` + repeatedStringForPorts + `,`, + `Selector:` + mapStringForSelector + `,`, + `ClusterIP:` + fmt.Sprintf("%v", this.ClusterIP) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `ExternalIPs:` + fmt.Sprintf("%v", this.ExternalIPs) + `,`, + `SessionAffinity:` + fmt.Sprintf("%v", this.SessionAffinity) + `,`, + `LoadBalancerIP:` + fmt.Sprintf("%v", this.LoadBalancerIP) + `,`, + `LoadBalancerSourceRanges:` + fmt.Sprintf("%v", this.LoadBalancerSourceRanges) + `,`, + `ExternalName:` + fmt.Sprintf("%v", this.ExternalName) + `,`, + `ExternalTrafficPolicy:` + fmt.Sprintf("%v", this.ExternalTrafficPolicy) + `,`, + `HealthCheckNodePort:` + fmt.Sprintf("%v", this.HealthCheckNodePort) + `,`, + `PublishNotReadyAddresses:` + fmt.Sprintf("%v", this.PublishNotReadyAddresses) + `,`, + `SessionAffinityConfig:` + strings.Replace(this.SessionAffinityConfig.String(), "SessionAffinityConfig", "SessionAffinityConfig", 1) + `,`, + `IPFamily:` + valueToStringGenerated(this.IPFamily) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceStatus{`, + `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SessionAffinityConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SessionAffinityConfig{`, + `ClientIP:` + strings.Replace(this.ClientIP.String(), "ClientIPConfig", "ClientIPConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StorageOSPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageOSPersistentVolumeSource{`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "ObjectReference", "ObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StorageOSVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageOSVolumeSource{`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Sysctl) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Sysctl{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *TCPSocketAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TCPSocketAction{`, + `Port:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `}`, 
+ }, "") + return s +} +func (this *Taint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Taint{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, + `TimeAdded:` + strings.Replace(fmt.Sprintf("%v", this.TimeAdded), "Time", "v1.Time", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Toleration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Toleration{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, + `TolerationSeconds:` + valueToStringGenerated(this.TolerationSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *TopologySelectorLabelRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TopologySelectorLabelRequirement{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *TopologySelectorTerm) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchLabelExpressions := "[]TopologySelectorLabelRequirement{" + for _, f := range this.MatchLabelExpressions { + repeatedStringForMatchLabelExpressions += strings.Replace(strings.Replace(f.String(), "TopologySelectorLabelRequirement", "TopologySelectorLabelRequirement", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchLabelExpressions += "}" + s := strings.Join([]string{`&TopologySelectorTerm{`, + `MatchLabelExpressions:` + repeatedStringForMatchLabelExpressions + `,`, + `}`, + }, "") + return s +} +func (this *TopologySpreadConstraint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TopologySpreadConstraint{`, + `MaxSkew:` + fmt.Sprintf("%v", this.MaxSkew) + `,`, + `TopologyKey:` + fmt.Sprintf("%v", this.TopologyKey) + `,`, + `WhenUnsatisfiable:` + fmt.Sprintf("%v", this.WhenUnsatisfiable) + `,`, + `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TypedLocalObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TypedLocalObjectReference{`, + `APIGroup:` + valueToStringGenerated(this.APIGroup) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *Volume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Volume{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `VolumeSource:` + strings.Replace(strings.Replace(this.VolumeSource.String(), "VolumeSource", "VolumeSource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeDevice) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeDevice{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeMount) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeMount{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`, + `SubPath:` + fmt.Sprintf("%v", this.SubPath) + `,`, + 
`MountPropagation:` + valueToStringGenerated(this.MountPropagation) + `,`, + `SubPathExpr:` + fmt.Sprintf("%v", this.SubPathExpr) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeNodeAffinity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeNodeAffinity{`, + `Required:` + strings.Replace(this.Required.String(), "NodeSelector", "NodeSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeProjection{`, + `Secret:` + strings.Replace(this.Secret.String(), "SecretProjection", "SecretProjection", 1) + `,`, + `DownwardAPI:` + strings.Replace(this.DownwardAPI.String(), "DownwardAPIProjection", "DownwardAPIProjection", 1) + `,`, + `ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapProjection", "ConfigMapProjection", 1) + `,`, + `ServiceAccountToken:` + strings.Replace(this.ServiceAccountToken.String(), "ServiceAccountTokenProjection", "ServiceAccountTokenProjection", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeSource{`, + `HostPath:` + strings.Replace(this.HostPath.String(), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, + `EmptyDir:` + strings.Replace(this.EmptyDir.String(), "EmptyDirVolumeSource", "EmptyDirVolumeSource", 1) + `,`, + `GCEPersistentDisk:` + strings.Replace(this.GCEPersistentDisk.String(), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, + `AWSElasticBlockStore:` + strings.Replace(this.AWSElasticBlockStore.String(), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, + `GitRepo:` + strings.Replace(this.GitRepo.String(), "GitRepoVolumeSource", "GitRepoVolumeSource", 1) + `,`, + `Secret:` + strings.Replace(this.Secret.String(), "SecretVolumeSource", "SecretVolumeSource", 1) + `,`, + `NFS:` + strings.Replace(this.NFS.String(), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, + `ISCSI:` + strings.Replace(this.ISCSI.String(), "ISCSIVolumeSource", "ISCSIVolumeSource", 1) + `,`, + `Glusterfs:` + strings.Replace(this.Glusterfs.String(), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`, + `PersistentVolumeClaim:` + strings.Replace(this.PersistentVolumeClaim.String(), "PersistentVolumeClaimVolumeSource", "PersistentVolumeClaimVolumeSource", 1) + `,`, + `RBD:` + strings.Replace(this.RBD.String(), "RBDVolumeSource", "RBDVolumeSource", 1) + `,`, + `FlexVolume:` + strings.Replace(this.FlexVolume.String(), "FlexVolumeSource", "FlexVolumeSource", 1) + `,`, + `Cinder:` + strings.Replace(this.Cinder.String(), "CinderVolumeSource", "CinderVolumeSource", 1) + `,`, + `CephFS:` + strings.Replace(this.CephFS.String(), "CephFSVolumeSource", "CephFSVolumeSource", 1) + `,`, + `Flocker:` + strings.Replace(this.Flocker.String(), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, + `DownwardAPI:` + strings.Replace(this.DownwardAPI.String(), "DownwardAPIVolumeSource", "DownwardAPIVolumeSource", 1) + `,`, + `FC:` + strings.Replace(this.FC.String(), "FCVolumeSource", "FCVolumeSource", 1) + `,`, + `AzureFile:` + strings.Replace(this.AzureFile.String(), "AzureFileVolumeSource", "AzureFileVolumeSource", 1) + `,`, + `ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapVolumeSource", "ConfigMapVolumeSource", 1) + `,`, + `VsphereVolume:` + strings.Replace(this.VsphereVolume.String(), "VsphereVirtualDiskVolumeSource", 
"VsphereVirtualDiskVolumeSource", 1) + `,`, + `Quobyte:` + strings.Replace(this.Quobyte.String(), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, + `AzureDisk:` + strings.Replace(this.AzureDisk.String(), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, + `PhotonPersistentDisk:` + strings.Replace(this.PhotonPersistentDisk.String(), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, + `PortworxVolume:` + strings.Replace(this.PortworxVolume.String(), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, + `ScaleIO:` + strings.Replace(this.ScaleIO.String(), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`, + `Projected:` + strings.Replace(this.Projected.String(), "ProjectedVolumeSource", "ProjectedVolumeSource", 1) + `,`, + `StorageOS:` + strings.Replace(this.StorageOS.String(), "StorageOSVolumeSource", "StorageOSVolumeSource", 1) + `,`, + `CSI:` + strings.Replace(this.CSI.String(), "CSIVolumeSource", "CSIVolumeSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VsphereVirtualDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VsphereVirtualDiskVolumeSource{`, + `VolumePath:` + fmt.Sprintf("%v", this.VolumePath) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `StoragePolicyName:` + fmt.Sprintf("%v", this.StoragePolicyName) + `,`, + `StoragePolicyID:` + fmt.Sprintf("%v", this.StoragePolicyID) + `,`, + `}`, + }, "") + return s +} +func (this *WeightedPodAffinityTerm) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WeightedPodAffinityTerm{`, + `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, + `PodAffinityTerm:` + strings.Replace(strings.Replace(this.PodAffinityTerm.String(), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *WindowsSecurityContextOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WindowsSecurityContextOptions{`, + `GMSACredentialSpecName:` + valueToStringGenerated(this.GMSACredentialSpecName) + `,`, + `GMSACredentialSpec:` + valueToStringGenerated(this.GMSACredentialSpec) + `,`, + `RunAsUserName:` + valueToStringGenerated(this.RunAsUserName) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen 
|= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + m.Partition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Partition |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Affinity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Affinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Affinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeAffinity == nil { + m.NodeAffinity = &NodeAffinity{} + } + if err := 
m.NodeAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodAffinity == nil { + m.PodAffinity = &PodAffinity{} + } + if err := m.PodAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAntiAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodAntiAffinity == nil { + m.PodAntiAffinity = &PodAntiAffinity{} + } + if err := m.PodAntiAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AttachedVolume) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttachedVolume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttachedVolume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = UniqueVolumeName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DevicePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AvoidPods) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AvoidPods: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AvoidPods: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferAvoidPods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferAvoidPods = append(m.PreferAvoidPods, PreferAvoidPodsEntry{}) + if err := m.PreferAvoidPods[len(m.PreferAvoidPods)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureDiskVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType 
= %d for field DiskName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DiskName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataDiskURI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataDiskURI = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CachingMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := AzureDataDiskCachingMode(dAtA[iNdEx:postIndex]) + m.CachingMode = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.FSType = &s + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ReadOnly = &b + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := AzureDataDiskKind(dAtA[iNdEx:postIndex]) + m.Kind = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureFilePersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureFilePersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShareName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretNamespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.SecretNamespace = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureFileVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureFileVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShareName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF 
+ } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Binding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Binding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Binding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSIPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeHandle", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeHandle = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VolumeAttributes == nil { + m.VolumeAttributes = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if 
intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.VolumeAttributes[mapkey] = mapvalue + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ControllerPublishSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ControllerPublishSecretRef == nil { + m.ControllerPublishSecretRef = &SecretReference{} + } + if err := m.ControllerPublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeStageSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeStageSecretRef == nil { + m.NodeStageSecretRef = &SecretReference{} + } + if err := m.NodeStageSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodePublishSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.NodePublishSecretRef == nil { + m.NodePublishSecretRef = &SecretReference{} + } + if err := m.NodePublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ControllerExpandSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ControllerExpandSecretRef == nil { + m.ControllerExpandSecretRef = &SecretReference{} + } + if err := m.ControllerExpandSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSIVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ReadOnly = &b + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.FSType = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VolumeAttributes == nil { + m.VolumeAttributes = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.VolumeAttributes[mapkey] = mapvalue + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodePublishSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + 
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodePublishSecretRef == nil { + m.NodePublishSecretRef = &LocalObjectReference{} + } + if err := m.NodePublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Capabilities) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Capabilities: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Capabilities: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Add", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Add = append(m.Add, Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Drop", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Drop = append(m.Drop, Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CephFSPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CephFSPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretFile = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } -func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { +func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17541,7 +29113,7 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -17549,15 +29121,15 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: CephFSVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CephFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17569,7 +29141,7 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -17579,14 +29151,17 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeID = string(dAtA[iNdEx:postIndex]) + m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17598,7 +29173,7 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-17608,35 +29183,19 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(dAtA[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) - } - m.Partition = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Partition |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17646,67 +29205,29 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Affinity) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Affinity: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Affinity: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeAffinity", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17716,28 +29237,27 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.NodeAffinity == nil { - m.NodeAffinity = &NodeAffinity{} - } - if err := m.NodeAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.SecretFile = 
string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodAffinity", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17749,7 +29269,7 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -17758,21 +29278,24 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.PodAffinity == nil { - m.PodAffinity = &PodAffinity{} + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} } - if err := m.PodAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodAntiAffinity", wireType) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17782,25 +29305,12 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PodAntiAffinity == nil { - m.PodAntiAffinity = &PodAntiAffinity{} - } - if err := m.PodAntiAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -17810,6 +29320,9 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -17822,7 +29335,7 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { } return nil } -func (m *AttachedVolume) Unmarshal(dAtA []byte) error { +func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17837,7 +29350,7 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -17845,15 +29358,15 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AttachedVolume: wiretype end group for non-group") + return fmt.Errorf("proto: CinderPersistentVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AttachedVolume: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CinderPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17865,7 +29378,7 @@ 
func (m *AttachedVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -17875,14 +29388,17 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = UniqueVolumeName(dAtA[iNdEx:postIndex]) + m.VolumeID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17894,7 +29410,7 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -17904,64 +29420,37 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DevicePath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AvoidPods) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AvoidPods: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AvoidPods: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.ReadOnly = bool(v != 0) + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreferAvoidPods", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17973,7 +29462,7 @@ func (m *AvoidPods) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -17982,11 +29471,16 @@ func (m *AvoidPods) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.PreferAvoidPods = append(m.PreferAvoidPods, PreferAvoidPodsEntry{}) - if err := 
m.PreferAvoidPods[len(m.PreferAvoidPods)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -17999,6 +29493,9 @@ func (m *AvoidPods) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -18011,7 +29508,7 @@ func (m *AvoidPods) Unmarshal(dAtA []byte) error { } return nil } -func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { +func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18026,7 +29523,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18034,15 +29531,15 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AzureDiskVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: CinderVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AzureDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CinderVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DiskName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18054,7 +29551,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18064,14 +29561,17 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.DiskName = string(dAtA[iNdEx:postIndex]) + m.VolumeID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataDiskURI", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18083,7 +29583,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18093,16 +29593,19 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.DataDiskURI = string(dAtA[iNdEx:postIndex]) + m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CachingMode", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ 
-18112,27 +29615,17 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := AzureDataDiskCachingMode(dAtA[iNdEx:postIndex]) - m.CachingMode = &s - iNdEx = postIndex + m.ReadOnly = bool(v != 0) case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18142,48 +29635,86 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.FSType = &s + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + if skippy < 0 { + return ErrInvalidLengthGenerated } - b := bool(v != 0) - m.ReadOnly = &b - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated } - var stringLen uint64 + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientIPConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClientIPConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18193,22 +29724,12 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= int32(b&0x7F) << 
shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := AzureDataDiskKind(dAtA[iNdEx:postIndex]) - m.Kind = &s - iNdEx = postIndex + m.TimeoutSeconds = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -18218,6 +29739,9 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -18230,7 +29754,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ComponentCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18245,7 +29769,7 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18253,15 +29777,15 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AzureFilePersistentVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AzureFilePersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18273,7 +29797,7 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18283,14 +29807,17 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretName = string(dAtA[iNdEx:postIndex]) + m.Type = ComponentConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18302,7 +29829,7 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18312,16 +29839,19 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ShareName = string(dAtA[iNdEx:postIndex]) + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = 
%d for field ReadOnly", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18331,15 +29861,27 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.ReadOnly = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretNamespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18351,7 +29893,7 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18361,11 +29903,13 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.SecretNamespace = &s + m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -18376,6 +29920,9 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -18388,7 +29935,7 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ComponentStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18403,7 +29950,7 @@ func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18411,17 +29958,17 @@ func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AzureFileVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AzureFileVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ComponentStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18431,26 +29978,30 @@ func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { 
break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretName = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18460,41 +30011,26 @@ func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.ShareName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + m.Conditions = append(m.Conditions, ComponentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.ReadOnly = bool(v != 0) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -18504,6 +30040,9 @@ func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -18516,7 +30055,7 @@ func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *Binding) Unmarshal(dAtA []byte) error { +func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18531,7 +30070,7 @@ func (m *Binding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18539,15 +30078,15 @@ func (m *Binding) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Binding: wiretype end group for non-group") + return fmt.Errorf("proto: ComponentStatusList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Binding: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ComponentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18559,7 +30098,7 @@ func (m *Binding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -18568,16 +30107,19 @@ func (m *Binding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18589,7 +30131,7 @@ func (m *Binding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -18598,10 +30140,14 @@ func (m *Binding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ComponentStatus{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -18614,6 +30160,9 @@ func (m *Binding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -18626,7 +30175,7 @@ func (m *Binding) Unmarshal(dAtA []byte) error { } return nil } -func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ConfigMap) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18641,7 +30190,7 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18649,17 +30198,17 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CSIPersistentVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ConfigMap: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfigMap: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18669,102 +30218,28 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Driver = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field VolumeHandle", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeHandle = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18776,7 +30251,7 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -18785,11 +30260,14 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.VolumeAttributes == nil { - m.VolumeAttributes = make(map[string]string) + if m.Data == nil { + m.Data = make(map[string]string) } var mapkey string var mapvalue string @@ -18805,7 +30283,7 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18822,7 +30300,7 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18832,6 +30310,9 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -18848,7 +30329,7 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << 
shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18858,6 +30339,9 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -18878,77 +30362,11 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.VolumeAttributes[mapkey] = mapvalue - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ControllerPublishSecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ControllerPublishSecretRef == nil { - m.ControllerPublishSecretRef = &SecretReference{} - } - if err := m.ControllerPublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeStageSecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NodeStageSecretRef == nil { - m.NodeStageSecretRef = &SecretReference{} - } - if err := m.NodeStageSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Data[mapkey] = mapvalue iNdEx = postIndex - case 8: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodePublishSecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BinaryData", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18960,7 +30378,7 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -18969,177 +30387,17 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NodePublishSecretRef == nil { - m.NodePublishSecretRef = &SecretReference{} - } - if err := m.NodePublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CSIVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Driver = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.ReadOnly = &b - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.FSType = &s - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.VolumeAttributes == nil { - m.VolumeAttributes = make(map[string]string) + if m.BinaryData == nil { + m.BinaryData = make(map[string][]byte) } var mapkey string - var mapvalue string + mapvalue := []byte{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -19152,7 +30410,7 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19169,7 +30427,7 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19179,13 +30437,16 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { 
+ return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey } else if fieldNum == 2 { - var stringLenmapvalue uint64 + var mapbyteLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19195,21 +30456,25 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + mapbyteLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex < 0 { + return ErrInvalidLengthGenerated + } + if postbytesIndex > l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue + mapvalue = make([]byte, mapbyteLen) + copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex } else { iNdEx = entryPreIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -19225,40 +30490,7 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.VolumeAttributes[mapkey] = mapvalue - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodePublishSecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NodePublishSecretRef == nil { - m.NodePublishSecretRef = &LocalObjectReference{} - } - if err := m.NodePublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.BinaryData[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -19269,6 +30501,9 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -19281,7 +30516,7 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *Capabilities) Unmarshal(dAtA []byte) error { +func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19296,7 +30531,7 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19304,17 +30539,17 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Capabilities: wiretype end group for non-group") + return fmt.Errorf("proto: ConfigMapEnvSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Capabilities: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfigMapEnvSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType 
= %d for field Add", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19324,26 +30559,30 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Add = append(m.Add, Capability(dAtA[iNdEx:postIndex])) + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Drop", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19353,21 +30592,13 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Drop = append(m.Drop, Capability(dAtA[iNdEx:postIndex])) - iNdEx = postIndex + b := bool(v != 0) + m.Optional = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -19377,6 +30608,9 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -19389,7 +30623,7 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { } return nil } -func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19404,7 +30638,7 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19412,17 +30646,17 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CephFSPersistentVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ConfigMapKeySelector: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CephFSPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfigMapKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19432,24 +30666,28 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19461,7 +30699,7 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19471,16 +30709,19 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.Key = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19490,26 +30731,71 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.User = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 
64 { return ErrIntOverflowGenerated @@ -19519,24 +30805,28 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretFile = string(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -19548,7 +30838,7 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -19557,36 +30847,17 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretRef == nil { - m.SecretRef = &SecretReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ConfigMap{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -19596,6 +30867,9 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -19608,7 +30882,7 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19623,7 +30897,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19631,15 +30905,15 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CephFSVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ConfigMapNodeConfigSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CephFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfigMapNodeConfigSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19651,7 +30925,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19661,14 +30935,17 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19680,7 +30957,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19690,14 +30967,17 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19709,7 +30989,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19719,14 +30999,17 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.User = string(dAtA[iNdEx:postIndex]) + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19738,7 +31021,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19748,16 +31031,19 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretFile = string(dAtA[iNdEx:postIndex]) + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KubeletConfigKey", wireType) } - var msglen int + var stringLen 
uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19767,45 +31053,24 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretRef == nil { - m.SecretRef = &LocalObjectReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.KubeletConfigKey = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -19815,6 +31080,9 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -19827,7 +31095,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19842,7 +31110,7 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19850,17 +31118,17 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CinderPersistentVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ConfigMapProjection: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CinderPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfigMapProjection: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19870,26 +31138,30 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeID = string(dAtA[iNdEx:postIndex]) + if err := 
m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19899,24 +31171,29 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(dAtA[iNdEx:postIndex]) + m.Items = append(m.Items, KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -19928,45 +31205,13 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &SecretReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + b := bool(v != 0) + m.Optional = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -19976,6 +31221,9 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -19988,7 +31236,7 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20003,7 +31251,7 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -20011,17 +31259,17 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CinderVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ConfigMapVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CinderVolumeSource: illegal 
tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfigMapVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20031,26 +31279,30 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeID = string(dAtA[iNdEx:postIndex]) + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20060,26 +31312,31 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(dAtA[iNdEx:postIndex]) + m.Items = append(m.Items, KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) } - var v int + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20089,17 +31346,17 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - m.ReadOnly = bool(v != 0) + m.DefaultMode = &v case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20109,25 +31366,13 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &LocalObjectReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + b := 
bool(v != 0) + m.Optional = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -20137,6 +31382,9 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -20149,7 +31397,7 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { +func (m *Container) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20164,7 +31412,7 @@ func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -20172,17 +31420,17 @@ func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClientIPConfig: wiretype end group for non-group") + return fmt.Errorf("proto: Container: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClientIPConfig: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Container: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var v int32 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20192,65 +31440,27 @@ func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.TimeoutSeconds = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ComponentCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20262,7 +31472,7 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + 
stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -20272,14 +31482,17 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = ComponentConditionType(dAtA[iNdEx:postIndex]) + m.Image = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20291,7 +31504,7 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -20301,14 +31514,17 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20320,7 +31536,7 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -20330,14 +31546,17 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 4: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WorkingDir", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20349,7 +31568,7 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -20359,64 +31578,17 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ComponentStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := 
dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.WorkingDir = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20428,7 +31600,7 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -20437,16 +31609,20 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ports = append(m.Ports, ContainerPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20458,7 +31634,7 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -20467,67 +31643,20 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, ComponentCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ComponentStatusList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20539,7 +31668,7 @@ func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -20548,16 +31677,19 @@ func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20569,7 +31701,7 @@ func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -20578,67 +31710,56 @@ func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ComponentStatus{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.VolumeMounts = append(m.VolumeMounts, VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType) } - if skippy < 0 { - return ErrInvalidLengthGenerated + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if msglen < 0 { + return ErrInvalidLengthGenerated } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfigMap) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + if m.LivenessProbe == nil { + m.LivenessProbe = &Probe{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfigMap: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMap: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + if err := m.LivenessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20650,7 +31771,7 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -20659,16 +31780,22 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ReadinessProbe == nil { + m.ReadinessProbe = &Probe{} + } + if err := m.ReadinessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20680,7 +31807,7 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -20689,106 +31816,24 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Data == nil { - m.Data = make(map[string]string) + if m.Lifecycle == nil { + m.Lifecycle = &Lifecycle{} } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return 
ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.Lifecycle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Data[mapkey] = mapvalue iNdEx = postIndex - case 3: + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BinaryData", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePath", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20798,164 +31843,59 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.BinaryData == nil { - m.BinaryData = make(map[string][]byte) + m.TerminationMessagePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType) } - var mapkey string - mapvalue := []byte{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapbyteLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapbyteLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intMapbyteLen := int(mapbyteLen) - if intMapbyteLen < 0 { - return ErrInvalidLengthGenerated - } - postbytesIndex := iNdEx + intMapbyteLen - if postbytesIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = make([]byte, mapbyteLen) - copy(mapvalue, dAtA[iNdEx:postbytesIndex]) - iNdEx = postbytesIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break } } - m.BinaryData[mapkey] = mapvalue - iNdEx = postIndex - 
default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfigMapEnvSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapEnvSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.ImagePullPolicy = PullPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20967,7 +31907,7 @@ func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -20976,16 +31916,22 @@ func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.SecurityContext == nil { + m.SecurityContext = &SecurityContext{} + } + if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 16: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -20997,66 +31943,55 @@ func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.Optional = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + m.Stdin = bool(v != 0) + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StdinOnce", wireType) } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + m.StdinOnce = bool(v != 0) + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfigMapKeySelector: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.TTY = bool(v != 0) + case 19: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21068,7 +32003,7 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -21077,16 +32012,20 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.EnvFrom = append(m.EnvFrom, EnvFromSource{}) + if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 20: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -21098,7 +32037,7 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21108,16 +32047,53 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TerminationMessagePolicy = TerminationMessagePolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeDevices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(dAtA[iNdEx:postIndex]) + m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{}) + if err := 
m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartupProbe", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21127,13 +32103,28 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.Optional = &b + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartupProbe == nil { + m.StartupProbe = &Probe{} + } + if err := m.StartupProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -21143,6 +32134,9 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -21155,7 +32149,7 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfigMapList) Unmarshal(dAtA []byte) error { +func (m *ContainerImage) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21170,7 +32164,7 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21178,17 +32172,17 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfigMapList: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerImage: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerImage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21198,27 +32192,29 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
SizeBytes", wireType) } - var msglen int + m.SizeBytes = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21228,23 +32224,11 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + m.SizeBytes |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ConfigMap{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -21254,6 +32238,9 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -21266,7 +32253,7 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { +func (m *ContainerPort) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21281,7 +32268,7 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21289,15 +32276,15 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfigMapNodeConfigSource: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerPort: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapNodeConfigSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerPort: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -21309,7 +32296,7 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21319,16 +32306,19 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Namespace = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType) } - var stringLen uint64 + m.HostPort = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21338,26 +32328,16 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + m.HostPort |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType) } - var stringLen uint64 + m.ContainerPort = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21367,24 +32347,14 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + m.ContainerPort |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -21396,7 +32366,7 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21406,14 +32376,17 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + m.Protocol = Protocol(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KubeletConfigKey", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -21425,7 +32398,7 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21435,10 +32408,13 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.KubeletConfigKey = string(dAtA[iNdEx:postIndex]) + m.HostIP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -21449,6 +32425,9 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -21461,7 +32440,7 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { +func (m *ContainerState) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21476,7 +32455,7 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21484,15 +32463,15 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) 
wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfigMapProjection: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerState: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapProjection: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerState: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21504,7 +32483,7 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -21513,16 +32492,22 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Waiting == nil { + m.Waiting = &ContainerStateWaiting{} + } + if err := m.Waiting.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Running", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21534,7 +32519,7 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -21543,19 +32528,24 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, KeyToPath{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Running == nil { + m.Running = &ContainerStateRunning{} + } + if err := m.Running.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminated", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21565,13 +32555,28 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.Optional = &b + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Terminated == nil { + m.Terminated = &ContainerStateTerminated{} + } + if err := m.Terminated.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -21581,6 +32586,9 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { if skippy < 0 { return 
ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -21593,7 +32601,7 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21608,7 +32616,7 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21616,15 +32624,15 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfigMapVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerStateRunning: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerStateRunning: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21636,7 +32644,7 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -21645,85 +32653,16 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, KeyToPath{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.DefaultMode = &v - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Optional = &b default: iNdEx = 
preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -21733,6 +32672,9 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -21745,7 +32687,7 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *Container) Unmarshal(dAtA []byte) error { +func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21760,7 +32702,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21768,17 +32710,17 @@ func (m *Container) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Container: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerStateTerminated: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Container: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerStateTerminated: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType) } - var stringLen uint64 + m.ExitCode = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21788,26 +32730,16 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + m.ExitCode |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType) } - var stringLen uint64 + m.Signal = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21817,24 +32749,14 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + m.Signal |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Image = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -21846,7 +32768,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21856,14 +32778,17 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if 
postIndex > l { return io.ErrUnexpectedEOF } - m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -21875,7 +32800,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21885,43 +32810,17 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkingDir", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.WorkingDir = string(dAtA[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21933,7 +32832,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -21942,48 +32841,19 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ports = append(m.Ports, ContainerPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Env = append(m.Env, EnvVar{}) - if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 8: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FinishedAt", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21995,7 +32865,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if 
b < 0x80 { break } @@ -22004,49 +32874,21 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeMounts = append(m.VolumeMounts, VolumeMount{}) - if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.FinishedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 10: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22056,63 +32898,82 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.LivenessProbe == nil { - m.LivenessProbe = &Probe{} - } - if err := m.LivenessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ContainerID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + if skippy < 0 { + return ErrInvalidLengthGenerated } - if msglen < 0 { + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.ReadinessProbe == nil { - m.ReadinessProbe = &Probe{} + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if err := m.ReadinessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex - case 12: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStateWaiting: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStateWaiting: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22122,28 +32983,27 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Lifecycle == nil { - m.Lifecycle = &Lifecycle{} - } - if err := m.Lifecycle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 13: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePath", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -22155,7 +33015,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22165,14 +33025,70 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.TerminationMessagePath = string(dAtA[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 14: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -22184,7 +33100,7 @@ func (m *Container) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22194,14 +33110,17 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ImagePullPolicy = PullPolicy(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 15: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22213,7 +33132,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -22222,21 +33141,21 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecurityContext == nil { - m.SecurityContext = &SecurityContext{} - } - if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.State.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTerminationState", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22246,15 +33165,28 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Stdin = bool(v != 0) - case 17: + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTerminationState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StdinOnce", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -22266,17 +33198,17 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - m.StdinOnce = bool(v != 0) - case 18: + m.Ready = bool(v != 0) + case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RestartCount", wireType) } - var v int + m.RestartCount = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22286,17 +33218,16 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + m.RestartCount |= int32(b&0x7F) << shift if b < 0x80 { break } } - m.TTY = bool(v != 0) - case 19: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) + return fmt.Errorf("proto: wrong 
wireType = %d for field Image", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22306,26 +33237,27 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.EnvFrom = append(m.EnvFrom, EnvFromSource{}) - if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Image = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 20: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ImageID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -22337,7 +33269,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22347,16 +33279,19 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.TerminationMessagePolicy = TerminationMessagePolicy(dAtA[iNdEx:postIndex]) + m.ImageID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 21: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeDevices", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22366,23 +33301,45 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{}) - if err := m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ContainerID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Started", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Started = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -22392,6 +33349,9 @@ func (m *Container) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -22404,7 +33364,7 @@ func (m 
*Container) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerImage) Unmarshal(dAtA []byte) error { +func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22419,7 +33379,7 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22427,46 +33387,17 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerImage: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonEndpoint: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerImage: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonEndpoint: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SizeBytes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) } - m.SizeBytes = 0 + m.Port = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22476,7 +33407,7 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.SizeBytes |= (int64(b) & 0x7F) << shift + m.Port |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -22490,6 +33421,9 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -22502,7 +33436,7 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerPort) Unmarshal(dAtA []byte) error { +func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22517,7 +33451,7 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22525,17 +33459,17 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerPort: wiretype end group for non-group") + return fmt.Errorf("proto: DownwardAPIProjection: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerPort: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DownwardAPIProjection: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } 
- var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22545,116 +33479,25 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType) - } - m.HostPort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HostPort |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType) - } - m.ContainerPort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ContainerPort |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Protocol = Protocol(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF + m.Items = append(m.Items, DownwardAPIVolumeFile{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.HostIP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -22665,6 +33508,9 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -22677,7 +33523,7 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerState) Unmarshal(dAtA []byte) error { +func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22692,7 +33538,7 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 
0x80 { break } @@ -22700,17 +33546,17 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerState: wiretype end group for non-group") + return fmt.Errorf("proto: DownwardAPIVolumeFile: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerState: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DownwardAPIVolumeFile: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22720,28 +33566,27 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Waiting == nil { - m.Waiting = &ContainerStateWaiting{} - } - if err := m.Waiting.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Running", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22753,7 +33598,7 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -22762,19 +33607,22 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Running == nil { - m.Running = &ContainerStateRunning{} + if m.FieldRef == nil { + m.FieldRef = &ObjectFieldSelector{} } - if err := m.Running.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Terminated", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22786,7 +33634,7 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -22795,16 +33643,39 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Terminated == nil { - m.Terminated = &ContainerStateTerminated{} + if m.ResourceFieldRef == nil { + m.ResourceFieldRef = &ResourceFieldSelector{} } - if err := m.Terminated.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := 
m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Mode = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -22814,6 +33685,9 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -22826,7 +33700,7 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { +func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22841,7 +33715,7 @@ func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22849,15 +33723,15 @@ func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerStateRunning: wiretype end group for non-group") + return fmt.Errorf("proto: DownwardAPIVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerStateRunning: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DownwardAPIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22869,7 +33743,7 @@ func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -22878,13 +33752,37 @@ func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, DownwardAPIVolumeFile{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DefaultMode = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -22894,6 +33792,9 @@ func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -22906,7 +33807,7 @@ func (m 
*ContainerStateRunning) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { +func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22921,7 +33822,7 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22929,17 +33830,17 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerStateTerminated: wiretype end group for non-group") + return fmt.Errorf("proto: EmptyDirVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerStateTerminated: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EmptyDirVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Medium", wireType) } - m.ExitCode = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22949,35 +33850,29 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ExitCode |= (int32(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - m.Signal = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Signal |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - case 3: + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Medium = StorageMedium(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SizeLimit", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22987,24 +33882,84 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SizeLimit == nil { + m.SizeLimit = &resource.Quantity{} + } + if err := m.SizeLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointAddress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointAddress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointAddress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23016,7 +33971,7 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23026,14 +33981,17 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.IP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23045,7 +34003,7 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -23054,18 +34012,24 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.TargetRef == nil { + m.TargetRef = &ObjectReference{} + } + if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FinishedAt", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23075,25 +34039,27 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if err := 
m.FinishedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Hostname = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 7: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23105,7 +34071,7 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23115,10 +34081,14 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ContainerID = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.NodeName = &s iNdEx = postIndex default: iNdEx = preIndex @@ -23129,6 +34099,9 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -23141,7 +34114,7 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { +func (m *EndpointPort) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23156,7 +34129,7 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23164,15 +34137,15 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerStateWaiting: wiretype end group for non-group") + return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerStateWaiting: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23184,7 +34157,7 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23194,14 +34167,36 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23213,7 +34208,7 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23223,10 +34218,13 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.Protocol = Protocol(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -23237,6 +34235,9 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -23249,7 +34250,7 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerStatus) Unmarshal(dAtA []byte) error { +func (m *EndpointSubset) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23264,7 +34265,7 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23272,17 +34273,17 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerStatus: wiretype end group for non-group") + return fmt.Errorf("proto: EndpointSubset: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EndpointSubset: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23292,24 +34293,29 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Addresses = append(m.Addresses, EndpointAddress{}) + if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NotReadyAddresses", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23321,7 +34327,7 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 
0x80 { break } @@ -23330,16 +34336,20 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.State.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.NotReadyAddresses = append(m.NotReadyAddresses, EndpointAddress{}) + if err := m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTerminationState", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23351,7 +34361,7 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -23360,86 +34370,75 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastTerminationState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ports = append(m.Ports, EndpointPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Ready = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RestartCount", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - m.RestartCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RestartCount |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } + if skippy < 0 { + return ErrInvalidLengthGenerated } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Endpoints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Image = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Endpoints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Endpoints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImageID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23449,26 +34448,30 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.ImageID = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 8: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23478,20 +34481,25 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.ContainerID = string(dAtA[iNdEx:postIndex]) + m.Subsets = append(m.Subsets, EndpointSubset{}) + if err := m.Subsets[len(m.Subsets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -23502,6 +34510,9 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -23514,7 +34525,7 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { +func (m *EndpointsList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23529,7 +34540,7 @@ func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23537,17 +34548,17 @@ func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DaemonEndpoint: wiretype end group for non-group") + return fmt.Errorf("proto: EndpointsList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonEndpoint: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EndpointsList: 
illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - m.Port = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23557,62 +34568,26 @@ func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Port |= (int32(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DownwardAPIProjection: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DownwardAPIProjection: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } @@ -23626,7 +34601,7 @@ func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -23635,10 +34610,13 @@ func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, DownwardAPIVolumeFile{}) + m.Items = append(m.Items, Endpoints{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -23652,6 +34630,9 @@ func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -23664,7 +34645,7 @@ func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { } return nil } -func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { +func (m *EnvFromSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23679,7 +34660,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23687,15 +34668,15 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: 
DownwardAPIVolumeFile: wiretype end group for non-group") + return fmt.Errorf("proto: EnvFromSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DownwardAPIVolumeFile: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EnvFromSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23707,7 +34688,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23717,14 +34698,17 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.Prefix = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23736,7 +34720,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -23745,19 +34729,22 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.FieldRef == nil { - m.FieldRef = &ObjectFieldSelector{} + if m.ConfigMapRef == nil { + m.ConfigMapRef = &ConfigMapEnvSource{} } - if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23769,7 +34756,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -23778,36 +34765,19 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ResourceFieldRef == nil { - m.ResourceFieldRef = &ResourceFieldSelector{} + if m.SecretRef == nil { + m.SecretRef = &SecretEnvSource{} } - if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= 
(int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Mode = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -23817,6 +34787,9 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -23829,7 +34802,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { } return nil } -func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { +func (m *EnvVar) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23844,7 +34817,7 @@ func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23852,17 +34825,17 @@ func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DownwardAPIVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: EnvVar: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DownwardAPIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EnvVar: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23872,96 +34845,27 @@ func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, DownwardAPIVolumeFile{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.DefaultMode = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - 
if wireType == 4 { - return fmt.Errorf("proto: EmptyDirVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EmptyDirVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Medium", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23973,7 +34877,7 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23983,14 +34887,17 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Medium = StorageMedium(dAtA[iNdEx:postIndex]) + m.Value = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SizeLimit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24002,7 +34909,7 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24011,13 +34918,16 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.SizeLimit == nil { - m.SizeLimit = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if m.ValueFrom == nil { + m.ValueFrom = &EnvVarSource{} } - if err := m.SizeLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ValueFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -24030,6 +34940,9 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -24042,7 +34955,7 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *EndpointAddress) Unmarshal(dAtA []byte) error { +func (m *EnvVarSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24057,7 +34970,7 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -24065,17 +34978,17 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EndpointAddress: wiretype end group for non-group") + return fmt.Errorf("proto: EnvVarSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointAddress: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EnvVarSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) 
+ return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24085,24 +34998,31 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.IP = string(dAtA[iNdEx:postIndex]) + if m.FieldRef == nil { + m.FieldRef = &ObjectFieldSelector{} + } + if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24114,7 +35034,7 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24123,21 +35043,24 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TargetRef == nil { - m.TargetRef = &ObjectReference{} + if m.ResourceFieldRef == nil { + m.ResourceFieldRef = &ResourceFieldSelector{} } - if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24147,26 +35070,33 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Hostname = string(dAtA[iNdEx:postIndex]) + if m.ConfigMapKeyRef == nil { + m.ConfigMapKeyRef = &ConfigMapKeySelector{} + } + if err := m.ConfigMapKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretKeyRef", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24176,21 +35106,27 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if 
intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.NodeName = &s + if m.SecretKeyRef == nil { + m.SecretKeyRef = &SecretKeySelector{} + } + if err := m.SecretKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -24201,6 +35137,9 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -24213,7 +35152,7 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } return nil } -func (m *EndpointPort) Unmarshal(dAtA []byte) error { +func (m *EphemeralContainer) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24228,7 +35167,7 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -24236,17 +35175,17 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") + return fmt.Errorf("proto: EphemeralContainer: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EphemeralContainer: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainerCommon", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24256,43 +35195,28 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if err := m.EphemeralContainerCommon.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Port |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetContainerName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24304,7 +35228,7 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 
0x80 { break } @@ -24314,10 +35238,13 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Protocol = Protocol(dAtA[iNdEx:postIndex]) + m.TargetContainerName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -24328,6 +35255,9 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -24340,7 +35270,7 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } return nil } -func (m *EndpointSubset) Unmarshal(dAtA []byte) error { +func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24355,7 +35285,7 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -24363,17 +35293,17 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EndpointSubset: wiretype end group for non-group") + return fmt.Errorf("proto: EphemeralContainerCommon: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointSubset: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EphemeralContainerCommon: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24383,28 +35313,29 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Addresses = append(m.Addresses, EndpointAddress{}) - if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NotReadyAddresses", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24414,24 +35345,121 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.NotReadyAddresses = 
append(m.NotReadyAddresses, EndpointAddress{}) - if err := m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Image = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkingDir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WorkingDir = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } @@ -24445,7 +35473,7 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24454,67 +35482,54 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Ports = append(m.Ports, EndpointPort{}) + m.Ports = append(m.Ports, ContainerPort{}) if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) } - if skippy < 0 { - return ErrInvalidLengthGenerated + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if msglen < 0 { + return ErrInvalidLengthGenerated } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Endpoints) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + m.Env = append(m.Env, EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Endpoints: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Endpoints: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24526,7 +35541,7 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24535,16 +35550,19 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24556,7 +35574,7 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24565,67 +35583,56 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Subsets = append(m.Subsets, EndpointSubset{}) - if err := m.Subsets[len(m.Subsets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.VolumeMounts = append(m.VolumeMounts, VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType) } - if skippy < 0 { - return ErrInvalidLengthGenerated + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if msglen < 0 { + return ErrInvalidLengthGenerated } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EndpointsList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + if m.LivenessProbe == nil { + m.LivenessProbe = &Probe{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EndpointsList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointsList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + if err := m.LivenessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24637,7 +35644,7 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24646,16 +35653,22 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ReadinessProbe == nil { + m.ReadinessProbe = &Probe{} + } + if err := m.ReadinessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24667,7 +35680,7 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24676,67 +35689,54 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Endpoints{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Lifecycle == nil { + m.Lifecycle = &Lifecycle{} + } + if err := m.Lifecycle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePath", wireType) } - if skippy < 0 { - return ErrInvalidLengthGenerated + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EnvFromSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EnvFromSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EnvFromSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.TerminationMessagePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24748,7 +35748,7 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -24758,14 +35758,17 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Prefix = string(dAtA[iNdEx:postIndex]) + m.ImagePullPolicy = PullPolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24777,7 +35780,7 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24786,19 +35789,82 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ConfigMapRef == nil { - m.ConfigMapRef = &ConfigMapEnvSource{} + if m.SecurityContext == nil { + m.SecurityContext = &SecurityContext{} } - if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
m.Stdin = bool(v != 0) + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StdinOnce", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StdinOnce = bool(v != 0) + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TTY = bool(v != 0) + case 19: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24810,7 +35876,7 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24819,69 +35885,20 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretRef == nil { - m.SecretRef = &SecretEnvSource{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.EnvFrom = append(m.EnvFrom, EnvFromSource{}) + if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EnvVar) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EnvVar: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EnvVar: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 20: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24893,7 +35910,7 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -24903,16 +35920,19 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + 
m.TerminationMessagePolicy = TerminationMessagePolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 21: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeDevices", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24922,24 +35942,29 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Value = string(dAtA[iNdEx:postIndex]) + m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{}) + if err := m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 22: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartupProbe", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24951,7 +35976,7 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24960,13 +35985,16 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ValueFrom == nil { - m.ValueFrom = &EnvVarSource{} + if m.StartupProbe == nil { + m.StartupProbe = &Probe{} } - if err := m.ValueFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.StartupProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -24979,6 +36007,9 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -24991,7 +36022,7 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { } return nil } -func (m *EnvVarSource) Unmarshal(dAtA []byte) error { +func (m *EphemeralContainers) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25006,7 +36037,7 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25014,15 +36045,15 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EnvVarSource: wiretype end group for non-group") + return fmt.Errorf("proto: EphemeralContainers: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EnvVarSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EphemeralContainers: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } 
var msglen int for shift := uint(0); ; shift += 7 { @@ -25034,7 +36065,7 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25043,52 +36074,19 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FieldRef == nil { - m.FieldRef = &ObjectFieldSelector{} - } - if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.ResourceFieldRef == nil { - m.ResourceFieldRef = &ResourceFieldSelector{} - } - if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainers", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -25100,7 +36098,7 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25109,46 +36107,14 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConfigMapKeyRef == nil { - m.ConfigMapKeyRef = &ConfigMapKeySelector{} - } - if err := m.ConfigMapKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretKeyRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretKeyRef == nil { - m.SecretKeyRef = &SecretKeySelector{} - } - if err := m.SecretKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.EphemeralContainers = append(m.EphemeralContainers, EphemeralContainer{}) + if err := m.EphemeralContainers[len(m.EphemeralContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -25161,6 +36127,9 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -25188,7 +36157,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 
0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25216,7 +36185,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25225,6 +36194,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25246,7 +36218,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25255,6 +36227,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25276,7 +36251,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25286,6 +36261,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25305,7 +36283,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25315,6 +36293,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25334,7 +36315,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25343,6 +36324,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25364,7 +36348,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25373,6 +36357,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25394,7 +36381,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25403,6 +36390,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25424,7 +36414,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Count |= (int32(b) & 0x7F) << shift + m.Count |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -25443,7 +36433,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25453,6 +36443,9 @@ 
func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25472,7 +36465,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25481,6 +36474,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25502,7 +36498,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25511,6 +36507,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25535,7 +36534,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25545,6 +36544,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25564,7 +36566,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25573,6 +36575,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25597,7 +36602,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25607,6 +36612,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25626,7 +36634,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25636,6 +36644,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25650,6 +36661,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -25677,7 +36691,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25705,7 +36719,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25714,6 +36728,9 @@ func (m *EventList) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25735,7 +36752,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25744,6 +36761,9 @@ func (m *EventList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25761,6 +36781,9 @@ func (m *EventList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -25788,7 +36811,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25816,7 +36839,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Count |= (int32(b) & 0x7F) << shift + m.Count |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -25835,7 +36858,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25844,6 +36867,9 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25865,7 +36891,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25875,6 +36901,9 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25889,6 +36918,9 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -25916,7 +36948,7 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25944,7 +36976,7 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25954,6 +36986,9 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25973,7 +37008,7 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25983,6 +37018,9 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -25997,6 +37035,9 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -26024,7 +37065,7 @@ func (m *ExecAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26052,7 +37093,7 @@ func (m *ExecAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26062,6 +37103,9 @@ func (m *ExecAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26076,6 +37120,9 @@ func (m *ExecAction) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -26103,7 +37150,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26131,7 +37178,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26141,6 +37188,9 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26160,7 +37210,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -26180,7 +37230,176 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WWIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { 
+ return io.ErrUnexpectedEOF + } + m.WWIDs = append(m.WWIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlexPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlexPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26190,11 +37409,50 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) @@ -26209,7 +37467,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -26217,144 +37475,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { m.ReadOnly = bool(v != 0) case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WWIDs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.WWIDs = append(m.WWIDs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FlexPersistentVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FlexPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Driver = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -26366,7 +37487,7 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << 
shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -26375,59 +37496,9 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &SecretReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } @@ -26448,7 +37519,7 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26465,7 +37536,7 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26475,6 +37546,9 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -26491,7 +37565,7 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26501,6 +37575,9 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -26532,6 +37609,9 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -26559,7 +37639,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26587,7 +37667,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26597,6 +37677,9 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26616,7 +37699,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26626,6 +37709,9 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26645,7 +37731,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -26654,6 +37740,9 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26678,7 +37767,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -26698,7 +37787,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -26707,6 +37796,9 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26727,7 +37819,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26744,7 +37836,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26754,6 +37846,9 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -26770,7 +37865,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26780,6 +37875,9 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -26811,6 +37909,9 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -26838,7 +37939,7 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26866,7 +37967,7 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= 
uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26876,6 +37977,9 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26895,7 +37999,7 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26905,6 +38009,9 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26919,6 +38026,9 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -26946,7 +38056,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26974,7 +38084,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26984,6 +38094,9 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27003,7 +38116,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27013,6 +38126,9 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27032,7 +38148,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Partition |= (int32(b) & 0x7F) << shift + m.Partition |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -27051,7 +38167,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -27066,6 +38182,9 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -27093,7 +38212,7 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27121,7 +38240,7 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27131,6 +38250,9 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27150,7 +38272,7 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27160,6 +38282,9 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27179,7 +38304,7 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27189,6 +38314,9 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27203,6 +38331,9 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -27230,7 +38361,7 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27258,7 +38389,7 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27268,6 +38399,9 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27287,7 +38421,7 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27297,6 +38431,9 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27316,7 +38453,7 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -27336,7 +38473,7 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27346,6 +38483,9 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27361,6 +38501,9 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -27388,7 +38531,7 @@ func (m *GlusterfsVolumeSource) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27416,7 +38559,7 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27426,6 +38569,9 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27445,7 +38591,7 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27455,6 +38601,9 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27474,7 +38623,7 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -27489,6 +38638,9 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -27516,7 +38668,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27544,7 +38696,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27554,6 +38706,9 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27573,7 +38728,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -27582,6 +38737,9 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27603,7 +38761,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27613,6 +38771,9 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27632,7 +38793,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27642,6 +38803,9 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27661,7 +38825,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -27670,6 +38834,9 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27687,6 +38854,9 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -27714,7 +38884,7 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27742,7 +38912,7 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27752,6 +38922,9 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27771,7 +38944,7 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27781,6 +38954,9 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27795,6 +38971,9 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -27822,7 +39001,7 @@ func (m *Handler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27850,7 +39029,7 @@ func (m *Handler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -27859,6 +39038,9 @@ func (m *Handler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27883,7 +39065,7 @@ func (m *Handler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -27892,6 +39074,9 @@ func (m *Handler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27916,7 +39101,7 @@ func (m *Handler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -27925,6 +39110,9 @@ func (m *Handler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 
0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27944,6 +39132,9 @@ func (m *Handler) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -27971,7 +39162,7 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27999,7 +39190,7 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28009,6 +39200,9 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28028,7 +39222,7 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28038,6 +39232,9 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28052,6 +39249,9 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -28079,7 +39279,7 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28107,7 +39307,7 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28117,6 +39317,9 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28136,7 +39339,7 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28146,6 +39349,9 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28161,6 +39367,9 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -28188,7 +39397,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28216,7 +39425,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { 
break } @@ -28226,6 +39435,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28245,7 +39457,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28255,6 +39467,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28274,7 +39489,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Lun |= (int32(b) & 0x7F) << shift + m.Lun |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -28293,7 +39508,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28303,6 +39518,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28322,7 +39540,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28332,6 +39550,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28351,7 +39572,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28371,7 +39592,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28381,6 +39602,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28400,7 +39624,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28420,7 +39644,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28429,6 +39653,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28453,7 +39680,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ 
-28473,7 +39700,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28483,6 +39710,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28498,6 +39728,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -28525,7 +39758,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28553,7 +39786,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28563,6 +39796,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28582,7 +39818,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28592,6 +39828,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28611,7 +39850,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Lun |= (int32(b) & 0x7F) << shift + m.Lun |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -28630,7 +39869,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28640,6 +39879,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28659,7 +39901,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28669,6 +39911,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28688,7 +39933,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28708,7 +39953,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28718,6 +39963,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { 
return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28737,7 +39985,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28757,7 +40005,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28766,6 +40014,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28790,7 +40041,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28810,7 +40061,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28820,6 +40071,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28835,6 +40089,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -28862,7 +40119,7 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28890,7 +40147,7 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28900,6 +40157,9 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28919,7 +40179,7 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28929,6 +40189,9 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28948,7 +40211,7 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -28963,6 +40226,9 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -28990,7 +40256,7 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29018,7 +40284,7 @@ func (m *Lifecycle) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29027,6 +40293,9 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29051,7 +40320,7 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29060,6 +40329,9 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29079,6 +40351,9 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -29106,7 +40381,7 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29134,7 +40409,7 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29143,6 +40418,9 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29164,7 +40442,7 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29173,6 +40451,9 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29189,6 +40470,9 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -29216,7 +40500,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29244,7 +40528,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29254,6 +40538,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29273,7 +40560,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29282,6 +40569,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29289,7 +40579,7 @@ func (m 
*LimitRangeItem) Unmarshal(dAtA []byte) error { m.Max = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -29302,7 +40592,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29319,7 +40609,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29329,6 +40619,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -29345,7 +40638,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29354,13 +40647,13 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -29396,7 +40689,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29405,6 +40698,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29412,7 +40708,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { m.Min = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -29425,7 +40721,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29442,7 +40738,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29452,6 +40748,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -29468,7 +40767,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29477,13 +40776,13 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } 
if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -29519,7 +40818,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29528,6 +40827,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29535,7 +40837,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { m.Default = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -29548,7 +40850,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29565,7 +40867,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29575,6 +40877,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -29591,7 +40896,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29600,13 +40905,13 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -29642,7 +40947,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29651,6 +40956,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29658,7 +40966,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { m.DefaultRequest = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -29671,7 +40979,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29688,7 +40996,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-29698,6 +41006,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -29714,7 +41025,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29723,13 +41034,13 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -29765,7 +41076,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29774,6 +41085,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29781,7 +41095,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { m.MaxLimitRequestRatio = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -29794,7 +41108,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29811,7 +41125,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29821,6 +41135,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -29837,7 +41154,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29846,13 +41163,13 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -29883,6 +41200,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -29910,7 +41230,7 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 
{ break } @@ -29938,7 +41258,7 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29947,6 +41267,9 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29968,7 +41291,7 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29977,6 +41300,9 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29994,6 +41320,9 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30021,7 +41350,7 @@ func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30049,7 +41378,7 @@ func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30058,6 +41387,9 @@ func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30075,6 +41407,9 @@ func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30102,7 +41437,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30130,7 +41465,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30139,6 +41474,9 @@ func (m *List) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30160,7 +41498,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30169,10 +41507,13 @@ func (m *List) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, k8s_io_apimachinery_pkg_runtime.RawExtension{}) + m.Items = append(m.Items, runtime.RawExtension{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -30186,6 +41527,9 @@ func (m *List) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l 
{ return io.ErrUnexpectedEOF } @@ -30213,7 +41557,7 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30241,7 +41585,7 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30251,6 +41595,9 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30270,7 +41617,7 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30280,6 +41627,9 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30294,6 +41644,9 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30321,7 +41674,7 @@ func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30349,7 +41702,7 @@ func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30358,6 +41711,9 @@ func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30375,6 +41731,9 @@ func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30402,7 +41761,7 @@ func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30430,7 +41789,7 @@ func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30440,6 +41799,9 @@ func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30454,6 +41816,9 @@ func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30481,7 +41846,7 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30509,7 +41874,157 @@ func (m 
*LocalVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.FSType = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NFSVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Server = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30519,16 +42034,19 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if 
postIndex > l { return io.ErrUnexpectedEOF } m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30538,22 +42056,12 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.FSType = &s - iNdEx = postIndex + m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -30563,6 +42071,9 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30575,7 +42086,7 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { +func (m *Namespace) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30590,7 +42101,7 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30598,17 +42109,17 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NFSVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: Namespace: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Namespace: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30618,26 +42129,30 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Server = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30647,26 +42162,30 @@ func (m 
*NFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30676,12 +42195,25 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.ReadOnly = bool(v != 0) + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -30691,6 +42223,9 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30703,7 +42238,7 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *Namespace) Unmarshal(dAtA []byte) error { +func (m *NamespaceCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30718,7 +42253,7 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30726,17 +42261,17 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Namespace: wiretype end group for non-group") + return fmt.Errorf("proto: NamespaceCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Namespace: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NamespaceCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30746,25 +42281,59 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return 
io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Type = NamespaceConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -30776,7 +42345,7 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30785,18 +42354,21 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30806,21 +42378,55 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -30831,6 +42437,9 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + 
skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30858,7 +42467,7 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30886,7 +42495,7 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30895,6 +42504,9 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30916,7 +42528,7 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30925,6 +42537,9 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30942,6 +42557,9 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30969,7 +42587,7 @@ func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30997,7 +42615,7 @@ func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31007,6 +42625,9 @@ func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31021,6 +42642,9 @@ func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31048,7 +42672,7 @@ func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31076,7 +42700,7 @@ func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31086,11 +42710,48 @@ func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.Phase = NamespacePhase(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, NamespaceCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -31100,6 +42761,9 @@ func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31127,7 +42791,7 @@ func (m *Node) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31155,7 +42819,7 @@ func (m *Node) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31164,6 +42828,9 @@ func (m *Node) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31185,7 +42852,7 @@ func (m *Node) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31194,6 +42861,9 @@ func (m *Node) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31215,7 +42885,7 @@ func (m *Node) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31224,6 +42894,9 @@ func (m *Node) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31240,6 +42913,9 @@ func (m *Node) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31267,7 +42943,7 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31295,7 +42971,7 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31305,6 +42981,9 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31324,7 +43003,7 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31334,6 +43013,9 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31348,6 +43030,9 @@ func (m *NodeAddress) 
Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31375,7 +43060,7 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31403,7 +43088,7 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31412,6 +43097,9 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31436,7 +43124,7 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31445,6 +43133,9 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31462,6 +43153,9 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31489,7 +43183,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31517,7 +43211,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31527,6 +43221,9 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31546,7 +43243,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31556,6 +43253,9 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31575,7 +43275,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31584,6 +43284,9 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31605,7 +43308,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31614,6 +43317,9 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -31635,7 +43341,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31645,6 +43351,9 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31664,7 +43373,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31674,6 +43383,9 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31688,6 +43400,9 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31715,7 +43430,7 @@ func (m *NodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31743,7 +43458,7 @@ func (m *NodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31752,6 +43467,9 @@ func (m *NodeConfigSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31771,6 +43489,9 @@ func (m *NodeConfigSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31798,7 +43519,7 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31826,7 +43547,7 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31835,6 +43556,9 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31859,7 +43583,7 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31868,6 +43592,9 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31892,7 +43619,7 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31901,6 +43628,9 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31925,7 +43655,7 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31935,6 +43665,9 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31949,6 +43682,9 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31976,7 +43712,7 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32004,7 +43740,7 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32013,6 +43749,9 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32029,6 +43768,9 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -32056,7 +43798,7 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32084,7 +43826,7 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32093,6 +43835,9 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32114,7 +43859,7 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32123,6 +43868,9 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32140,6 +43888,9 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -32167,7 +43918,7 @@ func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32195,7 +43946,7 @@ func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32205,6 +43956,9 @@ func (m 
*NodeProxyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32219,6 +43973,9 @@ func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -32246,7 +44003,7 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32274,7 +44031,7 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32283,6 +44040,9 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32290,7 +44050,7 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { m.Capacity = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -32303,7 +44063,7 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32320,7 +44080,7 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32330,6 +44090,9 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -32346,7 +44109,7 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32355,13 +44118,13 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -32392,6 +44155,9 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -32419,7 +44185,7 @@ func (m *NodeSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32447,7 +44213,7 @@ func (m *NodeSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32456,6 +44222,9 @@ func (m *NodeSelector) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32473,6 +44242,9 @@ func (m *NodeSelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -32500,7 +44272,7 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32528,7 +44300,7 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32538,6 +44310,9 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32557,7 +44332,7 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32567,6 +44342,9 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32586,7 +44364,7 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32596,6 +44374,9 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32610,6 +44391,9 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -32637,7 +44421,7 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32665,7 +44449,7 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32674,6 +44458,9 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32696,7 +44483,7 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32705,6 +44492,9 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32722,6 +44512,9 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { if skippy < 0 { 
return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -32749,7 +44542,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32777,7 +44570,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32787,6 +44580,9 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32806,7 +44602,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32816,6 +44612,9 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32835,7 +44634,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32845,6 +44644,9 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32864,7 +44666,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32884,7 +44686,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32893,6 +44695,9 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32915,7 +44720,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32924,6 +44729,9 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32934,6 +44742,38 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodCIDRs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodCIDRs = append(m.PodCIDRs, 
string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -32943,6 +44783,9 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -32970,7 +44813,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32998,7 +44841,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33007,6 +44850,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33014,7 +44860,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { m.Capacity = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -33027,7 +44873,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33044,7 +44890,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33054,6 +44900,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -33070,7 +44919,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33079,13 +44928,13 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -33121,7 +44970,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33130,6 +44979,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33137,7 +44989,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { m.Allocatable = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -33150,7 +45002,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + 
wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33167,7 +45019,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33177,6 +45029,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -33193,7 +45048,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33202,13 +45057,13 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -33244,7 +45099,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33254,6 +45109,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33273,7 +45131,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33282,6 +45140,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33304,7 +45165,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33313,6 +45174,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33335,7 +45199,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33344,6 +45208,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33365,7 +45232,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33374,6 +45241,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33395,7 +45265,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33404,6 +45274,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33426,7 +45299,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33436,6 +45309,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33455,7 +45331,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33464,6 +45340,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33486,7 +45365,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33495,6 +45374,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33514,6 +45396,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -33541,7 +45426,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33569,7 +45454,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33579,6 +45464,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33598,7 +45486,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33608,6 +45496,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33627,7 +45518,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33637,6 +45528,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ 
-33656,7 +45550,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33666,6 +45560,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33685,7 +45582,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33695,6 +45592,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33714,7 +45614,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33724,6 +45624,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33743,7 +45646,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33753,6 +45656,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33772,7 +45678,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33782,6 +45688,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33801,7 +45710,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33811,6 +45720,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33830,7 +45742,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33840,6 +45752,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33854,6 +45769,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -33881,7 +45799,7 @@ func 
(m *ObjectFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33909,7 +45827,7 @@ func (m *ObjectFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33919,6 +45837,9 @@ func (m *ObjectFieldSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33938,7 +45859,7 @@ func (m *ObjectFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33948,6 +45869,9 @@ func (m *ObjectFieldSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33962,6 +45886,9 @@ func (m *ObjectFieldSelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -33989,7 +45916,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34017,7 +45944,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34027,6 +45954,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34046,7 +45976,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34056,6 +45986,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34075,7 +46008,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34085,6 +46018,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34104,7 +46040,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34114,6 +46050,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34133,7 +46072,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { 
} b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34143,6 +46082,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34162,7 +46104,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34172,6 +46114,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34191,7 +46136,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34201,6 +46146,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34215,6 +46163,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -34242,7 +46193,7 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34270,7 +46221,7 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34279,6 +46230,9 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34300,7 +46254,7 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34309,6 +46263,9 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34330,7 +46287,7 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34339,6 +46296,9 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34355,6 +46315,9 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -34382,7 +46345,7 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift 
+ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34410,7 +46373,7 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34419,6 +46382,9 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34440,7 +46406,7 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34449,6 +46415,9 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34470,7 +46439,7 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34479,6 +46448,9 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34495,6 +46467,9 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -34522,7 +46497,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34550,7 +46525,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34560,6 +46535,9 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34579,7 +46557,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34589,6 +46567,9 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34608,7 +46589,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34617,6 +46598,9 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34638,7 +46622,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 
0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34647,6 +46631,9 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34668,7 +46655,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34678,6 +46665,9 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34697,7 +46687,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34707,6 +46697,9 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34721,6 +46714,9 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -34748,7 +46744,7 @@ func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34776,7 +46772,7 @@ func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34785,6 +46781,9 @@ func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34806,7 +46805,7 @@ func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34815,6 +46814,9 @@ func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34832,6 +46834,9 @@ func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -34859,7 +46864,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34887,7 +46892,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34897,6 +46902,9 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { 
return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34916,7 +46924,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34925,6 +46933,9 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34946,7 +46957,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34956,6 +46967,9 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34975,7 +46989,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34984,11 +46998,14 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -35008,7 +47025,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35018,6 +47035,9 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35038,7 +47058,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35048,6 +47068,9 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35068,7 +47091,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35077,6 +47100,9 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35096,6 +47122,9 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -35123,7 
+47152,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35151,7 +47180,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35161,6 +47190,9 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35180,7 +47212,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35190,6 +47222,9 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35209,7 +47244,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35218,6 +47253,9 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35225,7 +47263,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { m.Capacity = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -35238,7 +47276,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35255,7 +47293,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35265,6 +47303,9 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -35281,7 +47322,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35290,13 +47331,13 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -35332,7 +47373,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35341,6 +47382,9 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35358,6 +47402,9 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -35385,7 +47432,7 @@ func (m *PersistentVolumeClaimVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35413,7 +47460,7 @@ func (m *PersistentVolumeClaimVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35423,6 +47470,9 @@ func (m *PersistentVolumeClaimVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35442,7 +47492,7 @@ func (m *PersistentVolumeClaimVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35457,6 +47507,9 @@ func (m *PersistentVolumeClaimVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -35484,7 +47537,7 @@ func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35512,7 +47565,7 @@ func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35521,6 +47574,9 @@ func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35542,7 +47598,7 @@ func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35551,6 +47607,9 @@ func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35568,6 +47627,9 @@ func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -35595,7 +47657,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35623,7 +47685,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= 
(int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35632,6 +47694,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35656,7 +47721,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35665,6 +47730,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35689,7 +47757,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35698,6 +47766,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35722,7 +47793,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35731,6 +47802,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35755,7 +47829,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35764,6 +47838,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35788,7 +47865,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35797,6 +47874,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35821,7 +47901,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35830,6 +47910,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35854,7 +47937,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35863,6 +47946,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } 
if postIndex > l { return io.ErrUnexpectedEOF } @@ -35887,7 +47973,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35896,6 +47982,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35920,7 +48009,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35929,6 +48018,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35953,7 +48045,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35962,6 +48054,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35986,7 +48081,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35995,6 +48090,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36019,7 +48117,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36028,6 +48126,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36052,7 +48153,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36061,6 +48162,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36085,7 +48189,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36094,6 +48198,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36118,7 +48225,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36127,6 +48234,9 @@ func (m 
*PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36151,7 +48261,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36160,6 +48270,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36184,7 +48297,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36193,6 +48306,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36217,7 +48333,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36226,6 +48342,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36250,7 +48369,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36259,6 +48378,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36283,7 +48405,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36292,6 +48414,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36316,7 +48441,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36325,6 +48450,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36344,6 +48472,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -36371,7 +48502,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36399,7 +48530,7 @@ func (m *PersistentVolumeSpec) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36408,6 +48539,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36415,7 +48549,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { m.Capacity = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -36428,7 +48562,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36445,7 +48579,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36455,6 +48589,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -36471,7 +48608,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36480,13 +48617,13 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -36522,7 +48659,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36531,6 +48668,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36552,7 +48692,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36562,6 +48702,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36581,7 +48724,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36590,6 +48733,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36614,7 +48760,7 
@@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36624,6 +48770,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36643,7 +48792,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36653,6 +48802,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36672,7 +48824,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36682,6 +48834,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36701,7 +48856,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36711,6 +48866,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36731,7 +48889,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36740,6 +48898,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36759,6 +48920,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -36786,7 +48950,7 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36814,7 +48978,7 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36824,6 +48988,9 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36843,7 +49010,7 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36853,6 +49020,9 
@@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36872,7 +49042,7 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36882,6 +49052,9 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36896,6 +49069,9 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -36923,7 +49099,7 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36951,7 +49127,7 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36961,6 +49137,9 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36980,7 +49159,7 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36990,6 +49169,9 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37004,6 +49186,9 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -37031,7 +49216,7 @@ func (m *Pod) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37059,7 +49244,7 @@ func (m *Pod) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37068,6 +49253,9 @@ func (m *Pod) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37089,7 +49277,7 @@ func (m *Pod) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37098,6 +49286,9 @@ func (m *Pod) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37119,7 +49310,7 
@@ func (m *Pod) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37128,6 +49319,9 @@ func (m *Pod) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37144,6 +49338,9 @@ func (m *Pod) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -37171,7 +49368,7 @@ func (m *PodAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37199,7 +49396,7 @@ func (m *PodAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37208,6 +49405,9 @@ func (m *PodAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37230,7 +49430,7 @@ func (m *PodAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37239,6 +49439,9 @@ func (m *PodAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37256,6 +49459,9 @@ func (m *PodAffinity) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -37283,7 +49489,7 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37311,7 +49517,7 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37320,11 +49526,14 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.LabelSelector == nil { - m.LabelSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.LabelSelector = &v1.LabelSelector{} } if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -37344,7 +49553,7 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37354,6 +49563,9 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37373,7 +49585,7 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-37383,6 +49595,9 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37397,6 +49612,9 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -37424,7 +49642,7 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37452,7 +49670,7 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37461,6 +49679,9 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37483,7 +49704,7 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37492,6 +49713,9 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37509,6 +49733,9 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -37536,7 +49763,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37564,7 +49791,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37584,7 +49811,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37604,7 +49831,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37624,7 +49851,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37644,7 +49871,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37654,6 +49881,9 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37668,6 +49898,9 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } @@ -37695,7 +49928,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37723,7 +49956,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37733,6 +49966,9 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37752,7 +49988,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37762,6 +49998,9 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37781,7 +50020,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37790,6 +50029,9 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37811,7 +50053,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37820,6 +50062,9 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37841,7 +50086,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37851,6 +50096,9 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37870,7 +50118,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37880,6 +50128,9 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37894,6 +50145,9 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -37921,7 +50175,7 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37949,7 +50203,7 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 
0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37959,6 +50213,9 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37978,7 +50235,7 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37988,6 +50245,9 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38007,7 +50267,7 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38016,6 +50276,9 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38033,6 +50296,9 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38060,7 +50326,7 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38088,7 +50354,7 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38098,6 +50364,9 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38117,7 +50386,7 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38127,6 +50396,9 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38142,6 +50414,9 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38169,7 +50444,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38197,7 +50472,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38217,7 +50492,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38237,7 +50512,7 @@ func (m *PodExecOptions) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38257,7 +50532,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38277,7 +50552,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38287,6 +50562,9 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38306,7 +50584,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38316,6 +50594,9 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38330,6 +50611,94 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodIP) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodIP: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodIP: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38357,7 +50726,7 @@ func (m *PodList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38385,7 +50754,7 @@ func (m *PodList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 
0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38394,6 +50763,9 @@ func (m *PodList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38415,7 +50787,7 @@ func (m *PodList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38424,6 +50796,9 @@ func (m *PodList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38441,6 +50816,9 @@ func (m *PodList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38468,7 +50846,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38496,7 +50874,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38506,6 +50884,9 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38525,7 +50906,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38545,7 +50926,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38565,7 +50946,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -38585,7 +50966,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38594,11 +50975,14 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.SinceTime == nil { - m.SinceTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.SinceTime = &v1.Time{} } if err := m.SinceTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -38618,7 +51002,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38638,7 +51022,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -38658,7 +51042,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -38673,6 +51057,9 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return 
ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38700,7 +51087,7 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38726,7 +51113,7 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -38743,7 +51130,7 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - packedLen |= (int(b) & 0x7F) << shift + packedLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38752,9 +51139,23 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Ports) == 0 { + m.Ports = make([]int32, 0, elementCount) + } for iNdEx < postIndex { var v int32 for shift := uint(0); ; shift += 7 { @@ -38766,7 +51167,7 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -38785,6 +51186,9 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38812,7 +51216,7 @@ func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38840,7 +51244,7 @@ func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38850,6 +51254,9 @@ func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38864,6 +51271,9 @@ func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38891,7 +51301,7 @@ func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38919,7 +51329,7 @@ func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38929,6 +51339,9 @@ func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38943,6 +51356,9 @@ func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx 
+ skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38970,7 +51386,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38998,7 +51414,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39007,6 +51423,9 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39031,7 +51450,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -39051,7 +51470,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39070,7 +51489,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -39087,7 +51506,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - packedLen |= (int(b) & 0x7F) << shift + packedLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39096,9 +51515,23 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.SupplementalGroups) == 0 { + m.SupplementalGroups = make([]int64, 0, elementCount) + } for iNdEx < postIndex { var v int64 for shift := uint(0); ; shift += 7 { @@ -39110,7 +51543,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -39134,7 +51567,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -39154,7 +51587,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -39174,7 +51607,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39183,6 +51616,9 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39191,6 +51627,42 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WindowsOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WindowsOptions == nil { + m.WindowsOptions = &WindowsSecurityContextOptions{} + } + if err := m.WindowsOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -39200,6 +51672,9 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -39227,7 +51702,7 @@ func (m *PodSignature) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39255,7 +51730,7 @@ func (m *PodSignature) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39264,11 +51739,14 @@ func (m *PodSignature) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.PodController == nil { - m.PodController = &k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference{} + m.PodController = &v1.OwnerReference{} } if err := m.PodController.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -39283,6 +51761,9 @@ func (m *PodSignature) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -39310,7 +51791,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39338,7 +51819,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39347,6 +51828,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39369,7 +51853,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39378,6 +51862,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39400,7 +51887,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39410,6 +51897,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ 
-39429,7 +51919,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -39449,7 +51939,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -39469,7 +51959,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39479,6 +51969,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39498,7 +51991,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39507,6 +52000,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39527,7 +52023,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39544,7 +52040,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39554,6 +52050,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -39570,7 +52069,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39580,6 +52079,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -39616,7 +52118,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39626,6 +52128,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39645,7 +52150,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39655,6 +52160,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39674,7 +52182,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= 
uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39684,6 +52192,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39703,7 +52214,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39723,7 +52234,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39743,7 +52254,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39763,7 +52274,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39772,6 +52283,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39796,7 +52310,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39805,6 +52319,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39827,7 +52344,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39837,6 +52354,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39856,7 +52376,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39866,6 +52386,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39885,7 +52408,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39894,6 +52417,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39918,7 +52444,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39928,6 +52454,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39947,7 +52476,7 @@ func (m *PodSpec) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39956,6 +52485,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39978,7 +52510,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39999,7 +52531,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40008,6 +52540,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40030,7 +52565,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40039,6 +52574,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40061,7 +52599,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40071,6 +52609,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40090,7 +52631,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -40110,7 +52651,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40119,6 +52660,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40143,7 +52687,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40164,7 +52708,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40173,6 +52717,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40195,7 +52742,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40205,6 +52752,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if 
postIndex > l { return io.ErrUnexpectedEOF } @@ -40225,13 +52775,243 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } b := bool(v != 0) m.EnableServiceLinks = &b + case 31: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreemptionPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PreemptionPolicy(dAtA[iNdEx:postIndex]) + m.PreemptionPolicy = &s + iNdEx = postIndex + case 32: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Overhead", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Overhead == nil { + m.Overhead = make(ResourceList) + } + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Overhead[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 33: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologySpreadConstraints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologySpreadConstraints = append(m.TopologySpreadConstraints, TopologySpreadConstraint{}) + if err := m.TopologySpreadConstraints[len(m.TopologySpreadConstraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 34: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EphemeralContainers = append(m.EphemeralContainers, EphemeralContainer{}) + if err := m.EphemeralContainers[len(m.EphemeralContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -40241,6 +53021,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -40268,7 +53051,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40296,7 +53079,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40306,6 +53089,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40325,7 +53111,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40334,6 +53120,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40356,7 +53145,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40366,6 +53155,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40385,7 +53177,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40395,6 +53187,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40414,7 +53209,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40424,6 +53219,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40443,7 +53241,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40453,6 +53251,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40472,7 +53273,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40481,11 +53282,14 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.StartTime == nil { - m.StartTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.StartTime = &v1.Time{} } if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -40505,7 +53309,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40514,6 +53318,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40536,7 +53343,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40546,6 +53353,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40565,7 +53375,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40574,6 +53384,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40596,7 +53409,7 @@ func (m 
*PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40606,11 +53419,82 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.NominatedNodeName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodIPs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodIPs = append(m.PodIPs, PodIP{}) + if err := m.PodIPs[len(m.PodIPs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainerStatuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EphemeralContainerStatuses = append(m.EphemeralContainerStatuses, ContainerStatus{}) + if err := m.EphemeralContainerStatuses[len(m.EphemeralContainerStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -40620,6 +53504,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -40647,7 +53534,7 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40675,7 +53562,7 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40684,6 +53571,9 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40705,7 +53595,7 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40714,6 +53604,9 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40730,6 +53623,9 @@ func (m *PodStatusResult) 
Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -40757,7 +53653,7 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40785,7 +53681,7 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40794,6 +53690,9 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40815,7 +53714,7 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40824,6 +53723,9 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40840,6 +53742,9 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -40867,7 +53772,7 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40895,7 +53800,7 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40904,6 +53809,9 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40925,7 +53833,7 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40934,6 +53842,9 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40951,6 +53862,9 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -40978,7 +53892,7 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41006,7 +53920,7 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41015,6 +53929,9 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } 
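The hunks above and below repeat two mechanical changes across every Unmarshal method in this vendored regeneration of k8s.io/api/core/v1/generated.pb.go: the varint decode drops its redundant parentheses, so v |= (int(b) & 0x7F) << shift becomes v |= int(b&0x7F) << shift, and every computed postIndex := iNdEx + msglen (or + intStringLen) gains a postIndex < 0 guard that rejects lengths overflowing int before the existing postIndex > l bounds check. A minimal standalone Go sketch of that decode-and-guard pattern follows; it borrows the generated code's naming (dAtA, iNdEx) for readability, but the function itself is illustrative only and not part of this diff.

package main

import (
	"errors"
	"fmt"
	"io"
)

var (
	errInvalidLength = errors.New("proto: negative length found during unmarshaling")
	errIntOverflow   = errors.New("proto: integer overflow")
)

// readLengthDelimited decodes a varint length at dAtA[iNdEx:] and returns the
// bounds of the following payload, applying the same guards the regenerated
// code adds: msglen < 0, postIndex < 0 (int overflow), and postIndex > len(dAtA).
func readLengthDelimited(dAtA []byte, iNdEx int) (start, end int, err error) {
	l := len(dAtA)
	var msglen int
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errIntOverflow
		}
		if iNdEx >= l {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := dAtA[iNdEx]
		iNdEx++
		msglen |= int(b&0x7F) << shift // new style: mask the byte before widening
		if b < 0x80 {
			break
		}
	}
	if msglen < 0 {
		return 0, 0, errInvalidLength
	}
	postIndex := iNdEx + msglen
	if postIndex < 0 { // the added guard: iNdEx + msglen wrapped around
		return 0, 0, errInvalidLength
	}
	if postIndex > l {
		return 0, 0, io.ErrUnexpectedEOF
	}
	return iNdEx, postIndex, nil
}

func main() {
	// 0x03 encodes a payload length of 3, followed by the payload "abc".
	start, end, err := readLengthDelimited([]byte{0x03, 'a', 'b', 'c'}, 0)
	fmt.Println(start, end, err) // prints: 1 4 <nil>
}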
@@ -41036,7 +53953,7 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41045,6 +53962,9 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41061,6 +53981,9 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41088,7 +54011,7 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41116,7 +54039,7 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41126,6 +54049,9 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41145,7 +54071,7 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41155,6 +54081,9 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41174,7 +54103,7 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41189,6 +54118,9 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41216,7 +54148,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41244,7 +54176,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41254,6 +54186,9 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41269,6 +54204,9 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41296,7 +54234,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41324,7 +54262,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41333,6 +54271,9 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41354,7 +54295,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41363,6 +54304,9 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41384,7 +54328,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41394,6 +54338,9 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41413,7 +54360,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41423,6 +54370,9 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41437,6 +54387,9 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41464,7 +54417,7 @@ func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41492,7 +54445,7 @@ func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Weight |= (int32(b) & 0x7F) << shift + m.Weight |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -41511,7 +54464,7 @@ func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41520,6 +54473,9 @@ func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41536,6 +54492,9 @@ func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41563,7 +54522,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41591,7 +54550,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= 
int(b&0x7F) << shift if b < 0x80 { break } @@ -41600,6 +54559,9 @@ func (m *Probe) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41621,7 +54583,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.InitialDelaySeconds |= (int32(b) & 0x7F) << shift + m.InitialDelaySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -41640,7 +54602,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TimeoutSeconds |= (int32(b) & 0x7F) << shift + m.TimeoutSeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -41659,7 +54621,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.PeriodSeconds |= (int32(b) & 0x7F) << shift + m.PeriodSeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -41678,7 +54640,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.SuccessThreshold |= (int32(b) & 0x7F) << shift + m.SuccessThreshold |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -41697,7 +54659,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.FailureThreshold |= (int32(b) & 0x7F) << shift + m.FailureThreshold |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -41711,6 +54673,9 @@ func (m *Probe) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41738,7 +54703,7 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41766,7 +54731,7 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41775,6 +54740,9 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41797,7 +54765,7 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -41812,6 +54780,9 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41839,7 +54810,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41867,7 +54838,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41877,6 +54848,9 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41896,7 +54870,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen 
|= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41906,6 +54880,9 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41925,7 +54902,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41945,7 +54922,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41955,6 +54932,9 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41974,7 +54954,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41984,6 +54964,9 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42003,7 +54986,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42013,6 +54996,9 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42027,6 +55013,9 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -42054,7 +55043,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42082,7 +55071,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42092,6 +55081,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42111,7 +55103,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42121,6 +55113,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42140,7 +55135,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42150,6 +55145,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42169,7 +55167,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42179,6 +55177,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42198,7 +55199,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42208,6 +55209,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42227,7 +55231,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42237,6 +55241,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42256,7 +55263,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42265,6 +55272,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42289,7 +55299,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42304,6 +55314,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -42331,7 +55344,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42359,7 +55372,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42369,6 +55382,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42388,7 +55404,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42398,6 +55414,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42417,7 +55436,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42427,6 +55446,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42446,7 +55468,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42456,6 +55478,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42475,7 +55500,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42485,6 +55510,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42504,7 +55532,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42514,6 +55542,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42533,7 +55564,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42542,6 +55573,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42566,7 +55600,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42581,6 +55615,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -42608,7 +55645,7 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42636,7 +55673,7 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ 
-42645,6 +55682,9 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42666,7 +55706,7 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42676,6 +55716,9 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42695,7 +55738,7 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42704,6 +55747,9 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42721,6 +55767,9 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -42748,7 +55797,7 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42776,7 +55825,7 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42785,6 +55834,9 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42806,7 +55858,7 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42815,6 +55867,9 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42836,7 +55891,7 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42845,6 +55900,9 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42861,6 +55919,9 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -42888,7 +55949,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42916,7 +55977,7 @@ func (m 
*ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42926,6 +55987,9 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42945,7 +56009,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42955,6 +56019,9 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42974,7 +56041,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42983,6 +56050,9 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43004,7 +56074,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43014,6 +56084,9 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43033,7 +56106,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43043,6 +56116,9 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43057,6 +56133,9 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -43084,7 +56163,7 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43112,7 +56191,7 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43121,6 +56200,9 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43142,7 +56224,7 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << 
shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43151,6 +56233,9 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43168,6 +56253,9 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -43195,7 +56283,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43223,7 +56311,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -43243,7 +56331,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43252,6 +56340,9 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43272,7 +56363,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43289,7 +56380,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43299,6 +56390,9 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -43315,7 +56409,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43325,6 +56419,9 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -43361,7 +56458,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43370,6 +56467,9 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43394,7 +56494,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -43408,6 +56508,9 @@ func (m *ReplicationControllerSpec) 
Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -43435,7 +56538,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43463,7 +56566,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -43482,7 +56585,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift + m.FullyLabeledReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -43501,7 +56604,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -43520,7 +56623,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -43539,7 +56642,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -43558,7 +56661,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43567,6 +56670,9 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43584,6 +56690,9 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -43611,7 +56720,7 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43639,7 +56748,7 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43649,6 +56758,9 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43668,7 +56780,7 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43678,6 +56790,9 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43697,7 +56812,7 @@ func 
(m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43706,6 +56821,9 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43722,6 +56840,9 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -43749,7 +56870,7 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43777,7 +56898,7 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43786,6 +56907,9 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43807,7 +56931,7 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43816,6 +56940,9 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43837,7 +56964,7 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43846,6 +56973,9 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43862,6 +56992,9 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -43889,7 +57022,7 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43917,7 +57050,7 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43926,6 +57059,9 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43947,7 +57083,7 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43956,6 +57092,9 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated 
+ } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43973,6 +57112,9 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -44000,7 +57142,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44028,7 +57170,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44037,6 +57179,9 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44044,7 +57189,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { m.Hard = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -44057,7 +57202,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44074,7 +57219,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44084,6 +57229,9 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -44100,7 +57248,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44109,13 +57257,13 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -44151,7 +57299,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44161,6 +57309,9 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44180,7 +57331,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44189,6 +57340,9 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if 
postIndex > l { return io.ErrUnexpectedEOF } @@ -44208,6 +57362,9 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -44235,7 +57392,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44263,7 +57420,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44272,6 +57429,9 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44279,7 +57439,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { m.Hard = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -44292,7 +57452,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44309,7 +57469,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44319,6 +57479,9 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -44335,7 +57498,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44344,13 +57507,13 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -44386,7 +57549,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44395,6 +57558,9 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44402,7 +57568,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { m.Used = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -44415,7 +57581,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44432,7 +57598,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44442,6 +57608,9 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -44458,7 +57627,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44467,13 +57636,13 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -44504,6 +57673,9 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -44531,7 +57703,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44559,7 +57731,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44568,6 +57740,9 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44575,7 +57750,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { m.Limits = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -44588,7 +57763,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44605,7 +57780,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44615,6 +57790,9 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -44631,7 +57809,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44640,13 +57818,13 @@ func (m *ResourceRequirements) Unmarshal(dAtA 
[]byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -44682,7 +57860,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44691,6 +57869,9 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44698,7 +57879,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { m.Requests = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -44711,7 +57892,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44728,7 +57909,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44738,6 +57919,9 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -44754,7 +57938,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44763,13 +57947,13 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -44800,6 +57984,9 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -44827,7 +58014,7 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44855,7 +58042,7 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44865,6 +58052,9 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44884,7 +58074,7 @@ func (m 
*SELinuxOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44894,6 +58084,9 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44913,7 +58106,7 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44923,6 +58116,9 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44942,7 +58138,7 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44952,6 +58148,9 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44966,6 +58165,9 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -44993,7 +58195,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45021,7 +58223,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45031,6 +58233,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45050,7 +58255,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45060,6 +58265,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45079,7 +58287,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45088,6 +58296,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45112,7 +58323,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45132,7 +58343,7 @@ func (m 
*ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45142,6 +58353,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45161,7 +58375,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45171,6 +58385,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45190,7 +58407,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45200,6 +58417,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45219,7 +58439,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45229,6 +58449,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45248,7 +58471,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45258,6 +58481,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45277,7 +58503,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45292,6 +58518,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -45319,7 +58548,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45347,7 +58576,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45357,6 +58586,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45376,7 +58608,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45386,6 +58618,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45405,7 +58640,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45414,6 +58649,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45438,7 +58676,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45458,7 +58696,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45468,6 +58706,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45487,7 +58728,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45497,6 +58738,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45516,7 +58760,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45526,6 +58770,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45545,7 +58792,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45555,6 +58802,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45574,7 +58824,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45584,6 +58834,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if 
postIndex > l { return io.ErrUnexpectedEOF } @@ -45603,7 +58856,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45618,6 +58871,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -45645,7 +58901,7 @@ func (m *ScopeSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45673,7 +58929,7 @@ func (m *ScopeSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45682,6 +58938,9 @@ func (m *ScopeSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45699,6 +58958,9 @@ func (m *ScopeSelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -45726,7 +58988,7 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45754,7 +59016,7 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45764,6 +59026,9 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45783,7 +59048,7 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45793,6 +59058,9 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45812,7 +59080,7 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45822,6 +59090,9 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45836,6 +59107,9 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -45863,7 +59137,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= 
uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45891,7 +59165,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45900,6 +59174,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45921,7 +59198,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45930,6 +59207,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45950,7 +59230,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45967,7 +59247,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45977,6 +59257,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -45993,7 +59276,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapbyteLen |= (uint64(b) & 0x7F) << shift + mapbyteLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46003,6 +59286,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex < 0 { + return ErrInvalidLengthGenerated + } if postbytesIndex > l { return io.ErrUnexpectedEOF } @@ -46040,7 +59326,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46050,6 +59336,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46069,7 +59358,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46078,6 +59367,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46098,7 +59390,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46115,7 +59407,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46125,6 +59417,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if 
postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -46141,7 +59436,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46151,6 +59446,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -46182,6 +59480,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -46209,7 +59510,7 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46237,7 +59538,7 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46246,6 +59547,9 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46267,7 +59571,7 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46283,6 +59587,9 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -46310,7 +59617,7 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46338,7 +59645,7 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46347,6 +59654,9 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46368,7 +59678,7 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46378,6 +59688,9 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46397,7 +59710,7 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46413,6 +59726,9 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } @@ -46440,7 +59756,7 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46468,7 +59784,7 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46477,6 +59793,9 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46498,7 +59817,7 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46507,6 +59826,9 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46524,6 +59846,9 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -46551,7 +59876,7 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46579,7 +59904,7 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46588,6 +59913,9 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46609,7 +59937,7 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46618,6 +59946,9 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46640,7 +59971,7 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46656,6 +59987,9 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -46683,7 +60017,7 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46711,7 +60045,7 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46721,6 +60055,9 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { 
return io.ErrUnexpectedEOF } @@ -46740,7 +60077,7 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46750,6 +60087,9 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46764,6 +60104,9 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -46791,7 +60134,7 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46819,7 +60162,7 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46829,6 +60172,9 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46848,7 +60194,7 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46857,6 +60203,9 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46879,7 +60228,7 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -46899,7 +60248,7 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46915,6 +60264,9 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -46942,7 +60294,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46970,7 +60322,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46979,6 +60331,9 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47003,7 +60358,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47024,7 +60379,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << 
shift if b < 0x80 { break } @@ -47033,6 +60388,9 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47057,7 +60415,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -47077,7 +60435,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47098,7 +60456,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47119,7 +60477,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47140,7 +60498,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -47160,7 +60518,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47170,12 +60528,51 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } s := ProcMountType(dAtA[iNdEx:postIndex]) m.ProcMount = &s iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WindowsOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WindowsOptions == nil { + m.WindowsOptions = &WindowsSecurityContextOptions{} + } + if err := m.WindowsOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -47185,6 +60582,9 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -47212,7 +60612,7 @@ func (m *SerializedReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47240,7 +60640,7 @@ func (m *SerializedReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47249,6 +60649,9 @@ func (m *SerializedReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF 
} @@ -47265,6 +60668,9 @@ func (m *SerializedReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -47292,7 +60698,7 @@ func (m *Service) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47320,7 +60726,7 @@ func (m *Service) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47329,6 +60735,9 @@ func (m *Service) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47350,7 +60759,7 @@ func (m *Service) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47359,6 +60768,9 @@ func (m *Service) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47380,7 +60792,7 @@ func (m *Service) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47389,6 +60801,9 @@ func (m *Service) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47405,6 +60820,9 @@ func (m *Service) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -47432,7 +60850,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47460,7 +60878,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47469,6 +60887,9 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47490,7 +60911,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47499,6 +60920,9 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47521,7 +60945,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47530,6 +60954,9 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -47552,7 +60979,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47568,6 +60995,9 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -47595,7 +61025,7 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47623,7 +61053,7 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47632,6 +61062,9 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47653,7 +61086,7 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47662,6 +61095,9 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47679,6 +61115,9 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -47706,7 +61145,7 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47734,7 +61173,7 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47744,6 +61183,9 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47763,7 +61205,7 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -47783,7 +61225,7 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47793,6 +61235,9 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47807,6 +61252,9 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -47834,7 +61282,7 @@ 
func (m *ServiceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47862,7 +61310,7 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47871,6 +61319,9 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47892,7 +61343,7 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47901,6 +61352,9 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47918,6 +61372,9 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -47945,7 +61402,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47973,7 +61430,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47983,6 +61440,9 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48002,7 +61462,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48012,6 +61472,9 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48031,7 +61494,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Port |= (int32(b) & 0x7F) << shift + m.Port |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -48050,7 +61513,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48059,6 +61522,9 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48080,7 +61546,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NodePort |= (int32(b) & 0x7F) << shift + m.NodePort |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -48094,6 +61560,9 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -48121,7 
+61590,7 @@ func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48149,7 +61618,7 @@ func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48159,6 +61628,9 @@ func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48173,6 +61645,9 @@ func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -48200,7 +61675,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48228,7 +61703,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48237,6 +61712,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48259,7 +61737,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48268,6 +61746,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48288,7 +61769,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48305,7 +61786,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48315,6 +61796,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -48331,7 +61815,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48341,6 +61825,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -48377,7 +61864,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48387,6 +61874,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48406,7 +61896,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48416,6 +61906,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48435,7 +61928,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48445,6 +61938,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48464,7 +61960,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48474,6 +61970,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48493,7 +61992,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48503,6 +62002,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48522,7 +62024,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48532,6 +62034,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48551,7 +62056,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48561,6 +62066,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48580,7 +62088,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48590,6 +62098,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48609,7 +62120,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.HealthCheckNodePort |= (int32(b) & 0x7F) 
<< shift + m.HealthCheckNodePort |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -48628,7 +62139,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48648,7 +62159,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48657,6 +62168,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48667,6 +62181,39 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPFamily", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := IPFamily(dAtA[iNdEx:postIndex]) + m.IPFamily = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -48676,6 +62223,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -48703,7 +62253,7 @@ func (m *ServiceStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48731,7 +62281,7 @@ func (m *ServiceStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48740,6 +62290,9 @@ func (m *ServiceStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48756,6 +62309,9 @@ func (m *ServiceStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -48783,7 +62339,7 @@ func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48811,7 +62367,7 @@ func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48820,6 +62376,9 @@ func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48839,6 +62398,9 @@ func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated 
} + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -48866,7 +62428,7 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48894,7 +62456,39 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeNamespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48904,35 +62498,9 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeNamespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } @@ -48952,7 +62520,7 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48962,6 +62530,9 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48981,7 +62552,7 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -49001,7 +62572,7 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -49010,6 +62581,9 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49029,6 +62603,9 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49056,7 +62633,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49084,7 +62661,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49094,6 +62671,9 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49113,7 +62693,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49123,6 +62703,9 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49142,7 +62725,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49152,6 +62735,9 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49171,7 +62757,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -49191,7 +62777,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -49200,6 +62786,9 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49219,6 +62808,9 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49246,7 +62838,7 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49274,7 +62866,7 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49284,6 +62876,9 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49303,7 +62898,7 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49313,6 +62908,9 @@ func (m *Sysctl) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49327,6 +62925,9 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49354,7 +62955,7 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49382,7 +62983,7 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -49391,6 +62992,9 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49412,7 +63016,7 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49422,6 +63026,9 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49436,6 +63043,9 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49463,7 +63073,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49491,7 +63101,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49501,6 +63111,9 @@ func (m *Taint) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49520,7 +63133,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49530,6 +63143,9 @@ func (m *Taint) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49549,7 +63165,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49559,6 +63175,9 @@ func (m *Taint) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49578,7 +63197,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { 
break } @@ -49587,11 +63206,14 @@ func (m *Taint) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.TimeAdded == nil { - m.TimeAdded = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.TimeAdded = &v1.Time{} } if err := m.TimeAdded.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -49606,6 +63228,9 @@ func (m *Taint) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49633,7 +63258,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49661,7 +63286,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49671,6 +63296,9 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49690,7 +63318,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49700,6 +63328,9 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49719,7 +63350,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49729,6 +63360,9 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49748,7 +63382,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49758,6 +63392,9 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49777,7 +63414,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -49792,6 +63429,9 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49819,7 +63459,7 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49847,7 +63487,7 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + 
stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49857,6 +63497,9 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49876,7 +63519,7 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49886,6 +63529,9 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49900,6 +63546,9 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49927,7 +63576,7 @@ func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49955,7 +63604,7 @@ func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -49964,6 +63613,9 @@ func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49981,6 +63633,181 @@ func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TopologySpreadConstraint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TopologySpreadConstraint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TopologySpreadConstraint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSkew", wireType) + } + m.MaxSkew = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxSkew |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologyKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologyKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WhenUnsatisfiable", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WhenUnsatisfiable = UnsatisfiableConstraintAction(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelSelector == nil { + m.LabelSelector = &v1.LabelSelector{} + } + if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -50008,7 +63835,7 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50036,7 +63863,7 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50046,6 +63873,9 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50066,7 +63896,7 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50076,6 +63906,9 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50095,7 +63928,7 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) 
<< shift if b < 0x80 { break } @@ -50105,6 +63938,9 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50119,6 +63955,9 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -50146,7 +63985,7 @@ func (m *Volume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50174,7 +64013,7 @@ func (m *Volume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50184,6 +64023,9 @@ func (m *Volume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50203,7 +64045,7 @@ func (m *Volume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50212,6 +64054,9 @@ func (m *Volume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50228,6 +64073,9 @@ func (m *Volume) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -50255,7 +64103,7 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50283,7 +64131,7 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50293,6 +64141,9 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50312,7 +64163,7 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50322,6 +64173,9 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50336,6 +64190,9 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -50363,7 +64220,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50391,7 +64248,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50401,6 +64258,9 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50420,7 +64280,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50440,7 +64300,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50450,6 +64310,9 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50469,7 +64332,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50479,6 +64342,9 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50498,7 +64364,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50508,6 +64374,9 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50528,7 +64397,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50538,6 +64407,9 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50552,6 +64424,9 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -50579,7 +64454,7 @@ func (m *VolumeNodeAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50607,7 +64482,7 @@ func (m *VolumeNodeAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50616,6 +64491,9 @@ func (m *VolumeNodeAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50635,6 +64513,9 @@ func (m *VolumeNodeAffinity) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { 
return io.ErrUnexpectedEOF } @@ -50662,7 +64543,7 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50690,7 +64571,7 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50699,6 +64580,9 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50723,7 +64607,7 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50732,6 +64616,9 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50756,7 +64643,7 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50765,6 +64652,9 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50789,7 +64679,7 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50798,6 +64688,9 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50817,6 +64710,9 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -50844,7 +64740,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50872,7 +64768,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50881,6 +64777,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50905,7 +64804,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50914,6 +64813,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50938,7 +64840,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= 
int(b&0x7F) << shift if b < 0x80 { break } @@ -50947,6 +64849,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50971,7 +64876,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50980,6 +64885,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51004,7 +64912,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51013,6 +64921,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51037,7 +64948,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51046,6 +64957,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51070,7 +64984,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51079,6 +64993,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51103,7 +65020,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51112,6 +65029,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51136,7 +65056,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51145,6 +65065,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51169,7 +65092,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51178,6 +65101,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51202,7 +65128,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << 
shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51211,6 +65137,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51235,7 +65164,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51244,6 +65173,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51268,7 +65200,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51277,6 +65209,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51301,7 +65236,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51310,6 +65245,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51334,7 +65272,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51343,6 +65281,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51367,7 +65308,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51376,6 +65317,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51400,7 +65344,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51409,6 +65353,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51433,7 +65380,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51442,6 +65389,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51466,7 +65416,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= 
(int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51475,6 +65425,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51499,7 +65452,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51508,6 +65461,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51532,7 +65488,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51541,6 +65497,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51565,7 +65524,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51574,6 +65533,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51598,7 +65560,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51607,6 +65569,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51631,7 +65596,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51640,6 +65605,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51664,7 +65632,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51673,6 +65641,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51697,7 +65668,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51706,6 +65677,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51730,7 +65704,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51739,6 +65713,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51763,7 +65740,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51772,6 +65749,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51791,6 +65771,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -51818,7 +65801,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -51846,7 +65829,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -51856,6 +65839,9 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51875,7 +65861,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -51885,6 +65871,9 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51904,7 +65893,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -51914,6 +65903,9 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51933,7 +65925,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -51943,6 +65935,9 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51957,6 +65952,9 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -51984,7 +65982,7 
@@ func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -52012,7 +66010,7 @@ func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Weight |= (int32(b) & 0x7F) << shift + m.Weight |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -52031,7 +66029,7 @@ func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -52040,6 +66038,9 @@ func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52056,6 +66057,161 @@ func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WindowsSecurityContextOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WindowsSecurityContextOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WindowsSecurityContextOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GMSACredentialSpecName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.GMSACredentialSpecName = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GMSACredentialSpec", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.GMSACredentialSpec = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUserName", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.RunAsUserName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -52122,10 +66278,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -52154,6 +66313,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -52172,820 +66334,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 12934 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x7d, 0x70, 0x64, 0x57, - 0x56, 0x18, 0xbe, 0xaf, 0xbb, 0x25, 0x75, 0x1f, 0x7d, 0xdf, 0x99, 0xb1, 0x35, 0xf2, 0xcc, 0xf4, - 0xf8, 0x79, 0x77, 0x3c, 0x5e, 0xdb, 0xd2, 0x7a, 0x6c, 0xaf, 0xcd, 0xda, 0x6b, 0x90, 0xd4, 0xd2, - 0x4c, 0x7b, 0x46, 0x9a, 0xf6, 0x6d, 0xcd, 0x78, 0xd7, 0x78, 0x17, 0x3f, 0x75, 0x5f, 0x49, 0xcf, - 0x6a, 0xbd, 0xd7, 0x7e, 0xef, 0xb5, 0x66, 0xe4, 0x1f, 0xd4, 0x2f, 0x59, 0x02, 0x61, 0x03, 0x95, - 0xda, 0x4a, 0xb6, 0xf2, 0x01, 0x14, 0xa9, 0x22, 0xa4, 0x80, 0x90, 0xa4, 0x42, 0x20, 0x40, 0x58, - 0x48, 0x08, 0x24, 0x55, 0x24, 0x7f, 0x6c, 0x48, 0xaa, 0x52, 0x4b, 0x15, 0x15, 0x05, 0x44, 0x2a, - 0x29, 0xfe, 0x08, 0xa4, 0x42, 0xfe, 0x08, 0x0a, 0x15, 0x52, 0xf7, 0xf3, 0xdd, 0xfb, 0xfa, 0xbd, - 0xee, 0xd6, 0x58, 0x23, 0x9b, 0xad, 0xfd, 0xaf, 0xfb, 0x9e, 0x73, 0xcf, 0xbd, 0xef, 0x7e, 0x9e, - 0x73, 0xee, 0xf9, 0x80, 0x57, 0x76, 0x5e, 0x0e, 0xe7, 0x5c, 0x7f, 0x7e, 0xa7, 0xb3, 0x41, 0x02, - 0x8f, 0x44, 0x24, 0x9c, 0xdf, 0x23, 0x5e, 0xd3, 0x0f, 0xe6, 0x05, 0xc0, 0x69, 0xbb, 0xf3, 0x0d, - 0x3f, 0x20, 0xf3, 0x7b, 0xcf, 0xcd, 0x6f, 0x11, 0x8f, 0x04, 0x4e, 0x44, 0x9a, 0x73, 0xed, 0xc0, - 0x8f, 0x7c, 0x84, 0x38, 0xce, 0x9c, 0xd3, 0x76, 0xe7, 0x28, 0xce, 0xdc, 0xde, 0x73, 0xb3, 0xcf, - 0x6e, 0xb9, 0xd1, 0x76, 0x67, 0x63, 0xae, 0xe1, 0xef, 0xce, 0x6f, 0xf9, 0x5b, 0xfe, 0x3c, 0x43, - 0xdd, 0xe8, 0x6c, 0xb2, 0x7f, 0xec, 0x0f, 0xfb, 0xc5, 0x49, 0xcc, 0xbe, 0x10, 0x37, 0xb3, 0xeb, - 0x34, 0xb6, 0x5d, 0x8f, 0x04, 0xfb, 0xf3, 0xed, 0x9d, 0x2d, 0xd6, 0x6e, 0x40, 0x42, 0xbf, 0x13, - 0x34, 0x48, 0xb2, 0xe1, 0x9e, 0xb5, 0xc2, 0xf9, 0x5d, 0x12, 0x39, 0x29, 0xdd, 0x9d, 0x9d, 0xcf, - 0xaa, 0x15, 0x74, 0xbc, 0xc8, 0xdd, 0xed, 0x6e, 0xe6, 0xd3, 0xfd, 0x2a, 0x84, 0x8d, 0x6d, 0xb2, - 0xeb, 0x74, 0xd5, 0x7b, 0x3e, 
0xb9, 0xbb, 0x94, 0xd7, 0xca, 0x99, 0x2c, 0x54, 0x95, 0x16, 0x62, 0x0e, 0x43, 0x9f, 0x80, - 0x91, 0x86, 0xbf, 0xbb, 0xeb, 0x78, 0x4d, 0x76, 0xe5, 0x95, 0x16, 0x47, 0x29, 0x3b, 0xb6, 0xc4, - 0x8b, 0xb0, 0x84, 0xa1, 0x0b, 0x50, 0x70, 0x82, 0x2d, 0xae, 0x96, 0x28, 0x2d, 0x16, 0x69, 0x4b, - 0x0b, 0xc1, 0x56, 0x88, 0x59, 0x29, 0x95, 0xaa, 0xee, 0xf9, 0xc1, 0x8e, 0xeb, 0x6d, 0x55, 0xdc, - 0x40, 0x6c, 0x09, 0x75, 0x17, 0xbe, 0xa9, 0x20, 0x58, 0xc3, 0x42, 0x2b, 0x30, 0xd4, 0xf6, 0x83, - 0x28, 0x9c, 0x19, 0x66, 0xc3, 0xfd, 0x78, 0xc6, 0x41, 0xc4, 0xbf, 0xb6, 0xe6, 0x07, 0x51, 0xfc, - 0x01, 0xf4, 0x5f, 0x88, 0x79, 0x75, 0xf4, 0x6d, 0x90, 0x27, 0xde, 0xde, 0xcc, 0x08, 0xa3, 0x32, - 0x9b, 0x46, 0x65, 0xd9, 0xdb, 0xbb, 0xeb, 0x04, 0xf1, 0x29, 0xbd, 0xec, 0xed, 0x61, 0x5a, 0x07, - 0x7d, 0x1e, 0x4a, 0x72, 0x8b, 0x87, 0x42, 0x5d, 0x95, 0xba, 0xc4, 0xe4, 0xc1, 0x80, 0xc9, 0x7b, - 0x1d, 0x37, 0x20, 0xbb, 0xc4, 0x8b, 0xc2, 0xf8, 0x4c, 0x93, 0xd0, 0x10, 0xc7, 0xd4, 0xd0, 0xe7, - 0xa5, 0x8e, 0x74, 0xd5, 0xef, 0x78, 0x51, 0x38, 0x53, 0x62, 0xdd, 0x4b, 0x7d, 0xbd, 0xba, 0x1b, - 0xe3, 0x25, 0x95, 0xa8, 0xbc, 0x32, 0x36, 0x48, 0x21, 0x0c, 0xe3, 0x2d, 0x77, 0x8f, 0x78, 0x24, - 0x0c, 0x6b, 0x81, 0xbf, 0x41, 0x66, 0x80, 0xf5, 0xfc, 0x7c, 0xfa, 0xa3, 0x8e, 0xbf, 0x41, 0x16, - 0xa7, 0x0f, 0x0f, 0xca, 0xe3, 0xb7, 0xf4, 0x3a, 0xd8, 0x24, 0x81, 0xee, 0xc0, 0x04, 0x95, 0x6b, - 0xdc, 0x98, 0xe8, 0x68, 0x3f, 0xa2, 0x4c, 0xfa, 0xc0, 0x46, 0x25, 0x9c, 0x20, 0x82, 0x5e, 0x87, - 0x52, 0xcb, 0xdd, 0x24, 0x8d, 0xfd, 0x46, 0x8b, 0xcc, 0x8c, 0x31, 0x8a, 0xa9, 0xdb, 0xea, 0x96, - 0x44, 0xe2, 0x72, 0x91, 0xfa, 0x8b, 0xe3, 0xea, 0xe8, 0x2e, 0x3c, 0x12, 0x91, 0x60, 0xd7, 0xf5, - 0x1c, 0xba, 0x1d, 0x84, 0xbc, 0xc0, 0x9e, 0xc6, 0xc6, 0xd9, 0x7a, 0xbb, 0x24, 0x86, 0xee, 0x91, - 0xf5, 0x54, 0x2c, 0x9c, 0x51, 0x1b, 0xdd, 0x86, 0x49, 0xb6, 0x13, 0x6a, 0x9d, 0x56, 0xab, 0xe6, - 0xb7, 0xdc, 0xc6, 0xfe, 0xcc, 0x04, 0x23, 0xf8, 0x09, 0x79, 0x2f, 0x54, 0x4d, 0xf0, 0xd1, 0x41, - 0x19, 0xe2, 0x7f, 0x38, 0x59, 0x1b, 0x6d, 0xb0, 0xb7, 0x90, 0x4e, 0xe0, 0x46, 0xfb, 0x74, 0xfd, - 0x92, 0xfb, 0xd1, 0xcc, 0x64, 0x4f, 0x51, 0x58, 0x47, 0x55, 0x0f, 0x26, 0x7a, 0x21, 0x4e, 0x12, - 0xa4, 0x5b, 0x3b, 0x8c, 0x9a, 0xae, 0x37, 0x33, 0xc5, 0x4e, 0x0c, 0xb5, 0x33, 0xea, 0xb4, 0x10, - 0x73, 0x18, 0x7b, 0x07, 0xa1, 0x3f, 0x6e, 0xd3, 0x13, 0x74, 0x9a, 0x21, 0xc6, 0xef, 0x20, 0x12, - 0x80, 0x63, 0x1c, 0xca, 0xd4, 0x44, 0xd1, 0xfe, 0x0c, 0x62, 0xa8, 0x6a, 0xbb, 0xac, 0xaf, 0x7f, - 0x1e, 0xd3, 0x72, 0x74, 0x0b, 0x46, 0x88, 0xb7, 0xb7, 0x12, 0xf8, 0xbb, 0x33, 0x67, 0xb2, 0xf7, - 0xec, 0x32, 0x47, 0xe1, 0x07, 0x7a, 0x2c, 0xe0, 0x89, 0x62, 0x2c, 0x49, 0xa0, 0xfb, 0x30, 0x93, - 0x32, 0x23, 0x7c, 0x02, 0xce, 0xb2, 0x09, 0x78, 0x55, 0xd4, 0x9d, 0x59, 0xcf, 0xc0, 0x3b, 0xea, - 0x01, 0xc3, 0x99, 0xd4, 0xd1, 0x17, 0x60, 0x9c, 0x6f, 0x28, 0xfe, 0x88, 0x1a, 0xce, 0x9c, 0x63, - 0x5f, 0x73, 0x39, 0x7b, 0x73, 0x72, 0xc4, 0xc5, 0x73, 0xa2, 0x43, 0xe3, 0x7a, 0x69, 0x88, 0x4d, - 0x6a, 0xf6, 0x06, 0x4c, 0xa8, 0x73, 0x8b, 0x2d, 0x1d, 0x54, 0x86, 0x21, 0xc6, 0xed, 0x08, 0xfd, - 0x56, 0x89, 0xce, 0x14, 0xe3, 0x84, 0x30, 0x2f, 0x67, 0x33, 0xe5, 0xbe, 0x4f, 0x16, 0xf7, 0x23, - 0xc2, 0xa5, 0xea, 0xbc, 0x36, 0x53, 0x12, 0x80, 0x63, 0x1c, 0xfb, 0xff, 0x72, 0xae, 0x31, 0x3e, - 0x1c, 0x07, 0xb8, 0x0e, 0x9e, 0x81, 0xe2, 0xb6, 0x1f, 0x46, 0x14, 0x9b, 0xb5, 0x31, 0x14, 0xf3, - 0x89, 0x37, 0x44, 0x39, 0x56, 0x18, 0xe8, 0x15, 0x18, 0x6f, 0xe8, 0x0d, 0x88, 0xbb, 0x4c, 0x0d, - 0x81, 0xd1, 0x3a, 0x36, 0x71, 0xd1, 0xcb, 0x50, 0x64, 0x26, 0x10, 0x0d, 0xbf, 0x25, 0x98, 0x2c, - 0x79, 0x21, 0x17, 0x6b, 0xa2, 
0xfc, 0x48, 0xfb, 0x8d, 0x15, 0x36, 0xba, 0x02, 0xc3, 0xb4, 0x0b, - 0xd5, 0x9a, 0xb8, 0x45, 0x94, 0xaa, 0xe6, 0x06, 0x2b, 0xc5, 0x02, 0x6a, 0xff, 0xb5, 0x9c, 0x36, - 0xca, 0x54, 0x22, 0x25, 0xa8, 0x06, 0x23, 0xf7, 0x1c, 0x37, 0x72, 0xbd, 0x2d, 0xc1, 0x2e, 0x3c, - 0xd5, 0xf3, 0x4a, 0x61, 0x95, 0xde, 0xe4, 0x15, 0xf8, 0xa5, 0x27, 0xfe, 0x60, 0x49, 0x86, 0x52, - 0x0c, 0x3a, 0x9e, 0x47, 0x29, 0xe6, 0x06, 0xa5, 0x88, 0x79, 0x05, 0x4e, 0x51, 0xfc, 0xc1, 0x92, - 0x0c, 0x7a, 0x1b, 0x40, 0x2e, 0x4b, 0xd2, 0x14, 0xa6, 0x07, 0xcf, 0xf4, 0x27, 0xba, 0xae, 0xea, - 0x2c, 0x4e, 0xd0, 0x2b, 0x35, 0xfe, 0x8f, 0x35, 0x7a, 0x76, 0xc4, 0xd8, 0xaa, 0xee, 0xce, 0xa0, - 0xef, 0xa4, 0x27, 0x81, 0x13, 0x44, 0xa4, 0xb9, 0x10, 0x89, 0xc1, 0xf9, 0xe4, 0x60, 0x32, 0xc5, - 0xba, 0xbb, 0x4b, 0xf4, 0x53, 0x43, 0x10, 0xc1, 0x31, 0x3d, 0xfb, 0x17, 0xf3, 0x30, 0x93, 0xd5, - 0x5d, 0xba, 0xe8, 0xc8, 0x7d, 0x37, 0x5a, 0xa2, 0xdc, 0x90, 0x65, 0x2e, 0xba, 0x65, 0x51, 0x8e, - 0x15, 0x06, 0x9d, 0xfd, 0xd0, 0xdd, 0x92, 0x22, 0xe1, 0x50, 0x3c, 0xfb, 0x75, 0x56, 0x8a, 0x05, - 0x94, 0xe2, 0x05, 0xc4, 0x09, 0x85, 0x6d, 0x8b, 0xb6, 0x4a, 0x30, 0x2b, 0xc5, 0x02, 0xaa, 0xeb, - 0x9b, 0x0a, 0x7d, 0xf4, 0x4d, 0xc6, 0x10, 0x0d, 0x9d, 0xec, 0x10, 0xa1, 0x2f, 0x02, 0x6c, 0xba, - 0x9e, 0x1b, 0x6e, 0x33, 0xea, 0xc3, 0xc7, 0xa6, 0xae, 0x78, 0xa9, 0x15, 0x45, 0x05, 0x6b, 0x14, - 0xd1, 0x8b, 0x30, 0xaa, 0x36, 0x60, 0xb5, 0xc2, 0x1e, 0xfa, 0x34, 0xc3, 0x89, 0xf8, 0x34, 0xaa, - 0x60, 0x1d, 0xcf, 0x7e, 0x37, 0xb9, 0x5e, 0xc4, 0x0e, 0xd0, 0xc6, 0xd7, 0x1a, 0x74, 0x7c, 0x73, - 0xbd, 0xc7, 0xd7, 0xfe, 0xf5, 0x3c, 0x4c, 0x1a, 0x8d, 0x75, 0xc2, 0x01, 0xce, 0xac, 0xeb, 0xf4, - 0x9e, 0x73, 0x22, 0x22, 0xf6, 0x9f, 0xdd, 0x7f, 0xab, 0xe8, 0x77, 0x21, 0xdd, 0x01, 0xbc, 0x3e, - 0xfa, 0x22, 0x94, 0x5a, 0x4e, 0xc8, 0x74, 0x57, 0x44, 0xec, 0xbb, 0x41, 0x88, 0xc5, 0x72, 0x84, - 0x13, 0x46, 0xda, 0x55, 0xc3, 0x69, 0xc7, 0x24, 0xe9, 0x85, 0x4c, 0x79, 0x1f, 0x69, 0x3c, 0xa5, - 0x3a, 0x41, 0x19, 0xa4, 0x7d, 0xcc, 0x61, 0xe8, 0x65, 0x18, 0x0b, 0x08, 0x5b, 0x15, 0x4b, 0x94, - 0x95, 0x63, 0xcb, 0x6c, 0x28, 0xe6, 0xf9, 0xb0, 0x06, 0xc3, 0x06, 0x66, 0xcc, 0xca, 0x0f, 0xf7, - 0x60, 0xe5, 0x9f, 0x82, 0x11, 0xf6, 0x43, 0xad, 0x00, 0x35, 0x1b, 0x55, 0x5e, 0x8c, 0x25, 0x3c, - 0xb9, 0x60, 0x8a, 0x03, 0x2e, 0x98, 0x4f, 0xc2, 0x44, 0xc5, 0x21, 0xbb, 0xbe, 0xb7, 0xec, 0x35, - 0xdb, 0xbe, 0xeb, 0x45, 0x68, 0x06, 0x0a, 0xec, 0x76, 0xe0, 0x7b, 0xbb, 0x40, 0x29, 0xe0, 0x02, - 0x65, 0xcc, 0xed, 0x2d, 0x38, 0x57, 0xf1, 0xef, 0x79, 0xf7, 0x9c, 0xa0, 0xb9, 0x50, 0xab, 0x6a, - 0x72, 0xee, 0x9a, 0x94, 0xb3, 0xb8, 0x31, 0x52, 0xea, 0x99, 0xaa, 0xd5, 0xe4, 0x77, 0xed, 0x8a, - 0xdb, 0x22, 0x19, 0xda, 0x88, 0xbf, 0x99, 0x33, 0x5a, 0x8a, 0xf1, 0xd5, 0x83, 0x91, 0x95, 0xf9, - 0x60, 0xf4, 0x06, 0x14, 0x37, 0x5d, 0xd2, 0x6a, 0x62, 0xb2, 0x29, 0x96, 0xd8, 0x93, 0xd9, 0xf6, - 0x15, 0x2b, 0x14, 0x53, 0x6a, 0x9f, 0xb8, 0x94, 0xb6, 0x22, 0x2a, 0x63, 0x45, 0x06, 0xed, 0xc0, - 0x94, 0x14, 0x03, 0x24, 0x54, 0x2c, 0xb8, 0xa7, 0x7a, 0xc9, 0x16, 0x26, 0xf1, 0xb3, 0x87, 0x07, - 0xe5, 0x29, 0x9c, 0x20, 0x83, 0xbb, 0x08, 0x53, 0xb1, 0x6c, 0x97, 0x1e, 0xad, 0x05, 0x36, 0xfc, - 0x4c, 0x2c, 0x63, 0x12, 0x26, 0x2b, 0xb5, 0x7f, 0xd4, 0x82, 0x47, 0xbb, 0x46, 0x46, 0x48, 0xda, - 0x27, 0x3c, 0x0b, 0x49, 0xc9, 0x37, 0xd7, 0x5f, 0xf2, 0xb5, 0xff, 0x81, 0x05, 0x67, 0x97, 0x77, - 0xdb, 0xd1, 0x7e, 0xc5, 0x35, 0x5f, 0x77, 0x5e, 0x82, 0xe1, 0x5d, 0xd2, 0x74, 0x3b, 0xbb, 0x62, - 0xe6, 0xca, 0xf2, 0xf8, 0x59, 0x65, 0xa5, 0x47, 0x07, 0xe5, 0xf1, 0x7a, 0xe4, 0x07, 0xce, 0x16, - 0xe1, 0x05, 0x58, 0xa0, 0xb3, 0x43, 0xdc, 0x7d, 0x9f, 
0xdc, 0x72, 0x77, 0x5d, 0x69, 0x2f, 0xd3, - 0x53, 0x77, 0x36, 0x27, 0x07, 0x74, 0xee, 0x8d, 0x8e, 0xe3, 0x45, 0x6e, 0xb4, 0x2f, 0x1e, 0x66, - 0x24, 0x11, 0x1c, 0xd3, 0xb3, 0xbf, 0x61, 0xc1, 0xa4, 0x5c, 0xf7, 0x0b, 0xcd, 0x66, 0x40, 0xc2, - 0x10, 0xcd, 0x42, 0xce, 0x6d, 0x8b, 0x5e, 0x82, 0xe8, 0x65, 0xae, 0x5a, 0xc3, 0x39, 0xb7, 0x8d, - 0x6a, 0x50, 0xe2, 0x66, 0x37, 0xf1, 0xe2, 0x1a, 0xc8, 0x78, 0x87, 0xf5, 0x60, 0x5d, 0xd6, 0xc4, - 0x31, 0x11, 0xc9, 0xc1, 0xb1, 0x33, 0x33, 0x6f, 0xbe, 0x7a, 0xdd, 0x10, 0xe5, 0x58, 0x61, 0xa0, - 0xab, 0x50, 0xf4, 0xfc, 0x26, 0xb7, 0x82, 0xe2, 0xb7, 0x1f, 0x5b, 0xb2, 0x6b, 0xa2, 0x0c, 0x2b, - 0xa8, 0xfd, 0x43, 0x16, 0x8c, 0xc9, 0x2f, 0x1b, 0x90, 0x99, 0xa4, 0x5b, 0x2b, 0x66, 0x24, 0xe3, - 0xad, 0x45, 0x99, 0x41, 0x06, 0x31, 0x78, 0xc0, 0xfc, 0x71, 0x78, 0x40, 0xfb, 0x47, 0x72, 0x30, - 0x21, 0xbb, 0x53, 0xef, 0x6c, 0x84, 0x24, 0x42, 0xeb, 0x50, 0x72, 0xf8, 0x90, 0x13, 0xb9, 0x62, - 0x9f, 0x48, 0x17, 0x3e, 0x8c, 0xf9, 0x89, 0xaf, 0xe5, 0x05, 0x59, 0x1b, 0xc7, 0x84, 0x50, 0x0b, - 0xa6, 0x3d, 0x3f, 0x62, 0x47, 0xb4, 0x82, 0xf7, 0x7a, 0x02, 0x49, 0x52, 0x3f, 0x2f, 0xa8, 0x4f, - 0xaf, 0x25, 0xa9, 0xe0, 0x6e, 0xc2, 0x68, 0x59, 0x2a, 0x3c, 0xf2, 0xd9, 0xe2, 0x86, 0x3e, 0x0b, - 0xe9, 0xfa, 0x0e, 0xfb, 0x57, 0x2c, 0x28, 0x49, 0xb4, 0xd3, 0x78, 0xed, 0x5a, 0x85, 0x91, 0x90, - 0x4d, 0x82, 0x1c, 0x1a, 0xbb, 0x57, 0xc7, 0xf9, 0x7c, 0xc5, 0x37, 0x0f, 0xff, 0x1f, 0x62, 0x49, - 0x83, 0xe9, 0xbb, 0x55, 0xf7, 0x3f, 0x22, 0xfa, 0x6e, 0xd5, 0x9f, 0x8c, 0x1b, 0xe6, 0xbf, 0xb1, - 0x3e, 0x6b, 0x62, 0x2d, 0x65, 0x90, 0xda, 0x01, 0xd9, 0x74, 0xef, 0x27, 0x19, 0xa4, 0x1a, 0x2b, - 0xc5, 0x02, 0x8a, 0xde, 0x86, 0xb1, 0x86, 0x54, 0x74, 0xc6, 0xc7, 0xc0, 0x95, 0x9e, 0x4a, 0x77, - 0xf5, 0x3e, 0xc3, 0x2d, 0xa4, 0x97, 0xb4, 0xfa, 0xd8, 0xa0, 0x66, 0x3e, 0xb7, 0xe7, 0xfb, 0x3d, - 0xb7, 0xc7, 0x74, 0xb3, 0x1f, 0x9f, 0x7f, 0xcc, 0x82, 0x61, 0xae, 0x2e, 0x1b, 0x4c, 0xbf, 0xa8, - 0x3d, 0x57, 0xc5, 0x63, 0x77, 0x97, 0x16, 0x8a, 0xe7, 0x27, 0xb4, 0x0a, 0x25, 0xf6, 0x83, 0xa9, - 0x0d, 0xf2, 0xd9, 0xa6, 0xe1, 0xbc, 0x55, 0xbd, 0x83, 0x77, 0x65, 0x35, 0x1c, 0x53, 0xb0, 0xbf, - 0x9a, 0xa7, 0x47, 0x55, 0x8c, 0x6a, 0xdc, 0xe0, 0xd6, 0xc3, 0xbb, 0xc1, 0x73, 0x0f, 0xeb, 0x06, - 0xdf, 0x82, 0xc9, 0x86, 0xf6, 0xb8, 0x15, 0xcf, 0xe4, 0xd5, 0x9e, 0x8b, 0x44, 0x7b, 0x07, 0xe3, - 0x2a, 0xa3, 0x25, 0x93, 0x08, 0x4e, 0x52, 0x45, 0xdf, 0x09, 0x63, 0x7c, 0x9e, 0x45, 0x2b, 0xdc, - 0x62, 0xe1, 0x13, 0xd9, 0xeb, 0x45, 0x6f, 0x82, 0xad, 0xc4, 0xba, 0x56, 0x1d, 0x1b, 0xc4, 0xec, - 0x5f, 0x2c, 0xc2, 0xd0, 0xf2, 0x1e, 0xf1, 0xa2, 0x53, 0x38, 0x90, 0x1a, 0x30, 0xe1, 0x7a, 0x7b, - 0x7e, 0x6b, 0x8f, 0x34, 0x39, 0xfc, 0x38, 0x97, 0xeb, 0x23, 0x82, 0xf4, 0x44, 0xd5, 0x20, 0x81, - 0x13, 0x24, 0x1f, 0x86, 0x84, 0x79, 0x1d, 0x86, 0xf9, 0xdc, 0x0b, 0xf1, 0x32, 0x55, 0x19, 0xcc, - 0x06, 0x51, 0xec, 0x82, 0x58, 0xfa, 0xe5, 0xda, 0x67, 0x51, 0x1d, 0xbd, 0x0b, 0x13, 0x9b, 0x6e, - 0x10, 0x46, 0x54, 0x34, 0x0c, 0x23, 0x67, 0xb7, 0xfd, 0x00, 0x12, 0xa5, 0x1a, 0x87, 0x15, 0x83, - 0x12, 0x4e, 0x50, 0x46, 0x5b, 0x30, 0x4e, 0x85, 0x9c, 0xb8, 0xa9, 0x91, 0x63, 0x37, 0xa5, 0x54, - 0x46, 0xb7, 0x74, 0x42, 0xd8, 0xa4, 0x4b, 0x0f, 0x93, 0x06, 0x13, 0x8a, 0x8a, 0x8c, 0xa3, 0x50, - 0x87, 0x09, 0x97, 0x86, 0x38, 0x8c, 0x9e, 0x49, 0xcc, 0x6c, 0xa5, 0x64, 0x9e, 0x49, 0x9a, 0x71, - 0xca, 0x3b, 0x50, 0x22, 0x74, 0x08, 0x29, 0x61, 0xa1, 0x18, 0x9f, 0x1f, 0xac, 0xaf, 0xab, 0x6e, - 0x23, 0xf0, 0x4d, 0x59, 0x7e, 0x59, 0x52, 0xc2, 0x31, 0x51, 0xb4, 0x04, 0xc3, 0x21, 0x09, 0x5c, - 0x12, 0x0a, 0x15, 0x79, 0x8f, 0x69, 0x64, 0x68, 0xdc, 0xe2, 0x93, 0xff, 0xc6, 
0xa2, 0x2a, 0x5d, - 0x5e, 0x0e, 0x93, 0x86, 0x98, 0x56, 0x5c, 0x5b, 0x5e, 0x0b, 0xac, 0x14, 0x0b, 0x28, 0x7a, 0x1d, - 0x46, 0x02, 0xd2, 0x62, 0xca, 0xa2, 0xf1, 0xc1, 0x17, 0x39, 0xd7, 0x3d, 0xf1, 0x7a, 0x58, 0x12, - 0x40, 0x37, 0x01, 0x05, 0x84, 0xf2, 0x10, 0xae, 0xb7, 0xa5, 0x8c, 0x39, 0x84, 0xae, 0xfb, 0x31, - 0xd1, 0xfe, 0x19, 0x1c, 0x63, 0x48, 0xeb, 0x62, 0x9c, 0x52, 0x0d, 0x5d, 0x87, 0x69, 0x55, 0x5a, - 0xf5, 0xc2, 0xc8, 0xf1, 0x1a, 0x84, 0xa9, 0xb9, 0x4b, 0x31, 0x57, 0x84, 0x93, 0x08, 0xb8, 0xbb, - 0x8e, 0xfd, 0xd3, 0x94, 0x9d, 0xa1, 0xa3, 0x75, 0x0a, 0xbc, 0xc0, 0x6b, 0x26, 0x2f, 0x70, 0x3e, - 0x73, 0xe6, 0x32, 0xf8, 0x80, 0x43, 0x0b, 0x46, 0xb5, 0x99, 0x8d, 0xd7, 0xac, 0xd5, 0x63, 0xcd, - 0x76, 0x60, 0x8a, 0xae, 0xf4, 0xdb, 0x1b, 0x21, 0x09, 0xf6, 0x48, 0x93, 0x2d, 0xcc, 0xdc, 0x83, - 0x2d, 0x4c, 0xf5, 0xca, 0x7c, 0x2b, 0x41, 0x10, 0x77, 0x35, 0x81, 0x5e, 0x92, 0x9a, 0x93, 0xbc, - 0x61, 0xa4, 0xc5, 0xb5, 0x22, 0x47, 0x07, 0xe5, 0x29, 0xed, 0x43, 0x74, 0x4d, 0x89, 0xfd, 0x8e, - 0xfc, 0x46, 0xf5, 0x9a, 0xdf, 0x50, 0x8b, 0x25, 0xf1, 0x9a, 0xaf, 0x96, 0x03, 0x8e, 0x71, 0xe8, - 0x1e, 0xa5, 0x22, 0x48, 0xf2, 0x35, 0x9f, 0x0a, 0x28, 0x98, 0x41, 0xec, 0xe7, 0x01, 0x96, 0xef, - 0x93, 0x06, 0x5f, 0xea, 0xfa, 0x03, 0xa4, 0x95, 0xfd, 0x00, 0x69, 0xff, 0x07, 0x0b, 0x26, 0x56, - 0x96, 0x0c, 0x31, 0x71, 0x0e, 0x80, 0xcb, 0x46, 0x6f, 0xbe, 0xb9, 0x26, 0x75, 0xeb, 0x5c, 0x3d, - 0xaa, 0x4a, 0xb1, 0x86, 0x81, 0xce, 0x43, 0xbe, 0xd5, 0xf1, 0x84, 0xc8, 0x32, 0x72, 0x78, 0x50, - 0xce, 0xdf, 0xea, 0x78, 0x98, 0x96, 0x69, 0x16, 0x82, 0xf9, 0x81, 0x2d, 0x04, 0xfb, 0xba, 0xc9, - 0xa1, 0x32, 0x0c, 0xdd, 0xbb, 0xe7, 0x36, 0xb9, 0x33, 0x82, 0xd0, 0xfb, 0xbf, 0xf9, 0x66, 0xb5, - 0x12, 0x62, 0x5e, 0x6e, 0x7f, 0x25, 0x0f, 0xb3, 0x2b, 0x2d, 0x72, 0xff, 0x03, 0x3a, 0x64, 0x0c, - 0x6a, 0xdf, 0x78, 0x3c, 0x7e, 0xf1, 0xb8, 0x36, 0xac, 0xfd, 0xc7, 0x63, 0x13, 0x46, 0xf8, 0x63, - 0xb6, 0x74, 0xcf, 0x78, 0x25, 0xad, 0xf5, 0xec, 0x01, 0x99, 0xe3, 0x8f, 0xe2, 0xc2, 0xc0, 0x5d, - 0xdd, 0xb4, 0xa2, 0x14, 0x4b, 0xe2, 0xb3, 0x9f, 0x81, 0x31, 0x1d, 0xf3, 0x58, 0xd6, 0xe4, 0x7f, - 0x31, 0x0f, 0x53, 0xb4, 0x07, 0x0f, 0x75, 0x22, 0xee, 0x74, 0x4f, 0xc4, 0x49, 0x5b, 0x14, 0xf7, - 0x9f, 0x8d, 0xb7, 0x93, 0xb3, 0xf1, 0x5c, 0xd6, 0x6c, 0x9c, 0xf6, 0x1c, 0x7c, 0xaf, 0x05, 0x67, - 0x56, 0x5a, 0x7e, 0x63, 0x27, 0x61, 0xf5, 0xfb, 0x22, 0x8c, 0xd2, 0x73, 0x3c, 0x34, 0xbc, 0xc1, - 0x0c, 0xff, 0x40, 0x01, 0xc2, 0x3a, 0x9e, 0x56, 0xed, 0xce, 0x9d, 0x6a, 0x25, 0xcd, 0xad, 0x50, - 0x80, 0xb0, 0x8e, 0x67, 0x7f, 0xdd, 0x82, 0x8b, 0xd7, 0x97, 0x96, 0xe3, 0xa5, 0xd8, 0xe5, 0xd9, - 0x48, 0xa5, 0xc0, 0xa6, 0xd6, 0x95, 0x58, 0x0a, 0xac, 0xb0, 0x5e, 0x08, 0xe8, 0x47, 0xc5, 0x6b, - 0xf7, 0xa7, 0x2c, 0x38, 0x73, 0xdd, 0x8d, 0xe8, 0xb5, 0x9c, 0xf4, 0xb1, 0xa3, 0xf7, 0x72, 0xe8, - 0x46, 0x7e, 0xb0, 0x9f, 0xf4, 0xb1, 0xc3, 0x0a, 0x82, 0x35, 0x2c, 0xde, 0xf2, 0x9e, 0xcb, 0xcc, - 0xa8, 0x72, 0xa6, 0x2a, 0x0a, 0x8b, 0x72, 0xac, 0x30, 0xe8, 0x87, 0x35, 0xdd, 0x80, 0x89, 0x12, - 0xfb, 0xe2, 0x84, 0x55, 0x1f, 0x56, 0x91, 0x00, 0x1c, 0xe3, 0xd8, 0x7f, 0x68, 0x41, 0xf9, 0x7a, - 0xab, 0x13, 0x46, 0x24, 0xd8, 0x0c, 0x33, 0x4e, 0xc7, 0xe7, 0xa1, 0x44, 0xa4, 0xe0, 0x2e, 0x7a, - 0xad, 0x58, 0x4d, 0x25, 0xd1, 0x73, 0x57, 0x3f, 0x85, 0x37, 0x80, 0x0f, 0xc1, 0xf1, 0x8c, 0xc0, - 0x57, 0x00, 0x11, 0xbd, 0x2d, 0xdd, 0xf7, 0x91, 0x39, 0x51, 0x2d, 0x77, 0x41, 0x71, 0x4a, 0x0d, - 0xfb, 0x47, 0x2d, 0x38, 0xa7, 0x3e, 0xf8, 0x23, 0xf7, 0x99, 0xf6, 0xcf, 0xe5, 0x60, 0xfc, 0xc6, - 0xfa, 0x7a, 0xed, 0x3a, 0x89, 0xc4, 0xb5, 0xdd, 0x5f, 0xb7, 0x8e, 0x35, 0x15, 0x61, 0x2f, 0x29, - 0xb0, 
0x13, 0xb9, 0xad, 0x39, 0xee, 0x42, 0x3f, 0x57, 0xf5, 0xa2, 0xdb, 0x41, 0x3d, 0x0a, 0x5c, - 0x6f, 0x2b, 0x55, 0xa9, 0x28, 0x99, 0x8b, 0x7c, 0x16, 0x73, 0x81, 0x9e, 0x87, 0x61, 0xe6, 0xc3, - 0x2f, 0x27, 0xe1, 0x31, 0x25, 0x44, 0xb1, 0xd2, 0xa3, 0x83, 0x72, 0xe9, 0x0e, 0xae, 0xf2, 0x3f, - 0x58, 0xa0, 0xa2, 0x3b, 0x30, 0xba, 0x1d, 0x45, 0xed, 0x1b, 0xc4, 0x69, 0x92, 0x40, 0x1e, 0x87, - 0x97, 0xd2, 0x8e, 0x43, 0x3a, 0x08, 0x1c, 0x2d, 0x3e, 0x41, 0xe2, 0xb2, 0x10, 0xeb, 0x74, 0xec, - 0x3a, 0x40, 0x0c, 0x3b, 0x21, 0x85, 0x8a, 0xfd, 0xfb, 0x16, 0x8c, 0x70, 0x77, 0xca, 0x00, 0xbd, - 0x0a, 0x05, 0x72, 0x9f, 0x34, 0x04, 0xab, 0x9c, 0xda, 0xe1, 0x98, 0xd3, 0xe2, 0xcf, 0x03, 0xf4, - 0x3f, 0x66, 0xb5, 0xd0, 0x0d, 0x18, 0xa1, 0xbd, 0xbd, 0xae, 0x7c, 0x4b, 0x1f, 0xcf, 0xfa, 0x62, - 0x35, 0xed, 0x9c, 0x39, 0x13, 0x45, 0x58, 0x56, 0x67, 0xaa, 0xee, 0x46, 0xbb, 0x4e, 0x4f, 0xec, - 0xa8, 0x17, 0x63, 0xb1, 0xbe, 0x54, 0xe3, 0x48, 0x82, 0x1a, 0x57, 0x75, 0xcb, 0x42, 0x1c, 0x13, - 0xb1, 0xd7, 0xa1, 0x44, 0x27, 0x75, 0xa1, 0xe5, 0x3a, 0xbd, 0xb5, 0xec, 0x4f, 0x43, 0x49, 0x6a, - 0xbc, 0x43, 0xe1, 0xc9, 0xc5, 0xa8, 0x4a, 0x85, 0x78, 0x88, 0x63, 0xb8, 0xbd, 0x09, 0x67, 0x99, - 0xa9, 0x83, 0x13, 0x6d, 0x1b, 0x7b, 0xac, 0xff, 0x62, 0x7e, 0x46, 0x48, 0x9e, 0x7c, 0x66, 0x66, - 0x34, 0x67, 0x89, 0x31, 0x49, 0x31, 0x96, 0x42, 0xed, 0x3f, 0x28, 0xc0, 0x63, 0xd5, 0x7a, 0xb6, - 0xa7, 0xed, 0xcb, 0x30, 0xc6, 0xf9, 0x52, 0xba, 0xb4, 0x9d, 0x96, 0x68, 0x57, 0x3d, 0x04, 0xae, - 0x6b, 0x30, 0x6c, 0x60, 0xa2, 0x8b, 0x90, 0x77, 0xdf, 0xf3, 0x92, 0x76, 0xc7, 0xd5, 0x37, 0xd6, - 0x30, 0x2d, 0xa7, 0x60, 0xca, 0xe2, 0xf2, 0xbb, 0x43, 0x81, 0x15, 0x9b, 0xfb, 0x1a, 0x4c, 0xb8, - 0x61, 0x23, 0x74, 0xab, 0x1e, 0x3d, 0x67, 0xb4, 0x93, 0x4a, 0x69, 0x45, 0x68, 0xa7, 0x15, 0x14, - 0x27, 0xb0, 0xb5, 0x8b, 0x6c, 0x68, 0x60, 0x36, 0xb9, 0xaf, 0x6b, 0x13, 0x95, 0x00, 0xda, 0xec, - 0xeb, 0x42, 0x66, 0xc5, 0x27, 0x24, 0x00, 0xfe, 0xc1, 0x21, 0x96, 0x30, 0x2a, 0x72, 0x36, 0xb6, - 0x9d, 0xf6, 0x42, 0x27, 0xda, 0xae, 0xb8, 0x61, 0xc3, 0xdf, 0x23, 0xc1, 0x3e, 0xd3, 0x16, 0x14, - 0x63, 0x91, 0x53, 0x01, 0x96, 0x6e, 0x2c, 0xd4, 0x28, 0x26, 0xee, 0xae, 0x63, 0xb2, 0xc1, 0x70, - 0x12, 0x6c, 0xf0, 0x02, 0x4c, 0xca, 0x66, 0xea, 0x24, 0x64, 0x97, 0xe2, 0x28, 0xeb, 0x98, 0xb2, - 0x2d, 0x16, 0xc5, 0xaa, 0x5b, 0x49, 0x7c, 0xf4, 0x12, 0x8c, 0xbb, 0x9e, 0x1b, 0xb9, 0x4e, 0xe4, - 0x07, 0x8c, 0xa5, 0xe0, 0x8a, 0x01, 0x66, 0xba, 0x57, 0xd5, 0x01, 0xd8, 0xc4, 0xb3, 0xff, 0x4b, - 0x01, 0xa6, 0xd9, 0xb4, 0x7d, 0x6b, 0x85, 0x7d, 0x64, 0x56, 0xd8, 0x9d, 0xee, 0x15, 0x76, 0x12, - 0xfc, 0xfd, 0x87, 0xb9, 0xcc, 0xde, 0x85, 0x92, 0x32, 0x7e, 0x96, 0xde, 0x0f, 0x56, 0x86, 0xf7, - 0x43, 0x7f, 0xee, 0x43, 0xbe, 0x5b, 0xe7, 0x53, 0xdf, 0xad, 0xff, 0xb6, 0x05, 0xb1, 0x0d, 0x28, - 0xba, 0x01, 0xa5, 0xb6, 0xcf, 0xec, 0x2c, 0x02, 0x69, 0xbc, 0xf4, 0x58, 0xea, 0x45, 0xc5, 0x2f, - 0x45, 0x3e, 0x7e, 0x35, 0x59, 0x03, 0xc7, 0x95, 0xd1, 0x22, 0x8c, 0xb4, 0x03, 0x52, 0x8f, 0x98, - 0xcf, 0x6f, 0x5f, 0x3a, 0x7c, 0x8d, 0x70, 0x7c, 0x2c, 0x2b, 0xda, 0x3f, 0x6f, 0x01, 0xf0, 0xa7, - 0x61, 0xc7, 0xdb, 0x22, 0xa7, 0xa0, 0xee, 0xae, 0x40, 0x21, 0x6c, 0x93, 0x46, 0x2f, 0x0b, 0x98, - 0xb8, 0x3f, 0xf5, 0x36, 0x69, 0xc4, 0x03, 0x4e, 0xff, 0x61, 0x56, 0xdb, 0xfe, 0x3e, 0x80, 0x89, - 0x18, 0xad, 0x1a, 0x91, 0x5d, 0xf4, 0xac, 0xe1, 0x03, 0x78, 0x3e, 0xe1, 0x03, 0x58, 0x62, 0xd8, - 0x9a, 0x66, 0xf5, 0x5d, 0xc8, 0xef, 0x3a, 0xf7, 0x85, 0xea, 0xec, 0xe9, 0xde, 0xdd, 0xa0, 0xf4, - 0xe7, 0x56, 0x9d, 0xfb, 0x5c, 0x48, 0x7c, 0x5a, 0x2e, 0x90, 0x55, 0xe7, 0xfe, 0x11, 0xb7, 0x73, - 0x61, 0x87, 0xd4, 0x2d, 0x37, 
0x8c, 0xbe, 0xf4, 0x9f, 0xe3, 0xff, 0x6c, 0xd9, 0xd1, 0x46, 0x58, - 0x5b, 0xae, 0x27, 0x1e, 0x4a, 0x07, 0x6a, 0xcb, 0xf5, 0x92, 0x6d, 0xb9, 0xde, 0x00, 0x6d, 0xb9, - 0x1e, 0x7a, 0x1f, 0x46, 0x84, 0x51, 0x82, 0xf0, 0xb9, 0x9f, 0x1f, 0xa0, 0x3d, 0x61, 0xd3, 0xc0, - 0xdb, 0x9c, 0x97, 0x42, 0xb0, 0x28, 0xed, 0xdb, 0xae, 0x6c, 0x10, 0xfd, 0x0d, 0x0b, 0x26, 0xc4, - 0x6f, 0x4c, 0xde, 0xeb, 0x90, 0x30, 0x12, 0xbc, 0xe7, 0xa7, 0x07, 0xef, 0x83, 0xa8, 0xc8, 0xbb, - 0xf2, 0x69, 0x79, 0xcc, 0x9a, 0xc0, 0xbe, 0x3d, 0x4a, 0xf4, 0x02, 0xfd, 0x23, 0x0b, 0xce, 0xee, - 0x3a, 0xf7, 0x79, 0x8b, 0xbc, 0x0c, 0x3b, 0x91, 0xeb, 0x0b, 0x63, 0xfd, 0x57, 0x07, 0x9b, 0xfe, - 0xae, 0xea, 0xbc, 0x93, 0xd2, 0xae, 0xf7, 0x6c, 0x1a, 0x4a, 0xdf, 0xae, 0xa6, 0xf6, 0x6b, 0x76, - 0x13, 0x8a, 0x72, 0xbd, 0xa5, 0xa8, 0x1a, 0x2a, 0x3a, 0x63, 0x7d, 0x6c, 0x9b, 0x10, 0xdd, 0x11, - 0x8f, 0xb6, 0x23, 0xd6, 0xda, 0x43, 0x6d, 0xe7, 0x5d, 0x18, 0xd3, 0xd7, 0xd8, 0x43, 0x6d, 0xeb, - 0x3d, 0x38, 0x93, 0xb2, 0x96, 0x1e, 0x6a, 0x93, 0xf7, 0xe0, 0x7c, 0xe6, 0xfa, 0x78, 0x98, 0x0d, - 0xdb, 0x3f, 0x67, 0xe9, 0xe7, 0xe0, 0x29, 0xbc, 0x39, 0x2c, 0x99, 0x6f, 0x0e, 0x97, 0x7a, 0xef, - 0x9c, 0x8c, 0x87, 0x87, 0xb7, 0xf5, 0x4e, 0xd3, 0x53, 0x1d, 0xbd, 0x0e, 0xc3, 0x2d, 0x5a, 0x22, - 0xad, 0x61, 0xec, 0xfe, 0x3b, 0x32, 0xe6, 0xa5, 0x58, 0x79, 0x88, 0x05, 0x05, 0xfb, 0x97, 0x2c, - 0x28, 0x9c, 0xc2, 0x48, 0x60, 0x73, 0x24, 0x9e, 0xcd, 0x24, 0x2d, 0x62, 0xf1, 0xcd, 0x61, 0xe7, - 0xde, 0xf2, 0xfd, 0x88, 0x78, 0x21, 0x13, 0x15, 0x53, 0x07, 0xe6, 0xbb, 0xe0, 0xcc, 0x2d, 0xdf, - 0x69, 0x2e, 0x3a, 0x2d, 0xc7, 0x6b, 0x90, 0xa0, 0xea, 0x6d, 0xf5, 0x35, 0xcb, 0xd2, 0x8d, 0xa8, - 0x72, 0xfd, 0x8c, 0xa8, 0xec, 0x6d, 0x40, 0x7a, 0x03, 0xc2, 0x70, 0x15, 0xc3, 0x88, 0xcb, 0x9b, - 0x12, 0xc3, 0xff, 0x64, 0x3a, 0x77, 0xd7, 0xd5, 0x33, 0xcd, 0x24, 0x93, 0x17, 0x60, 0x49, 0xc8, - 0x7e, 0x19, 0x52, 0x9d, 0xd5, 0xfa, 0xab, 0x0d, 0xec, 0xcf, 0xc3, 0x34, 0xab, 0x79, 0x4c, 0x91, - 0xd6, 0x4e, 0x68, 0x25, 0x53, 0x22, 0xd3, 0xd8, 0x5f, 0xb6, 0x60, 0x72, 0x2d, 0x11, 0xb0, 0xe3, - 0x0a, 0x7b, 0x00, 0x4d, 0x51, 0x86, 0xd7, 0x59, 0x29, 0x16, 0xd0, 0x13, 0xd7, 0x41, 0xfd, 0x99, - 0x05, 0xb1, 0xff, 0xe8, 0x29, 0x30, 0x5e, 0x4b, 0x06, 0xe3, 0x95, 0xaa, 0x1b, 0x51, 0xdd, 0xc9, - 0xe2, 0xbb, 0xd0, 0x4d, 0x15, 0x2c, 0xa1, 0x87, 0x5a, 0x24, 0x26, 0xc3, 0x5d, 0xeb, 0x27, 0xcc, - 0x88, 0x0a, 0x32, 0x7c, 0x02, 0xb3, 0x9d, 0x52, 0xb8, 0x1f, 0x11, 0xdb, 0x29, 0xd5, 0x9f, 0x8c, - 0x1d, 0x5a, 0xd3, 0xba, 0xcc, 0x4e, 0xae, 0x6f, 0x67, 0xb6, 0xf0, 0x4e, 0xcb, 0x7d, 0x9f, 0xa8, - 0x88, 0x2f, 0x65, 0x61, 0xdb, 0x2e, 0x4a, 0x8f, 0x0e, 0xca, 0xe3, 0xea, 0x1f, 0x0f, 0xef, 0x16, - 0x57, 0xb1, 0x6f, 0xc0, 0x64, 0x62, 0xc0, 0xd0, 0x8b, 0x30, 0xd4, 0xde, 0x76, 0x42, 0x92, 0xb0, - 0x17, 0x1d, 0xaa, 0xd1, 0xc2, 0xa3, 0x83, 0xf2, 0x84, 0xaa, 0xc0, 0x4a, 0x30, 0xc7, 0xb6, 0xff, - 0x87, 0x05, 0x85, 0x35, 0xbf, 0x79, 0x1a, 0x8b, 0xe9, 0x35, 0x63, 0x31, 0x5d, 0xc8, 0x0a, 0x8e, - 0x99, 0xb9, 0x8e, 0x56, 0x12, 0xeb, 0xe8, 0x52, 0x26, 0x85, 0xde, 0x4b, 0x68, 0x17, 0x46, 0x59, - 0xc8, 0x4d, 0x61, 0xbf, 0xfa, 0xbc, 0x21, 0x03, 0x94, 0x13, 0x32, 0xc0, 0xa4, 0x86, 0xaa, 0x49, - 0x02, 0x4f, 0xc1, 0x88, 0xb0, 0xa1, 0x4c, 0x5a, 0xfd, 0x0b, 0x5c, 0x2c, 0xe1, 0xf6, 0x8f, 0xe5, - 0xc1, 0x08, 0xf1, 0x89, 0x7e, 0xc5, 0x82, 0xb9, 0x80, 0xbb, 0x51, 0x36, 0x2b, 0x9d, 0xc0, 0xf5, - 0xb6, 0xea, 0x8d, 0x6d, 0xd2, 0xec, 0xb4, 0x5c, 0x6f, 0xab, 0xba, 0xe5, 0xf9, 0xaa, 0x78, 0xf9, - 0x3e, 0x69, 0x74, 0xd8, 0x43, 0x48, 0x9f, 0x78, 0xa2, 0xca, 0x46, 0xe9, 0xda, 0xe1, 0x41, 0x79, - 0x0e, 0x1f, 0x8b, 0x36, 0x3e, 0x66, 0x5f, 0xd0, 0xd7, 
0x2d, 0x98, 0xe7, 0x91, 0x2f, 0x07, 0xef, - 0x7f, 0x0f, 0x89, 0xa9, 0x26, 0x49, 0xc5, 0x44, 0xd6, 0x49, 0xb0, 0xbb, 0xf8, 0x92, 0x18, 0xd0, - 0xf9, 0xda, 0xf1, 0xda, 0xc2, 0xc7, 0xed, 0x9c, 0xfd, 0x2f, 0xf3, 0x30, 0x2e, 0x3c, 0xf8, 0x45, - 0x68, 0x98, 0x17, 0x8d, 0x25, 0xf1, 0x78, 0x62, 0x49, 0x4c, 0x1b, 0xc8, 0x27, 0x13, 0x15, 0x26, - 0x84, 0xe9, 0x96, 0x13, 0x46, 0x37, 0x88, 0x13, 0x44, 0x1b, 0xc4, 0xe1, 0xb6, 0x3b, 0xf9, 0x63, - 0xdb, 0x19, 0x29, 0x15, 0xcd, 0xad, 0x24, 0x31, 0xdc, 0x4d, 0x1f, 0xed, 0x01, 0x62, 0x06, 0x48, - 0x81, 0xe3, 0x85, 0xfc, 0x5b, 0x5c, 0xf1, 0x66, 0x70, 0xbc, 0x56, 0x67, 0x45, 0xab, 0xe8, 0x56, - 0x17, 0x35, 0x9c, 0xd2, 0x82, 0x66, 0x58, 0x36, 0x34, 0xa8, 0x61, 0xd9, 0x70, 0x1f, 0xd7, 0x1a, - 0x0f, 0xa6, 0xba, 0x82, 0x30, 0xbc, 0x05, 0x25, 0x65, 0x00, 0x28, 0x0e, 0x9d, 0xde, 0xb1, 0x4c, - 0x92, 0x14, 0xb8, 0x1a, 0x25, 0x36, 0x3e, 0x8d, 0xc9, 0xd9, 0xff, 0x38, 0x67, 0x34, 0xc8, 0x27, - 0x71, 0x0d, 0x8a, 0x4e, 0x18, 0xba, 0x5b, 0x1e, 0x69, 0x8a, 0x1d, 0xfb, 0xf1, 0xac, 0x1d, 0x6b, - 0x34, 0xc3, 0x8c, 0x30, 0x17, 0x44, 0x4d, 0xac, 0x68, 0xa0, 0x1b, 0xdc, 0x42, 0x6a, 0x4f, 0xf2, - 0xfc, 0x83, 0x51, 0x03, 0x69, 0x43, 0xb5, 0x47, 0xb0, 0xa8, 0x8f, 0xbe, 0xc0, 0x4d, 0xd8, 0x6e, - 0x7a, 0xfe, 0x3d, 0xef, 0xba, 0xef, 0x4b, 0xb7, 0xbb, 0xc1, 0x08, 0x4e, 0x4b, 0xc3, 0x35, 0x55, - 0x1d, 0x9b, 0xd4, 0x06, 0x0b, 0x54, 0xf4, 0xdd, 0x70, 0x86, 0x92, 0x36, 0x9d, 0x67, 0x42, 0x44, - 0x60, 0x52, 0x84, 0x87, 0x90, 0x65, 0x62, 0xec, 0x52, 0xd9, 0x79, 0xb3, 0x76, 0xac, 0xf4, 0xbb, - 0x69, 0x92, 0xc0, 0x49, 0x9a, 0xf6, 0x4f, 0x5a, 0xc0, 0xcc, 0xfe, 0x4f, 0x81, 0x65, 0xf8, 0xac, - 0xc9, 0x32, 0xcc, 0x64, 0x0d, 0x72, 0x06, 0xb7, 0xf0, 0x02, 0x5f, 0x59, 0xb5, 0xc0, 0xbf, 0xbf, - 0x2f, 0xcc, 0x07, 0xfa, 0x73, 0xb2, 0xf6, 0xff, 0xb1, 0xf8, 0x21, 0xa6, 0x3c, 0xf1, 0xd1, 0xf7, - 0x40, 0xb1, 0xe1, 0xb4, 0x9d, 0x06, 0x8f, 0x47, 0x9d, 0xa9, 0xd5, 0x31, 0x2a, 0xcd, 0x2d, 0x89, - 0x1a, 0x5c, 0x4b, 0x21, 0xc3, 0x8c, 0x14, 0x65, 0x71, 0x5f, 0xcd, 0x84, 0x6a, 0x72, 0x76, 0x07, - 0xc6, 0x0d, 0x62, 0x0f, 0x55, 0xa4, 0xfd, 0x1e, 0x7e, 0xc5, 0xaa, 0xb0, 0x38, 0xbb, 0x30, 0xed, - 0x69, 0xff, 0xe9, 0x85, 0x22, 0xc5, 0x94, 0x8f, 0xf7, 0xbb, 0x44, 0xd9, 0xed, 0xa3, 0xb9, 0x35, - 0x24, 0xc8, 0xe0, 0x6e, 0xca, 0xf6, 0x8f, 0x5b, 0xf0, 0xa8, 0x8e, 0xa8, 0x05, 0x49, 0xe8, 0xa7, - 0x27, 0xae, 0x40, 0xd1, 0x6f, 0x93, 0xc0, 0x89, 0xfc, 0x40, 0xdc, 0x1a, 0x57, 0xe5, 0xa0, 0xdf, - 0x16, 0xe5, 0x47, 0x22, 0xa0, 0xa4, 0xa4, 0x2e, 0xcb, 0xb1, 0xaa, 0x49, 0xe5, 0x18, 0x36, 0x18, - 0xa1, 0x08, 0x60, 0xc1, 0xce, 0x00, 0xf6, 0x64, 0x1a, 0x62, 0x01, 0xb1, 0xff, 0xc0, 0xe2, 0x0b, - 0x4b, 0xef, 0x3a, 0x7a, 0x0f, 0xa6, 0x76, 0x9d, 0xa8, 0xb1, 0xbd, 0x7c, 0xbf, 0x1d, 0x70, 0xf5, - 0xb8, 0x1c, 0xa7, 0xa7, 0xfb, 0x8d, 0x93, 0xf6, 0x91, 0xb1, 0x55, 0xde, 0x6a, 0x82, 0x18, 0xee, - 0x22, 0x8f, 0x36, 0x60, 0x94, 0x95, 0x31, 0xf3, 0xef, 0xb0, 0x17, 0x6b, 0x90, 0xd5, 0x9a, 0x7a, - 0x75, 0x5e, 0x8d, 0xe9, 0x60, 0x9d, 0xa8, 0xfd, 0xa5, 0x3c, 0xdf, 0xed, 0x8c, 0xdb, 0x7e, 0x0a, - 0x46, 0xda, 0x7e, 0x73, 0xa9, 0x5a, 0xc1, 0x62, 0x16, 0xd4, 0x35, 0x52, 0xe3, 0xc5, 0x58, 0xc2, - 0xd1, 0x2b, 0x00, 0xe4, 0x7e, 0x44, 0x02, 0xcf, 0x69, 0x29, 0x2b, 0x19, 0x65, 0x17, 0x5a, 0xf1, - 0xd7, 0xfc, 0xe8, 0x4e, 0x48, 0xbe, 0x6b, 0x59, 0xa1, 0x60, 0x0d, 0x1d, 0x5d, 0x03, 0x68, 0x07, - 0xfe, 0x9e, 0xdb, 0x64, 0xfe, 0x84, 0x79, 0xd3, 0x86, 0xa4, 0xa6, 0x20, 0x58, 0xc3, 0x42, 0xaf, - 0xc0, 0x78, 0xc7, 0x0b, 0x39, 0x87, 0xe2, 0x6c, 0x88, 0x70, 0x8c, 0xc5, 0xd8, 0xba, 0xe1, 0x8e, - 0x0e, 0xc4, 0x26, 0x2e, 0x5a, 0x80, 0xe1, 0xc8, 0x61, 0x36, 0x11, 0x43, 0xd9, 
0xc6, 0x9c, 0xeb, - 0x14, 0x43, 0x8f, 0x86, 0x4c, 0x2b, 0x60, 0x51, 0x11, 0xbd, 0x25, 0x9d, 0x33, 0xf8, 0x59, 0x2f, - 0xac, 0xa8, 0x07, 0xbb, 0x17, 0x34, 0xd7, 0x0c, 0x61, 0x9d, 0x6d, 0xd0, 0xb2, 0xbf, 0x5e, 0x02, - 0x88, 0xd9, 0x71, 0xf4, 0x7e, 0xd7, 0x79, 0xf4, 0x4c, 0x6f, 0x06, 0xfe, 0xe4, 0x0e, 0x23, 0xf4, - 0xfd, 0x16, 0x8c, 0x3a, 0xad, 0x96, 0xdf, 0x70, 0x22, 0x36, 0xca, 0xb9, 0xde, 0xe7, 0xa1, 0x68, - 0x7f, 0x21, 0xae, 0xc1, 0xbb, 0xf0, 0xbc, 0x5c, 0x78, 0x1a, 0xa4, 0x6f, 0x2f, 0xf4, 0x86, 0xd1, - 0xa7, 0xa4, 0x94, 0xc6, 0x97, 0xc7, 0x6c, 0x52, 0x4a, 0x2b, 0xb1, 0xa3, 0x5f, 0x13, 0xd0, 0xd0, - 0x1d, 0x23, 0xd2, 0x5e, 0x21, 0x3b, 0xe8, 0x84, 0xc1, 0x95, 0xf6, 0x0b, 0xb2, 0x87, 0x6a, 0xba, - 0x37, 0xd9, 0x50, 0x76, 0x64, 0x16, 0x4d, 0xfc, 0xe9, 0xe3, 0x49, 0xf6, 0x2e, 0x4c, 0x36, 0xcd, - 0xbb, 0x5d, 0xac, 0xa6, 0x27, 0xb3, 0xe8, 0x26, 0x58, 0x81, 0xf8, 0x36, 0x4f, 0x00, 0x70, 0x92, - 0x30, 0xaa, 0x71, 0xbf, 0xbe, 0xaa, 0xb7, 0xe9, 0x0b, 0x6b, 0x7c, 0x3b, 0x73, 0x2e, 0xf7, 0xc3, - 0x88, 0xec, 0x52, 0xcc, 0xf8, 0xd2, 0x5e, 0x13, 0x75, 0xb1, 0xa2, 0x82, 0x5e, 0x87, 0x61, 0xe6, - 0x18, 0x1c, 0xce, 0x14, 0xb3, 0x95, 0x89, 0x66, 0x4c, 0x8b, 0x78, 0x53, 0xb1, 0xbf, 0x21, 0x16, - 0x14, 0xd0, 0x0d, 0x19, 0xf8, 0x26, 0xac, 0x7a, 0x77, 0x42, 0xc2, 0x02, 0xdf, 0x94, 0x16, 0x3f, - 0x1e, 0xc7, 0xb4, 0xe1, 0xe5, 0xa9, 0x79, 0x0f, 0x8c, 0x9a, 0x94, 0x39, 0x12, 0xff, 0x65, 0x3a, - 0x85, 0x19, 0xc8, 0xee, 0x9e, 0x99, 0x72, 0x21, 0x1e, 0xce, 0xbb, 0x26, 0x09, 0x9c, 0xa4, 0x49, - 0x19, 0x4d, 0xbe, 0x73, 0x85, 0x3d, 0x7f, 0xbf, 0xfd, 0xcf, 0xe5, 0x6b, 0x76, 0xc9, 0xf0, 0x12, - 0x2c, 0xea, 0x9f, 0xea, 0xad, 0x3f, 0xeb, 0xc1, 0x54, 0x72, 0x8b, 0x3e, 0x54, 0x2e, 0xe3, 0xf7, - 0x0b, 0x30, 0x61, 0x2e, 0x29, 0x34, 0x0f, 0x25, 0x41, 0x44, 0x45, 0x61, 0x55, 0xbb, 0x64, 0x55, - 0x02, 0x70, 0x8c, 0xc3, 0x82, 0xef, 0xb2, 0xea, 0x9a, 0x1d, 0x66, 0x1c, 0x7c, 0x57, 0x41, 0xb0, - 0x86, 0x45, 0xe5, 0xa5, 0x0d, 0xdf, 0x8f, 0xd4, 0xa5, 0xa2, 0xd6, 0xdd, 0x22, 0x2b, 0xc5, 0x02, - 0x4a, 0x2f, 0x93, 0x1d, 0x12, 0x78, 0xa4, 0x65, 0x06, 0x77, 0x53, 0x97, 0xc9, 0x4d, 0x1d, 0x88, - 0x4d, 0x5c, 0x7a, 0x4b, 0xfa, 0x21, 0x5b, 0xc8, 0x42, 0x2a, 0x8b, 0xed, 0x5a, 0xeb, 0xdc, 0xc5, - 0x5e, 0xc2, 0xd1, 0xe7, 0xe1, 0x51, 0xe5, 0x11, 0x8f, 0xb9, 0xa2, 0x5a, 0xb6, 0x38, 0x6c, 0x28, - 0x51, 0x1e, 0x5d, 0x4a, 0x47, 0xc3, 0x59, 0xf5, 0xd1, 0x6b, 0x30, 0x21, 0x38, 0x77, 0x49, 0x71, - 0xc4, 0xb4, 0x9d, 0xb8, 0x69, 0x40, 0x71, 0x02, 0x5b, 0x86, 0xa7, 0x63, 0xcc, 0xb3, 0xa4, 0x50, - 0xec, 0x0e, 0x4f, 0xa7, 0xc3, 0x71, 0x57, 0x0d, 0xb4, 0x00, 0x93, 0x9c, 0xb5, 0x72, 0xbd, 0x2d, - 0x3e, 0x27, 0xc2, 0xdd, 0x46, 0x6d, 0xa9, 0xdb, 0x26, 0x18, 0x27, 0xf1, 0xd1, 0xcb, 0x30, 0xe6, - 0x04, 0x8d, 0x6d, 0x37, 0x22, 0x8d, 0xa8, 0x13, 0x70, 0x3f, 0x1c, 0xcd, 0xf8, 0x64, 0x41, 0x83, - 0x61, 0x03, 0xd3, 0x7e, 0x1f, 0xce, 0xa4, 0x78, 0xea, 0xd1, 0x85, 0xe3, 0xb4, 0x5d, 0xf9, 0x4d, - 0x09, 0x0b, 0xd5, 0x85, 0x5a, 0x55, 0x7e, 0x8d, 0x86, 0x45, 0x57, 0x27, 0xf3, 0xe8, 0xd3, 0xb2, - 0xa7, 0xa8, 0xd5, 0xb9, 0x22, 0x01, 0x38, 0xc6, 0xb1, 0xff, 0x67, 0x0e, 0x26, 0x53, 0x94, 0xef, - 0x2c, 0x83, 0x47, 0x42, 0xf6, 0x88, 0x13, 0x76, 0x98, 0xd1, 0x0e, 0x73, 0xc7, 0x88, 0x76, 0x98, - 0xef, 0x17, 0xed, 0xb0, 0xf0, 0x41, 0xa2, 0x1d, 0x9a, 0x23, 0x36, 0x34, 0xd0, 0x88, 0xa5, 0x44, - 0x48, 0x1c, 0x3e, 0x66, 0x84, 0x44, 0x63, 0xd0, 0x47, 0x06, 0x18, 0xf4, 0xaf, 0xe6, 0x60, 0x2a, - 0x69, 0x24, 0x77, 0x0a, 0xea, 0xd8, 0xd7, 0x0d, 0x75, 0x6c, 0x7a, 0x3e, 0x9c, 0xa4, 0xe9, 0x5e, - 0x96, 0x6a, 0x16, 0x27, 0x54, 0xb3, 0x9f, 0x1c, 0x88, 0x5a, 0x6f, 0x35, 0xed, 0xdf, 0xcd, 0xc1, - 0xb9, 
0x64, 0x95, 0xa5, 0x96, 0xe3, 0xee, 0x9e, 0xc2, 0xd8, 0xdc, 0x36, 0xc6, 0xe6, 0xd9, 0x41, - 0xbe, 0x86, 0x75, 0x2d, 0x73, 0x80, 0xde, 0x4c, 0x0c, 0xd0, 0xfc, 0xe0, 0x24, 0x7b, 0x8f, 0xd2, - 0x37, 0xf2, 0x70, 0x29, 0xb5, 0x5e, 0xac, 0xcd, 0x5c, 0x31, 0xb4, 0x99, 0xd7, 0x12, 0xda, 0x4c, - 0xbb, 0x77, 0xed, 0x93, 0x51, 0x6f, 0x0a, 0x17, 0x4a, 0x16, 0x11, 0xef, 0x01, 0x55, 0x9b, 0x86, - 0x0b, 0xa5, 0x22, 0x84, 0x4d, 0xba, 0xdf, 0x4c, 0x2a, 0xcd, 0x7f, 0x63, 0xc1, 0xf9, 0xd4, 0xb9, - 0x39, 0x05, 0x15, 0xd6, 0x9a, 0xa9, 0xc2, 0x7a, 0x6a, 0xe0, 0xd5, 0x9a, 0xa1, 0xd3, 0xfa, 0x8d, - 0x42, 0xc6, 0xb7, 0x30, 0x01, 0xfd, 0x36, 0x8c, 0x3a, 0x8d, 0x06, 0x09, 0xc3, 0x55, 0xbf, 0xa9, - 0x22, 0xc4, 0x3d, 0xcb, 0xe4, 0xac, 0xb8, 0xf8, 0xe8, 0xa0, 0x3c, 0x9b, 0x24, 0x11, 0x83, 0xb1, - 0x4e, 0xc1, 0x0c, 0x6a, 0x99, 0x3b, 0xd1, 0xa0, 0x96, 0xd7, 0x00, 0xf6, 0x14, 0xb7, 0x9e, 0x14, - 0xf2, 0x35, 0x3e, 0x5e, 0xc3, 0x42, 0x5f, 0x80, 0x62, 0x28, 0xae, 0x71, 0xb1, 0x14, 0x9f, 0x1f, - 0x70, 0xae, 0x9c, 0x0d, 0xd2, 0x32, 0x7d, 0xf5, 0x95, 0x3e, 0x44, 0x91, 0x44, 0xdf, 0x01, 0x53, - 0x21, 0x0f, 0x05, 0xb3, 0xd4, 0x72, 0x42, 0xe6, 0x07, 0x21, 0x56, 0x21, 0x73, 0xc0, 0xaf, 0x27, - 0x60, 0xb8, 0x0b, 0x1b, 0xad, 0xc8, 0x8f, 0x62, 0x71, 0x6b, 0xf8, 0xc2, 0xbc, 0x12, 0x7f, 0x90, - 0xc8, 0x1f, 0x76, 0x36, 0x39, 0xfc, 0x6c, 0xe0, 0xb5, 0x9a, 0xe8, 0x0b, 0x00, 0x74, 0xf9, 0x08, - 0x5d, 0xc2, 0x48, 0xf6, 0xe1, 0x49, 0x4f, 0x95, 0x66, 0xaa, 0xe5, 0x27, 0x73, 0x5e, 0xac, 0x28, - 0x22, 0x58, 0x23, 0x68, 0x7f, 0xb5, 0x00, 0x8f, 0xf5, 0x38, 0x23, 0xd1, 0x82, 0xf9, 0x04, 0xfa, - 0x74, 0x52, 0xb8, 0x9e, 0x4d, 0xad, 0x6c, 0x48, 0xdb, 0x89, 0xa5, 0x98, 0xfb, 0xc0, 0x4b, 0xf1, - 0x07, 0x2d, 0x4d, 0xed, 0xc1, 0x8d, 0xf9, 0x3e, 0x7b, 0xcc, 0xb3, 0xff, 0x04, 0xf5, 0x20, 0x9b, - 0x29, 0xca, 0x84, 0x6b, 0x03, 0x77, 0x67, 0x60, 0xed, 0xc2, 0xe9, 0x2a, 0x7f, 0xbf, 0x64, 0xc1, - 0xe3, 0xa9, 0xfd, 0x35, 0x4c, 0x36, 0xe6, 0xa1, 0xd4, 0xa0, 0x85, 0x9a, 0xaf, 0x5a, 0xec, 0xc4, - 0x2b, 0x01, 0x38, 0xc6, 0x31, 0x2c, 0x33, 0x72, 0x7d, 0x2d, 0x33, 0xfe, 0x85, 0x05, 0x5d, 0xfb, - 0xe3, 0x14, 0x0e, 0xea, 0xaa, 0x79, 0x50, 0x7f, 0x7c, 0x90, 0xb9, 0xcc, 0x38, 0xa3, 0xff, 0x68, - 0x12, 0x1e, 0xc9, 0xf0, 0xd5, 0xd8, 0x83, 0xe9, 0xad, 0x06, 0x31, 0xbd, 0x00, 0xc5, 0xc7, 0xa4, - 0x3a, 0x4c, 0xf6, 0x74, 0x19, 0x64, 0xf9, 0x88, 0xa6, 0xbb, 0x50, 0x70, 0x77, 0x13, 0xe8, 0x4b, - 0x16, 0x9c, 0x75, 0xee, 0x85, 0x5d, 0xd9, 0x43, 0xc5, 0x9a, 0x79, 0x21, 0x55, 0x09, 0xd2, 0x27, - 0xdb, 0x28, 0x4f, 0xd0, 0x94, 0x86, 0x85, 0x53, 0xdb, 0x42, 0x58, 0xc4, 0x0c, 0xa5, 0xec, 0x7c, - 0x0f, 0x3f, 0xd5, 0x34, 0xa7, 0x1a, 0x7e, 0x64, 0x4b, 0x08, 0x56, 0x74, 0xd0, 0x3b, 0x50, 0xda, - 0x92, 0x9e, 0x6e, 0x29, 0x57, 0x42, 0x3c, 0x90, 0xbd, 0xfd, 0xff, 0xf8, 0x03, 0xa5, 0x42, 0xc2, - 0x31, 0x51, 0xf4, 0x1a, 0xe4, 0xbd, 0xcd, 0xb0, 0x57, 0x8e, 0xa3, 0x84, 0x4d, 0x13, 0xf7, 0x06, - 0x5f, 0x5b, 0xa9, 0x63, 0x5a, 0x11, 0xdd, 0x80, 0x7c, 0xb0, 0xd1, 0x14, 0x1a, 0xbc, 0xd4, 0x33, - 0x1c, 0x2f, 0x56, 0x32, 0x7a, 0xc5, 0x28, 0xe1, 0xc5, 0x0a, 0xa6, 0x24, 0x50, 0x0d, 0x86, 0x98, - 0x83, 0x83, 0xb8, 0x0f, 0x52, 0x39, 0xdf, 0x1e, 0x8e, 0x42, 0xdc, 0x65, 0x9c, 0x21, 0x60, 0x4e, - 0x08, 0xad, 0xc3, 0x70, 0x83, 0xe5, 0xc3, 0x11, 0x01, 0xab, 0x3f, 0x95, 0xaa, 0xab, 0xeb, 0x91, - 0x28, 0x48, 0xa8, 0xae, 0x18, 0x06, 0x16, 0xb4, 0x18, 0x55, 0xd2, 0xde, 0xde, 0x0c, 0x99, 0xac, - 0x9f, 0x45, 0xb5, 0x47, 0xfe, 0x2b, 0x41, 0x95, 0x61, 0x60, 0x41, 0x0b, 0x7d, 0x06, 0x72, 0x9b, - 0x0d, 0xe1, 0xff, 0x90, 0xaa, 0xb4, 0x33, 0x1d, 0xfa, 0x17, 0x87, 0x0f, 0x0f, 0xca, 0xb9, 0x95, - 0x25, 0x9c, 0xdb, 0x6c, 0xa0, 
0x35, 0x18, 0xd9, 0xe4, 0x2e, 0xc0, 0x42, 0x2f, 0xf7, 0x64, 0xba, - 0x77, 0x72, 0x97, 0x97, 0x30, 0xb7, 0xdb, 0x17, 0x00, 0x2c, 0x89, 0xb0, 0x10, 0x9c, 0xca, 0x95, - 0x59, 0xc4, 0xa2, 0x9e, 0x3b, 0x9e, 0xfb, 0x39, 0xbf, 0x9f, 0x63, 0x87, 0x68, 0xac, 0x51, 0xa4, - 0xab, 0xda, 0x91, 0x19, 0x2c, 0x45, 0xac, 0x8e, 0xd4, 0x55, 0xdd, 0x27, 0xb9, 0x27, 0x5f, 0xd5, - 0x0a, 0x09, 0xc7, 0x44, 0xd1, 0x0e, 0x8c, 0xef, 0x85, 0xed, 0x6d, 0x22, 0xb7, 0x34, 0x0b, 0xdd, - 0x91, 0x71, 0x85, 0xdd, 0x15, 0x88, 0x6e, 0x10, 0x75, 0x9c, 0x56, 0xd7, 0x29, 0xc4, 0x5e, 0xb5, - 0xef, 0xea, 0xc4, 0xb0, 0x49, 0x9b, 0x0e, 0xff, 0x7b, 0x1d, 0x7f, 0x63, 0x3f, 0x22, 0x22, 0x78, - 0x75, 0xea, 0xf0, 0xbf, 0xc1, 0x51, 0xba, 0x87, 0x5f, 0x00, 0xb0, 0x24, 0x82, 0xee, 0x8a, 0xe1, - 0x61, 0xa7, 0xe7, 0x54, 0x76, 0x30, 0xa5, 0xd4, 0x14, 0xb2, 0xda, 0xa0, 0xb0, 0xd3, 0x32, 0x26, - 0xc5, 0x4e, 0xc9, 0xf6, 0xb6, 0x1f, 0xf9, 0x5e, 0xe2, 0x84, 0x9e, 0xce, 0x3e, 0x25, 0x6b, 0x29, - 0xf8, 0xdd, 0xa7, 0x64, 0x1a, 0x16, 0x4e, 0x6d, 0x0b, 0x35, 0x61, 0xa2, 0xed, 0x07, 0xd1, 0x3d, - 0x3f, 0x90, 0xeb, 0x0b, 0xf5, 0xd0, 0x2b, 0x18, 0x98, 0xa2, 0x45, 0x16, 0x4c, 0xdd, 0x84, 0xe0, - 0x04, 0x4d, 0xf4, 0x39, 0x18, 0x09, 0x1b, 0x4e, 0x8b, 0x54, 0x6f, 0xcf, 0x9c, 0xc9, 0xbe, 0x7e, - 0xea, 0x1c, 0x25, 0x63, 0x75, 0xb1, 0xc9, 0x11, 0x28, 0x58, 0x92, 0x43, 0x2b, 0x30, 0xc4, 0x32, - 0x22, 0xb0, 0xb8, 0xdb, 0x19, 0x31, 0xa1, 0xba, 0x2c, 0x4c, 0xf9, 0xd9, 0xc4, 0x8a, 0x31, 0xaf, - 0x4e, 0xf7, 0x80, 0x60, 0xaf, 0xfd, 0x70, 0xe6, 0x5c, 0xf6, 0x1e, 0x10, 0x5c, 0xf9, 0xed, 0x7a, - 0xaf, 0x3d, 0xa0, 0x90, 0x70, 0x4c, 0x94, 0x9e, 0xcc, 0xf4, 0x34, 0x7d, 0xa4, 0x87, 0x41, 0x4b, - 0xe6, 0x59, 0xca, 0x4e, 0x66, 0x7a, 0x92, 0x52, 0x12, 0xf6, 0xef, 0x8e, 0x74, 0xf3, 0x2c, 0x4c, - 0x20, 0xfb, 0x4b, 0x56, 0xd7, 0x5b, 0xdd, 0xa7, 0x07, 0xd5, 0x0f, 0x9d, 0x20, 0xb7, 0xfa, 0x25, - 0x0b, 0x1e, 0x69, 0xa7, 0x7e, 0x88, 0x60, 0x00, 0x06, 0x53, 0x33, 0xf1, 0x4f, 0x57, 0xb1, 0xf1, - 0xd3, 0xe1, 0x38, 0xa3, 0xa5, 0xa4, 0x44, 0x90, 0xff, 0xc0, 0x12, 0xc1, 0x2a, 0x14, 0x19, 0x93, - 0xd9, 0x27, 0x3f, 0x5c, 0x52, 0x30, 0x62, 0xac, 0xc4, 0x92, 0xa8, 0x88, 0x15, 0x09, 0xf4, 0x43, - 0x16, 0x5c, 0x4c, 0x76, 0x1d, 0x13, 0x06, 0x16, 0x91, 0xe4, 0xb9, 0x2c, 0xb8, 0x22, 0xbe, 0xff, - 0x62, 0xad, 0x17, 0xf2, 0x51, 0x3f, 0x04, 0xdc, 0xbb, 0x31, 0x54, 0x49, 0x11, 0x46, 0x87, 0x4d, - 0x05, 0xfc, 0x00, 0x02, 0xe9, 0x0b, 0x30, 0xb6, 0xeb, 0x77, 0xbc, 0x48, 0xd8, 0xbf, 0x08, 0x8f, - 0x45, 0xf6, 0xe0, 0xbc, 0xaa, 0x95, 0x63, 0x03, 0x2b, 0x21, 0xc6, 0x16, 0x1f, 0x58, 0x8c, 0x7d, - 0x3b, 0x91, 0xcd, 0xbd, 0x94, 0x1d, 0xb1, 0x50, 0x48, 0xfc, 0xc7, 0xc8, 0xe9, 0x7e, 0xba, 0xb2, - 0xd1, 0x4f, 0x5b, 0x29, 0x4c, 0x3d, 0x97, 0x96, 0x5f, 0x35, 0xa5, 0xe5, 0x2b, 0x49, 0x69, 0xb9, - 0x4b, 0xf9, 0x6a, 0x08, 0xca, 0x83, 0x87, 0xbd, 0x1e, 0x34, 0x8e, 0x9c, 0xdd, 0x82, 0xcb, 0xfd, - 0xae, 0x25, 0x66, 0x08, 0xd5, 0x54, 0x4f, 0x6d, 0xb1, 0x21, 0x54, 0xb3, 0x5a, 0xc1, 0x0c, 0x32, - 0x68, 0xa0, 0x11, 0xfb, 0xbf, 0x5b, 0x90, 0xaf, 0xf9, 0xcd, 0x53, 0x50, 0x26, 0x7f, 0xd6, 0x50, - 0x26, 0x3f, 0x96, 0x91, 0x65, 0x3f, 0x53, 0x75, 0xbc, 0x9c, 0x50, 0x1d, 0x5f, 0xcc, 0x22, 0xd0, - 0x5b, 0x51, 0xfc, 0x13, 0x79, 0x18, 0xad, 0xf9, 0x4d, 0x65, 0x85, 0xfc, 0x1b, 0x0f, 0x62, 0x85, - 0x9c, 0x19, 0x16, 0x56, 0xa3, 0xcc, 0xec, 0xa7, 0xa4, 0x13, 0xde, 0x9f, 0x33, 0x63, 0xe4, 0x37, - 0x89, 0xbb, 0xb5, 0x1d, 0x91, 0x66, 0xf2, 0x73, 0x4e, 0xcf, 0x18, 0xf9, 0xbf, 0x5a, 0x30, 0x99, - 0x68, 0x1d, 0xb5, 0x60, 0xbc, 0xa5, 0x6b, 0x02, 0xc5, 0x3a, 0x7d, 0x20, 0x25, 0xa2, 0x30, 0xe6, - 0xd4, 0x8a, 0xb0, 0x49, 0x1c, 0xcd, 0x01, 0xa8, 0x97, 
0x3a, 0xa9, 0x01, 0x63, 0x5c, 0xbf, 0x7a, - 0xca, 0x0b, 0xb1, 0x86, 0x81, 0x5e, 0x84, 0xd1, 0xc8, 0x6f, 0xfb, 0x2d, 0x7f, 0x6b, 0xff, 0x26, - 0x91, 0xa1, 0x6d, 0x94, 0x89, 0xd6, 0x7a, 0x0c, 0xc2, 0x3a, 0x9e, 0xfd, 0x53, 0x79, 0xfe, 0xa1, - 0x5e, 0xe4, 0x7e, 0x6b, 0x4d, 0x7e, 0xb4, 0xd7, 0xe4, 0x37, 0x2c, 0x98, 0xa2, 0xad, 0x33, 0x73, - 0x11, 0x79, 0xd9, 0xaa, 0xf4, 0x3b, 0x56, 0x8f, 0xf4, 0x3b, 0x57, 0xe8, 0xd9, 0xd5, 0xf4, 0x3b, - 0x91, 0xd0, 0xa0, 0x69, 0x87, 0x13, 0x2d, 0xc5, 0x02, 0x2a, 0xf0, 0x48, 0x10, 0x08, 0x1f, 0x28, - 0x1d, 0x8f, 0x04, 0x01, 0x16, 0x50, 0x99, 0x9d, 0xa7, 0x90, 0x91, 0x9d, 0x87, 0x05, 0xea, 0x13, - 0x86, 0x05, 0x82, 0xed, 0xd1, 0x02, 0xf5, 0x49, 0x8b, 0x83, 0x18, 0xc7, 0xfe, 0xb9, 0x3c, 0x8c, - 0xd5, 0xfc, 0x66, 0xfc, 0x56, 0xf6, 0x82, 0xf1, 0x56, 0x76, 0x39, 0xf1, 0x56, 0x36, 0xa5, 0xe3, - 0x7e, 0xeb, 0x65, 0xec, 0xc3, 0x7a, 0x19, 0xfb, 0xe7, 0x16, 0x9b, 0xb5, 0xca, 0x5a, 0x5d, 0x64, - 0x07, 0x7e, 0x0e, 0x46, 0xd9, 0x81, 0xc4, 0x9c, 0xee, 0xe4, 0x03, 0x12, 0x0b, 0xbc, 0xbf, 0x16, - 0x17, 0x63, 0x1d, 0x07, 0x5d, 0x85, 0x62, 0x48, 0x9c, 0xa0, 0xb1, 0xad, 0xce, 0x38, 0xf1, 0xbc, - 0xc2, 0xcb, 0xb0, 0x82, 0xa2, 0x37, 0xe2, 0x18, 0x71, 0xf9, 0xec, 0x3c, 0xb7, 0x7a, 0x7f, 0xf8, - 0x16, 0xc9, 0x0e, 0x0c, 0x67, 0xbf, 0x09, 0xa8, 0x1b, 0x7f, 0x80, 0xe0, 0x48, 0x65, 0x33, 0x38, - 0x52, 0xa9, 0x2b, 0x30, 0xd2, 0x9f, 0x5a, 0x30, 0x51, 0xf3, 0x9b, 0x74, 0xeb, 0x7e, 0x33, 0xed, - 0x53, 0x3d, 0x40, 0xe6, 0x70, 0x8f, 0x00, 0x99, 0x7f, 0xcf, 0x82, 0x91, 0x9a, 0xdf, 0x3c, 0x05, - 0xbd, 0xfb, 0xab, 0xa6, 0xde, 0xfd, 0xd1, 0x8c, 0x25, 0x91, 0xa1, 0x6a, 0xff, 0x85, 0x3c, 0x8c, - 0xd3, 0x7e, 0xfa, 0x5b, 0x72, 0x96, 0x8c, 0x11, 0xb1, 0x06, 0x18, 0x11, 0xca, 0xe6, 0xfa, 0xad, - 0x96, 0x7f, 0x2f, 0x39, 0x63, 0x2b, 0xac, 0x14, 0x0b, 0x28, 0x7a, 0x06, 0x8a, 0xed, 0x80, 0xec, - 0xb9, 0xbe, 0xe0, 0x1f, 0xb5, 0x57, 0x8c, 0x9a, 0x28, 0xc7, 0x0a, 0x83, 0xca, 0x5d, 0xa1, 0xeb, - 0x35, 0x88, 0x4c, 0xb2, 0x5d, 0x60, 0x79, 0xb8, 0x78, 0xe4, 0x6b, 0xad, 0x1c, 0x1b, 0x58, 0xe8, - 0x4d, 0x28, 0xb1, 0xff, 0xec, 0x44, 0x39, 0x7e, 0xde, 0x20, 0x91, 0x6e, 0x42, 0x10, 0xc0, 0x31, - 0x2d, 0x74, 0x0d, 0x20, 0x92, 0xd1, 0x91, 0x43, 0x11, 0xe3, 0x46, 0xf1, 0xda, 0x2a, 0x6e, 0x72, - 0x88, 0x35, 0x2c, 0xf4, 0x34, 0x94, 0x22, 0xc7, 0x6d, 0xdd, 0x72, 0x3d, 0x12, 0x32, 0x95, 0x73, - 0x5e, 0x66, 0x93, 0x10, 0x85, 0x38, 0x86, 0x53, 0x5e, 0x87, 0x39, 0x80, 0xf3, 0xac, 0x63, 0x45, - 0x86, 0xcd, 0x78, 0x9d, 0x5b, 0xaa, 0x14, 0x6b, 0x18, 0xf6, 0xcb, 0x70, 0xae, 0xe6, 0x37, 0x6b, - 0x7e, 0x10, 0xad, 0xf8, 0xc1, 0x3d, 0x27, 0x68, 0xca, 0xf9, 0x2b, 0xcb, 0xc4, 0x06, 0xf4, 0xec, - 0x19, 0xe2, 0x3b, 0xd3, 0x48, 0x59, 0xf0, 0x3c, 0xe3, 0x76, 0x8e, 0xe9, 0xd4, 0xd1, 0x60, 0xf7, - 0xae, 0x4a, 0x30, 0x78, 0xdd, 0x89, 0x08, 0xba, 0xcd, 0x92, 0x92, 0xc5, 0x57, 0x90, 0xa8, 0xfe, - 0x94, 0x96, 0x94, 0x2c, 0x06, 0xa6, 0xde, 0x59, 0x66, 0x7d, 0xfb, 0x57, 0xf3, 0xec, 0x34, 0x4a, - 0xe4, 0xdb, 0x43, 0x5f, 0x84, 0x89, 0x90, 0xdc, 0x72, 0xbd, 0xce, 0x7d, 0x29, 0x84, 0xf7, 0x70, - 0xcb, 0xa9, 0x2f, 0xeb, 0x98, 0x5c, 0x95, 0x67, 0x96, 0xe1, 0x04, 0x35, 0x3a, 0x4f, 0x41, 0xc7, - 0x5b, 0x08, 0xef, 0x84, 0x24, 0x10, 0xf9, 0xde, 0xd8, 0x3c, 0x61, 0x59, 0x88, 0x63, 0x38, 0x5d, - 0x97, 0xec, 0xcf, 0x9a, 0xef, 0x61, 0xdf, 0x8f, 0xe4, 0x4a, 0x66, 0x19, 0x83, 0xb4, 0x72, 0x6c, - 0x60, 0xa1, 0x15, 0x40, 0x61, 0xa7, 0xdd, 0x6e, 0xb1, 0x87, 0x7d, 0xa7, 0x75, 0x3d, 0xf0, 0x3b, - 0x6d, 0xfe, 0xea, 0x99, 0xe7, 0x81, 0x09, 0xeb, 0x5d, 0x50, 0x9c, 0x52, 0x83, 0x9e, 0x3e, 0x9b, - 0x21, 0xfb, 0xcd, 0x56, 0x77, 0x5e, 0xa8, 0xd7, 0xeb, 0xac, 0x08, 0x4b, 0x18, 
0x5d, 0x4c, 0xac, - 0x79, 0x8e, 0x39, 0x1c, 0x2f, 0x26, 0xac, 0x4a, 0xb1, 0x86, 0x81, 0x96, 0x61, 0x24, 0xdc, 0x0f, - 0x1b, 0x91, 0x88, 0xc8, 0x94, 0x91, 0xb9, 0xb3, 0xce, 0x50, 0xb4, 0x6c, 0x12, 0xbc, 0x0a, 0x96, - 0x75, 0xed, 0xef, 0x61, 0x97, 0x21, 0xcb, 0x0e, 0x16, 0x75, 0x02, 0x82, 0x76, 0x61, 0xbc, 0xcd, - 0xa6, 0x5c, 0xc4, 0xae, 0x16, 0xf3, 0xf6, 0xc2, 0x80, 0x52, 0xed, 0x3d, 0x7a, 0xd0, 0x28, 0xad, - 0x13, 0x13, 0x17, 0x6a, 0x3a, 0x39, 0x6c, 0x52, 0xb7, 0xbf, 0x8a, 0xd8, 0x99, 0x5b, 0xe7, 0xa2, - 0xea, 0x88, 0x30, 0x2d, 0x16, 0x7c, 0xf9, 0x6c, 0xb6, 0xce, 0x24, 0xfe, 0x22, 0x61, 0x9e, 0x8c, - 0x65, 0x5d, 0xf4, 0x06, 0x7b, 0xa5, 0xe6, 0x07, 0x5d, 0xbf, 0x24, 0xcd, 0x1c, 0xcb, 0x78, 0x90, - 0x16, 0x15, 0xb1, 0x46, 0x04, 0xdd, 0x82, 0x71, 0x91, 0x4c, 0x4a, 0x28, 0xc5, 0xf2, 0x86, 0xd2, - 0x63, 0x1c, 0xeb, 0xc0, 0xa3, 0x64, 0x01, 0x36, 0x2b, 0xa3, 0x2d, 0xb8, 0xa8, 0x65, 0x56, 0xbc, - 0x1e, 0x38, 0xec, 0xe5, 0xd2, 0x65, 0x9b, 0x48, 0x3b, 0x37, 0x1f, 0x3f, 0x3c, 0x28, 0x5f, 0x5c, - 0xef, 0x85, 0x88, 0x7b, 0xd3, 0x41, 0xb7, 0xe1, 0x1c, 0xf7, 0xe0, 0xab, 0x10, 0xa7, 0xd9, 0x72, - 0x3d, 0x75, 0x30, 0xf3, 0x75, 0x78, 0xfe, 0xf0, 0xa0, 0x7c, 0x6e, 0x21, 0x0d, 0x01, 0xa7, 0xd7, - 0x43, 0xaf, 0x42, 0xa9, 0xe9, 0x85, 0x62, 0x0c, 0x86, 0x8d, 0xa4, 0xa1, 0xa5, 0xca, 0x5a, 0x5d, - 0x7d, 0x7f, 0xfc, 0x07, 0xc7, 0x15, 0xd0, 0x16, 0x57, 0x8c, 0x29, 0x39, 0x74, 0x24, 0x3b, 0x41, - 0xbc, 0x58, 0x12, 0x86, 0x0f, 0x0f, 0xd7, 0x08, 0x2b, 0x1b, 0x58, 0xc3, 0xbd, 0xc7, 0x20, 0x8c, - 0x5e, 0x07, 0x44, 0x19, 0x35, 0xb7, 0x41, 0x16, 0x1a, 0x2c, 0x84, 0x38, 0xd3, 0x23, 0x16, 0x0d, - 0x9f, 0x09, 0x54, 0xef, 0xc2, 0xc0, 0x29, 0xb5, 0xd0, 0x0d, 0x7a, 0x90, 0xe9, 0xa5, 0xc2, 0x96, - 0x57, 0x32, 0xf7, 0x33, 0x15, 0xd2, 0x0e, 0x48, 0xc3, 0x89, 0x48, 0xd3, 0xa4, 0x88, 0x13, 0xf5, - 0xe8, 0x5d, 0xaa, 0xb2, 0x09, 0x81, 0x19, 0x36, 0xa3, 0x3b, 0xa3, 0x10, 0x95, 0x8b, 0xb7, 0xfd, - 0x30, 0x5a, 0x23, 0xd1, 0x3d, 0x3f, 0xd8, 0x11, 0x51, 0xca, 0xe2, 0x80, 0x99, 0x31, 0x08, 0xeb, - 0x78, 0x94, 0x0f, 0x66, 0xcf, 0xc4, 0xd5, 0x0a, 0x7b, 0xa1, 0x2b, 0xc6, 0xfb, 0xe4, 0x06, 0x2f, - 0xc6, 0x12, 0x2e, 0x51, 0xab, 0xb5, 0x25, 0xf6, 0xda, 0x96, 0x40, 0xad, 0xd6, 0x96, 0xb0, 0x84, - 0x23, 0xd2, 0x9d, 0x90, 0x75, 0x22, 0x5b, 0xab, 0xd9, 0x7d, 0x1d, 0x0c, 0x98, 0x93, 0xd5, 0x83, - 0x29, 0x95, 0x0a, 0x96, 0x87, 0x6f, 0x0b, 0x67, 0x26, 0xd9, 0x22, 0x19, 0x3c, 0xf6, 0x9b, 0xd2, - 0x13, 0x57, 0x13, 0x94, 0x70, 0x17, 0x6d, 0x23, 0x90, 0xc9, 0x54, 0xdf, 0x6c, 0x50, 0xf3, 0x50, - 0x0a, 0x3b, 0x1b, 0x4d, 0x7f, 0xd7, 0x71, 0x3d, 0xf6, 0x38, 0xa6, 0x31, 0x59, 0x75, 0x09, 0xc0, - 0x31, 0x0e, 0x5a, 0x81, 0xa2, 0x23, 0x95, 0xc0, 0x28, 0x3b, 0x6a, 0x81, 0x52, 0xfd, 0x72, 0x47, - 0x5e, 0xa9, 0xf6, 0x55, 0x75, 0xd1, 0x2b, 0x30, 0x2e, 0xfc, 0xb6, 0x78, 0x2c, 0x07, 0xf6, 0x78, - 0xa5, 0x19, 0xe6, 0xd7, 0x75, 0x20, 0x36, 0x71, 0xd1, 0x17, 0x60, 0x82, 0x52, 0x89, 0x0f, 0xb6, - 0x99, 0xb3, 0x83, 0x9c, 0x88, 0x5a, 0x96, 0x0f, 0xbd, 0x32, 0x4e, 0x10, 0x43, 0x4d, 0xb8, 0xe0, - 0x74, 0x22, 0x9f, 0x29, 0xd2, 0xcd, 0xf5, 0xbf, 0xee, 0xef, 0x10, 0x8f, 0xbd, 0x61, 0x15, 0x17, - 0x2f, 0x1f, 0x1e, 0x94, 0x2f, 0x2c, 0xf4, 0xc0, 0xc3, 0x3d, 0xa9, 0xa0, 0x3b, 0x30, 0x1a, 0xf9, - 0x2d, 0x66, 0x22, 0x4f, 0x59, 0x89, 0x47, 0xb2, 0x03, 0x01, 0xad, 0x2b, 0x34, 0x5d, 0x89, 0xa4, - 0xaa, 0x62, 0x9d, 0x0e, 0x5a, 0xe7, 0x7b, 0x8c, 0x85, 0x48, 0x25, 0xe1, 0xcc, 0xa3, 0xd9, 0x03, - 0xa3, 0x22, 0xa9, 0x9a, 0x5b, 0x50, 0xd4, 0xc4, 0x3a, 0x19, 0x74, 0x1d, 0xa6, 0xdb, 0x81, 0xeb, - 0xb3, 0x85, 0xad, 0x1e, 0x31, 0x66, 0xcc, 0xc4, 0x0e, 0xb5, 0x24, 0x02, 0xee, 0xae, 0x43, 0x85, - 0x4c, 
0x59, 0x38, 0x73, 0x9e, 0x67, 0x09, 0xe3, 0x8c, 0x37, 0x2f, 0xc3, 0x0a, 0x8a, 0x56, 0xd9, - 0xb9, 0xcc, 0xc5, 0xc1, 0x99, 0xd9, 0xec, 0x68, 0x0f, 0xba, 0xd8, 0xc8, 0xf9, 0x25, 0xf5, 0x17, - 0xc7, 0x14, 0xe8, 0xbd, 0x11, 0x6e, 0x3b, 0x01, 0xa9, 0x05, 0x7e, 0x83, 0x84, 0x5a, 0x54, 0xe6, - 0xc7, 0x78, 0x24, 0x47, 0x7a, 0x6f, 0xd4, 0xd3, 0x10, 0x70, 0x7a, 0x3d, 0xd4, 0xd4, 0x92, 0x63, - 0x53, 0x36, 0x34, 0x9c, 0xb9, 0xd0, 0xc3, 0xe0, 0x28, 0xc1, 0xb3, 0xc6, 0x6b, 0xd1, 0x28, 0x0e, - 0x71, 0x82, 0x26, 0xfa, 0x0e, 0x98, 0x12, 0x81, 0x8f, 0xe2, 0x71, 0xbf, 0x18, 0x5b, 0x32, 0xe2, - 0x04, 0x0c, 0x77, 0x61, 0xf3, 0x58, 0xd4, 0xce, 0x46, 0x8b, 0x88, 0x45, 0x78, 0xcb, 0xf5, 0x76, - 0xc2, 0x99, 0x4b, 0xec, 0xab, 0x45, 0x2c, 0xea, 0x24, 0x14, 0xa7, 0xd4, 0x98, 0xfd, 0x76, 0x98, - 0xee, 0xba, 0xb9, 0x8e, 0x15, 0xbf, 0xfd, 0x4f, 0x86, 0xa0, 0xa4, 0x94, 0xf2, 0x68, 0xde, 0x7c, - 0x6b, 0x39, 0x9f, 0x7c, 0x6b, 0x29, 0x52, 0xd9, 0x40, 0x7f, 0x5e, 0x59, 0x37, 0x0c, 0xf5, 0x72, - 0xd9, 0xd9, 0xd2, 0x74, 0xee, 0xbe, 0xaf, 0xd3, 0x9f, 0xa6, 0x63, 0xc9, 0x0f, 0xfc, 0x68, 0x53, - 0xe8, 0xa9, 0xb6, 0x19, 0x30, 0x59, 0x31, 0x7a, 0x82, 0x0a, 0x48, 0xcd, 0x6a, 0x2d, 0x99, 0xbd, - 0xb3, 0x46, 0x0b, 0x31, 0x87, 0x31, 0x41, 0x92, 0xb2, 0x59, 0x4c, 0x90, 0x1c, 0x79, 0x40, 0x41, - 0x52, 0x12, 0xc0, 0x31, 0x2d, 0xd4, 0x82, 0xe9, 0x86, 0x99, 0x78, 0x55, 0x39, 0xfa, 0x3d, 0xd1, - 0x37, 0x05, 0x6a, 0x47, 0xcb, 0x72, 0xb7, 0x94, 0xa4, 0x82, 0xbb, 0x09, 0xa3, 0x57, 0xa0, 0xf8, - 0x9e, 0x1f, 0xb2, 0x45, 0x29, 0x78, 0x0d, 0xe9, 0x10, 0x55, 0x7c, 0xe3, 0x76, 0x9d, 0x95, 0x1f, - 0x1d, 0x94, 0x47, 0x6b, 0x7e, 0x53, 0xfe, 0xc5, 0xaa, 0x02, 0xba, 0x0f, 0xe7, 0x8c, 0x13, 0x5a, - 0x75, 0x17, 0x06, 0xef, 0xee, 0x45, 0xd1, 0xdc, 0xb9, 0x6a, 0x1a, 0x25, 0x9c, 0xde, 0x00, 0x3d, - 0xf6, 0x3c, 0x5f, 0x24, 0x2d, 0x96, 0xfc, 0x0c, 0x63, 0x5b, 0x4a, 0xba, 0x3b, 0x7c, 0x02, 0x01, - 0x77, 0xd7, 0xb1, 0x7f, 0x99, 0xbf, 0x61, 0x08, 0x4d, 0x27, 0x09, 0x3b, 0xad, 0xd3, 0xc8, 0x89, - 0xb5, 0x6c, 0x28, 0x61, 0x1f, 0xf8, 0x9d, 0xec, 0xd7, 0x2d, 0xf6, 0x4e, 0xb6, 0x4e, 0x76, 0xdb, - 0x2d, 0x2a, 0x6f, 0x3f, 0xfc, 0x8e, 0xbf, 0x01, 0xc5, 0x48, 0xb4, 0xd6, 0x2b, 0x8d, 0x97, 0xd6, - 0x29, 0xf6, 0x56, 0xa8, 0x38, 0x1d, 0x59, 0x8a, 0x15, 0x19, 0xfb, 0x9f, 0xf2, 0x19, 0x90, 0x90, - 0x53, 0x50, 0x88, 0x55, 0x4c, 0x85, 0x58, 0xb9, 0xcf, 0x17, 0x64, 0x28, 0xc6, 0xfe, 0x89, 0xd9, - 0x6f, 0x26, 0x54, 0x7e, 0xd4, 0x1f, 0x68, 0xed, 0x1f, 0xb6, 0xe0, 0x6c, 0x9a, 0x45, 0x13, 0xe5, - 0x4e, 0xb9, 0x48, 0xab, 0x1e, 0xac, 0xd5, 0x08, 0xde, 0x15, 0xe5, 0x58, 0x61, 0x0c, 0x9c, 0x21, - 0xe3, 0x78, 0x11, 0xe3, 0x6e, 0xc3, 0x78, 0x2d, 0x20, 0xda, 0x1d, 0xf0, 0x1a, 0xf7, 0xac, 0xe3, - 0xfd, 0x79, 0xe6, 0xd8, 0x5e, 0x75, 0xf6, 0xcf, 0xe4, 0xe0, 0x2c, 0x7f, 0x71, 0x5a, 0xd8, 0xf3, - 0xdd, 0x66, 0xcd, 0x6f, 0x8a, 0xec, 0x26, 0x6f, 0xc1, 0x58, 0x5b, 0xd3, 0x43, 0xf4, 0x8a, 0x59, - 0xa5, 0xeb, 0x2b, 0x62, 0x79, 0x50, 0x2f, 0xc5, 0x06, 0x2d, 0xd4, 0x84, 0x31, 0xb2, 0xe7, 0x36, - 0xd4, 0xb3, 0x45, 0xee, 0xd8, 0x77, 0x83, 0x6a, 0x65, 0x59, 0xa3, 0x83, 0x0d, 0xaa, 0x0f, 0x21, - 0xe1, 0x9d, 0xfd, 0x23, 0x16, 0x3c, 0x9a, 0x11, 0xe1, 0x8a, 0x36, 0x77, 0x8f, 0xbd, 0xed, 0x89, - 0xdc, 0x59, 0xaa, 0x39, 0xfe, 0xe2, 0x87, 0x05, 0x14, 0x7d, 0x0e, 0x80, 0xbf, 0xd8, 0x51, 0xf1, - 0xa8, 0x5f, 0x28, 0x20, 0x23, 0x8a, 0x89, 0x16, 0x7d, 0x42, 0xd6, 0xc7, 0x1a, 0x2d, 0xfb, 0x27, - 0xf3, 0x30, 0xc4, 0x5e, 0x88, 0xd0, 0x0a, 0x8c, 0x6c, 0xf3, 0x98, 0xcf, 0x83, 0x84, 0x97, 0x8e, - 0xe5, 0x4c, 0x5e, 0x80, 0x65, 0x65, 0xb4, 0x0a, 0x67, 0x78, 0xcc, 0xec, 0x56, 0x85, 0xb4, 0x9c, - 0x7d, 0xa9, 0xae, 0xe0, 0xf9, 
0xa6, 0x54, 0x24, 0x8d, 0x6a, 0x37, 0x0a, 0x4e, 0xab, 0x87, 0x5e, - 0x83, 0x09, 0xca, 0xdf, 0xf9, 0x9d, 0x48, 0x52, 0xe2, 0xd1, 0xb2, 0x15, 0x43, 0xb9, 0x6e, 0x40, - 0x71, 0x02, 0x9b, 0x0a, 0x5e, 0xed, 0x2e, 0xc5, 0xcc, 0x50, 0x2c, 0x78, 0x99, 0xca, 0x18, 0x13, - 0x97, 0x99, 0x32, 0x75, 0x98, 0xe1, 0xd6, 0xfa, 0x76, 0x40, 0xc2, 0x6d, 0xbf, 0xd5, 0x14, 0xe9, - 0xca, 0x63, 0x53, 0xa6, 0x04, 0x1c, 0x77, 0xd5, 0xa0, 0x54, 0x36, 0x1d, 0xb7, 0xd5, 0x09, 0x48, - 0x4c, 0x65, 0xd8, 0xa4, 0xb2, 0x92, 0x80, 0xe3, 0xae, 0x1a, 0x74, 0x1d, 0x9d, 0x13, 0xf9, 0xc3, - 0xa5, 0x7f, 0xbf, 0xb2, 0x4f, 0x1b, 0x91, 0x9e, 0x4e, 0x3d, 0x02, 0xdc, 0x08, 0x0b, 0x1e, 0x95, - 0x81, 0x5c, 0xd3, 0x27, 0x0a, 0x1f, 0x27, 0x49, 0xe5, 0x41, 0xb2, 0x58, 0xff, 0x40, 0x0e, 0xce, - 0xa4, 0xd8, 0xc1, 0xf2, 0xa3, 0x6a, 0xcb, 0x0d, 0x23, 0x95, 0x53, 0x47, 0x3b, 0xaa, 0x78, 0x39, - 0x56, 0x18, 0x74, 0x3f, 0xf0, 0xc3, 0x30, 0x79, 0x00, 0x0a, 0x3b, 0x33, 0x01, 0x3d, 0x66, 0x76, - 0x9a, 0xcb, 0x50, 0xe8, 0x84, 0x44, 0x86, 0xa6, 0x52, 0xe7, 0x37, 0xd3, 0x30, 0x33, 0x08, 0x65, - 0x4d, 0xb7, 0x94, 0x72, 0x57, 0x63, 0x4d, 0xb9, 0xc6, 0x96, 0xc3, 0x68, 0xe7, 0x22, 0xe2, 0x39, - 0x5e, 0x24, 0x18, 0xd8, 0x38, 0xa0, 0x0a, 0x2b, 0xc5, 0x02, 0x6a, 0x7f, 0x25, 0x0f, 0xe7, 0x33, - 0x2d, 0xe3, 0x69, 0xd7, 0x77, 0x7d, 0xcf, 0x8d, 0x7c, 0xf5, 0x4a, 0xc9, 0x83, 0xa8, 0x90, 0xf6, - 0xf6, 0xaa, 0x28, 0xc7, 0x0a, 0x03, 0x5d, 0x91, 0x19, 0xef, 0x93, 0xd9, 0x85, 0x16, 0x2b, 0x46, - 0xd2, 0xfb, 0x41, 0x33, 0xb7, 0x3d, 0x01, 0x85, 0xb6, 0xef, 0xb7, 0x92, 0x87, 0x16, 0xed, 0xae, - 0xef, 0xb7, 0x30, 0x03, 0xa2, 0x4f, 0x88, 0xf1, 0x4a, 0x3c, 0xcb, 0x61, 0xa7, 0xe9, 0x87, 0xda, - 0xa0, 0x3d, 0x05, 0x23, 0x3b, 0x64, 0x3f, 0x70, 0xbd, 0xad, 0xe4, 0x73, 0xed, 0x4d, 0x5e, 0x8c, - 0x25, 0xdc, 0xcc, 0x35, 0x31, 0x72, 0xd2, 0x29, 0xd7, 0x8a, 0x7d, 0xaf, 0xc0, 0x1f, 0xcc, 0xc3, - 0x24, 0x5e, 0xac, 0x7c, 0x6b, 0x22, 0xee, 0x74, 0x4f, 0xc4, 0x49, 0xa7, 0x5c, 0xeb, 0x3f, 0x1b, - 0xbf, 0x60, 0xc1, 0x24, 0x8b, 0xc7, 0x2c, 0x42, 0x77, 0xb8, 0xbe, 0x77, 0x0a, 0x2c, 0xde, 0x13, - 0x30, 0x14, 0xd0, 0x46, 0x93, 0x69, 0x85, 0x58, 0x4f, 0x30, 0x87, 0xa1, 0x0b, 0x50, 0x60, 0x5d, - 0xa0, 0x93, 0x37, 0xc6, 0x33, 0x32, 0x54, 0x9c, 0xc8, 0xc1, 0xac, 0x94, 0xf9, 0xa3, 0x63, 0xd2, - 0x6e, 0xb9, 0xbc, 0xd3, 0xf1, 0x13, 0xc8, 0x47, 0xc3, 0x1f, 0x3d, 0xb5, 0x6b, 0x1f, 0xcc, 0x1f, - 0x3d, 0x9d, 0x64, 0x6f, 0xf1, 0xe9, 0x0f, 0x73, 0x70, 0x29, 0xb5, 0xde, 0xc0, 0xfe, 0xe8, 0xbd, - 0x6b, 0x9f, 0x8c, 0xd5, 0x4d, 0xba, 0x31, 0x4c, 0xfe, 0x14, 0x8d, 0x61, 0x0a, 0x83, 0x72, 0x98, - 0x43, 0x03, 0xb8, 0x89, 0xa7, 0x0e, 0xd9, 0x47, 0xc4, 0x4d, 0x3c, 0xb5, 0x6f, 0x19, 0xe2, 0xdf, - 0x9f, 0xe5, 0x32, 0xbe, 0x85, 0x09, 0x82, 0x57, 0xe9, 0x39, 0xc3, 0x80, 0xa1, 0xe0, 0x98, 0xc7, - 0xf8, 0x19, 0xc3, 0xcb, 0xb0, 0x82, 0x22, 0x57, 0x73, 0xb8, 0xce, 0x65, 0x67, 0xd9, 0xcc, 0x6c, - 0x6a, 0xce, 0x7c, 0xb1, 0x52, 0x43, 0x90, 0xe2, 0x7c, 0xbd, 0xaa, 0x09, 0xef, 0xf9, 0xc1, 0x85, - 0xf7, 0xb1, 0x74, 0xc1, 0x1d, 0x2d, 0xc0, 0xe4, 0xae, 0xeb, 0xd1, 0x63, 0x73, 0xdf, 0x64, 0x59, - 0x55, 0xfc, 0x91, 0x55, 0x13, 0x8c, 0x93, 0xf8, 0xb3, 0xaf, 0xc0, 0xf8, 0x83, 0xab, 0x2d, 0xbf, - 0x91, 0x87, 0xc7, 0x7a, 0x6c, 0x7b, 0x7e, 0xd6, 0x1b, 0x73, 0xa0, 0x9d, 0xf5, 0x5d, 0xf3, 0x50, - 0x83, 0xb3, 0x9b, 0x9d, 0x56, 0x6b, 0x9f, 0xd9, 0x9b, 0x92, 0xa6, 0xc4, 0x10, 0x3c, 0xe5, 0x05, - 0x99, 0x03, 0x63, 0x25, 0x05, 0x07, 0xa7, 0xd6, 0x44, 0xaf, 0x03, 0xf2, 0x45, 0x8a, 0xdf, 0xeb, - 0xc4, 0x13, 0xef, 0x00, 0x6c, 0xe0, 0xf3, 0xf1, 0x66, 0xbc, 0xdd, 0x85, 0x81, 0x53, 0x6a, 0x51, - 0xe1, 0x80, 0xde, 0x4a, 0xfb, 0xaa, 0x5b, 0x09, 0xe1, 
0x00, 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0xeb, - 0x30, 0xed, 0xec, 0x39, 0x2e, 0x8f, 0xcb, 0x27, 0x09, 0x70, 0xe9, 0x40, 0x29, 0xcb, 0x16, 0x92, - 0x08, 0xb8, 0xbb, 0x4e, 0xc2, 0x25, 0x7b, 0x38, 0xdb, 0x25, 0xbb, 0xf7, 0xb9, 0xd8, 0x4f, 0xf7, - 0x6b, 0xff, 0x27, 0x8b, 0x5e, 0x5f, 0x29, 0x69, 0xfa, 0xe9, 0x38, 0x28, 0x1d, 0xa6, 0xe6, 0x1d, - 0x7d, 0x4e, 0xb3, 0x28, 0x89, 0x81, 0xd8, 0xc4, 0xe5, 0x0b, 0x22, 0x8c, 0x9d, 0x72, 0x0c, 0x16, - 0x5f, 0x44, 0x57, 0x50, 0x18, 0xe8, 0xf3, 0x30, 0xd2, 0x74, 0xf7, 0xdc, 0xd0, 0x0f, 0xc4, 0x66, - 0x39, 0xa6, 0x6b, 0x43, 0x7c, 0x0e, 0x56, 0x38, 0x19, 0x2c, 0xe9, 0xd9, 0x3f, 0x98, 0x83, 0x71, - 0xd9, 0xe2, 0x1b, 0x1d, 0x3f, 0x72, 0x4e, 0xe1, 0x5a, 0xbe, 0x6e, 0x5c, 0xcb, 0x9f, 0xe8, 0x15, - 0x62, 0x82, 0x75, 0x29, 0xf3, 0x3a, 0xbe, 0x9d, 0xb8, 0x8e, 0x9f, 0xec, 0x4f, 0xaa, 0xf7, 0x35, - 0xfc, 0xcf, 0x2c, 0x98, 0x36, 0xf0, 0x4f, 0xe1, 0x36, 0x58, 0x31, 0x6f, 0x83, 0xc7, 0xfb, 0x7e, - 0x43, 0xc6, 0x2d, 0xf0, 0x7d, 0xf9, 0x44, 0xdf, 0xd9, 0xe9, 0xff, 0x1e, 0x14, 0xb6, 0x9d, 0xa0, - 0xd9, 0x2b, 0x94, 0x6d, 0x57, 0xa5, 0xb9, 0x1b, 0x4e, 0xd0, 0xe4, 0x67, 0xf8, 0x33, 0x2a, 0x4f, - 0xa6, 0x13, 0x34, 0xfb, 0xfa, 0xa0, 0xb1, 0xa6, 0xd0, 0xcb, 0x30, 0x1c, 0x36, 0xfc, 0xb6, 0xb2, - 0x10, 0xbd, 0xcc, 0x73, 0x68, 0xd2, 0x92, 0xa3, 0x83, 0x32, 0x32, 0x9b, 0xa3, 0xc5, 0x58, 0xe0, - 0xa3, 0xb7, 0x60, 0x9c, 0xfd, 0x52, 0x96, 0x12, 0xf9, 0xec, 0x04, 0x0a, 0x75, 0x1d, 0x91, 0x1b, - 0xdc, 0x18, 0x45, 0xd8, 0x24, 0x35, 0xbb, 0x05, 0x25, 0xf5, 0x59, 0x0f, 0xd5, 0x77, 0xe8, 0xdf, - 0xe7, 0xe1, 0x4c, 0xca, 0x9a, 0x43, 0xa1, 0x31, 0x13, 0xcf, 0x0d, 0xb8, 0x54, 0x3f, 0xe0, 0x5c, - 0x84, 0x4c, 0x1a, 0x6a, 0x8a, 0xb5, 0x35, 0x70, 0xa3, 0x77, 0x42, 0x92, 0x6c, 0x94, 0x16, 0xf5, - 0x6f, 0x94, 0x36, 0x76, 0x6a, 0x43, 0x4d, 0x1b, 0x52, 0x3d, 0x7d, 0xa8, 0x73, 0xfa, 0xc7, 0x79, - 0x38, 0x9b, 0x16, 0xf5, 0x06, 0x7d, 0x77, 0x22, 0x99, 0xce, 0x0b, 0x83, 0xc6, 0xcb, 0xe1, 0x19, - 0x76, 0x44, 0x2e, 0xec, 0x39, 0x33, 0xbd, 0x4e, 0xdf, 0x61, 0x16, 0x6d, 0x32, 0x87, 0xd3, 0x80, - 0x27, 0x41, 0x92, 0xc7, 0xc7, 0xa7, 0x07, 0xee, 0x80, 0xc8, 0x9e, 0x14, 0x26, 0x1c, 0x4e, 0x65, - 0x71, 0x7f, 0x87, 0x53, 0xd9, 0xf2, 0xac, 0x0b, 0xa3, 0xda, 0xd7, 0x3c, 0xd4, 0x19, 0xdf, 0xa1, - 0xb7, 0x95, 0xd6, 0xef, 0x87, 0x3a, 0xeb, 0x3f, 0x62, 0x41, 0xc2, 0x1c, 0x53, 0xa9, 0xc5, 0xac, - 0x4c, 0xb5, 0xd8, 0x65, 0x28, 0x04, 0x7e, 0x8b, 0x24, 0x73, 0xd7, 0x60, 0xbf, 0x45, 0x30, 0x83, - 0x50, 0x8c, 0x28, 0x56, 0x76, 0x8c, 0xe9, 0x82, 0x9c, 0x10, 0xd1, 0x9e, 0x80, 0xa1, 0x16, 0xd9, - 0x23, 0xad, 0x64, 0x60, 0xf8, 0x5b, 0xb4, 0x10, 0x73, 0x98, 0xfd, 0x0b, 0x05, 0xb8, 0xd8, 0xd3, - 0x65, 0x9b, 0x8a, 0x43, 0x5b, 0x4e, 0x44, 0xee, 0x39, 0xfb, 0xc9, 0x08, 0xce, 0xd7, 0x79, 0x31, - 0x96, 0x70, 0x66, 0xa1, 0xce, 0x23, 0x36, 0x26, 0x94, 0x88, 0x22, 0x50, 0xa3, 0x80, 0x9a, 0x4a, - 0xa9, 0xfc, 0x49, 0x28, 0xa5, 0xae, 0x01, 0x84, 0x61, 0x8b, 0xdb, 0x17, 0x34, 0x85, 0xe9, 0x7b, - 0x1c, 0xd9, 0xb3, 0x7e, 0x4b, 0x40, 0xb0, 0x86, 0x85, 0x2a, 0x30, 0xd5, 0x0e, 0xfc, 0x88, 0xeb, - 0x64, 0x2b, 0xdc, 0x30, 0x69, 0xc8, 0xf4, 0x96, 0xad, 0x25, 0xe0, 0xb8, 0xab, 0x06, 0x7a, 0x11, - 0x46, 0x85, 0x07, 0x6d, 0xcd, 0xf7, 0x5b, 0x42, 0x0d, 0xa4, 0xcc, 0x5c, 0xea, 0x31, 0x08, 0xeb, - 0x78, 0x5a, 0x35, 0xa6, 0xe8, 0x1d, 0x49, 0xad, 0xc6, 0x95, 0xbd, 0x1a, 0x5e, 0x22, 0x02, 0x56, - 0x71, 0xa0, 0x08, 0x58, 0xb1, 0x62, 0xac, 0x34, 0xf0, 0xdb, 0x16, 0xf4, 0x55, 0x25, 0xfd, 0x6c, - 0x01, 0xce, 0x88, 0x85, 0xf3, 0xb0, 0x97, 0xcb, 0x9d, 0xee, 0xe5, 0x72, 0x12, 0xaa, 0xb3, 0x6f, - 0xad, 0x99, 0xd3, 0x5e, 0x33, 0x3f, 0x64, 0x81, 0xc9, 0x5e, 0xa1, 0xff, 0x2f, 
0x33, 0x04, 0xfe, - 0x8b, 0x99, 0xec, 0x5a, 0x53, 0x5e, 0x20, 0x1f, 0x30, 0x18, 0xbe, 0xfd, 0x1f, 0x2d, 0x78, 0xbc, - 0x2f, 0x45, 0xb4, 0x0c, 0x25, 0xc6, 0x03, 0x6a, 0xd2, 0xd9, 0x93, 0xca, 0x70, 0x51, 0x02, 0x32, - 0x58, 0xd2, 0xb8, 0x26, 0x5a, 0xee, 0xca, 0x35, 0xf0, 0x54, 0x4a, 0xae, 0x81, 0x73, 0xc6, 0xf0, - 0x3c, 0x60, 0xb2, 0x81, 0x5f, 0xce, 0xc3, 0x30, 0x5f, 0xf1, 0xa7, 0x20, 0x86, 0xad, 0x08, 0xbd, - 0x6d, 0x8f, 0x18, 0x58, 0xbc, 0x2f, 0x73, 0x15, 0x27, 0x72, 0x38, 0x9b, 0xa0, 0x6e, 0xab, 0x58, - 0xc3, 0x8b, 0xe6, 0x8c, 0xfb, 0x6c, 0x36, 0xa1, 0x98, 0x04, 0x4e, 0x43, 0xbb, 0xdd, 0xbe, 0x08, - 0x10, 0xb2, 0x3c, 0xfd, 0x94, 0x86, 0x88, 0xa6, 0xf6, 0xc9, 0x1e, 0xad, 0xd7, 0x15, 0x32, 0xef, - 0x43, 0xbc, 0xd3, 0x15, 0x00, 0x6b, 0x14, 0x67, 0x5f, 0x82, 0x92, 0x42, 0xee, 0xa7, 0xc5, 0x19, - 0xd3, 0x99, 0x8b, 0xcf, 0xc2, 0x64, 0xa2, 0xad, 0x63, 0x29, 0x81, 0x7e, 0xd1, 0x82, 0x49, 0xde, - 0xe5, 0x65, 0x6f, 0x4f, 0x9c, 0xa9, 0xef, 0xc3, 0xd9, 0x56, 0xca, 0xd9, 0x26, 0x66, 0x74, 0xf0, - 0xb3, 0x50, 0x29, 0x7d, 0xd2, 0xa0, 0x38, 0xb5, 0x0d, 0x74, 0x95, 0xae, 0x5b, 0x7a, 0x76, 0x39, - 0x2d, 0xe1, 0xed, 0x34, 0xc6, 0xd7, 0x2c, 0x2f, 0xc3, 0x0a, 0x6a, 0xff, 0xb6, 0x05, 0xd3, 0xbc, - 0xe7, 0x37, 0xc9, 0xbe, 0xda, 0xe1, 0x1f, 0x66, 0xdf, 0x45, 0xfa, 0x8f, 0x5c, 0x46, 0xfa, 0x0f, - 0xfd, 0xd3, 0xf2, 0x3d, 0x3f, 0xed, 0x67, 0x2c, 0x10, 0x2b, 0xf0, 0x14, 0x44, 0xf9, 0x6f, 0x37, - 0x45, 0xf9, 0xd9, 0xec, 0x45, 0x9d, 0x21, 0xc3, 0xff, 0xa9, 0x05, 0x53, 0x1c, 0x21, 0x7e, 0x73, - 0xfe, 0x50, 0xe7, 0x61, 0x90, 0x3c, 0x7e, 0x2a, 0xb9, 0x77, 0xfa, 0x47, 0x19, 0x93, 0x55, 0xe8, - 0x39, 0x59, 0x4d, 0xb9, 0x81, 0x8e, 0x91, 0xc3, 0xf2, 0xd8, 0x61, 0xb4, 0xed, 0x3f, 0xb0, 0x00, - 0xf1, 0x66, 0x0c, 0xf6, 0x87, 0x32, 0x15, 0xac, 0x54, 0xbb, 0x2e, 0xe2, 0xa3, 0x46, 0x41, 0xb0, - 0x86, 0x75, 0x22, 0xc3, 0x93, 0x30, 0x1c, 0xc8, 0xf7, 0x37, 0x1c, 0x38, 0xc6, 0x88, 0xfe, 0xef, - 0x02, 0x24, 0xdd, 0x0f, 0xd0, 0x5d, 0x18, 0x6b, 0x38, 0x6d, 0x67, 0xc3, 0x6d, 0xb9, 0x91, 0x4b, - 0xc2, 0x5e, 0x16, 0x47, 0x4b, 0x1a, 0x9e, 0x78, 0xea, 0xd5, 0x4a, 0xb0, 0x41, 0x07, 0xcd, 0x01, - 0xb4, 0x03, 0x77, 0xcf, 0x6d, 0x91, 0x2d, 0xa6, 0x71, 0x60, 0xfe, 0x95, 0xdc, 0x8c, 0x46, 0x96, - 0x62, 0x0d, 0x23, 0xc5, 0x55, 0x2e, 0xff, 0xf0, 0x5c, 0xe5, 0x0a, 0xc7, 0x74, 0x95, 0x1b, 0x1a, - 0xc8, 0x55, 0x0e, 0xc3, 0x23, 0x92, 0x45, 0xa2, 0xff, 0x57, 0xdc, 0x16, 0x11, 0x7c, 0x31, 0xf7, - 0xba, 0x9c, 0x3d, 0x3c, 0x28, 0x3f, 0x82, 0x53, 0x31, 0x70, 0x46, 0x4d, 0xf4, 0x39, 0x98, 0x71, - 0x5a, 0x2d, 0xff, 0x9e, 0x1a, 0xb5, 0xe5, 0xb0, 0xe1, 0xb4, 0xb8, 0xc6, 0x7e, 0x84, 0x51, 0xbd, - 0x70, 0x78, 0x50, 0x9e, 0x59, 0xc8, 0xc0, 0xc1, 0x99, 0xb5, 0x13, 0x9e, 0x76, 0xc5, 0xbe, 0x9e, - 0x76, 0xaf, 0x42, 0xa9, 0x1d, 0xf8, 0x8d, 0x55, 0xcd, 0xfb, 0xe7, 0x12, 0xcb, 0x90, 0x2f, 0x0b, - 0x8f, 0x0e, 0xca, 0xe3, 0xea, 0x0f, 0xbb, 0xe1, 0xe3, 0x0a, 0xf6, 0x0e, 0x9c, 0xa9, 0x93, 0xc0, - 0x65, 0xb9, 0x37, 0x9b, 0xf1, 0x86, 0x5e, 0x87, 0x52, 0x90, 0x38, 0xc2, 0x06, 0x0a, 0xe4, 0xa4, - 0xc5, 0x17, 0x96, 0x47, 0x56, 0x4c, 0xc8, 0xfe, 0x13, 0x0b, 0x46, 0x84, 0x25, 0xfa, 0x29, 0x70, - 0x4e, 0x0b, 0x86, 0x02, 0xbb, 0x9c, 0x7e, 0xcc, 0xb3, 0xce, 0x64, 0xaa, 0xae, 0xab, 0x09, 0xd5, - 0xf5, 0xe3, 0xbd, 0x88, 0xf4, 0x56, 0x5a, 0xff, 0xad, 0x3c, 0x4c, 0x98, 0xce, 0x23, 0xa7, 0x30, - 0x04, 0x6b, 0x30, 0x12, 0x0a, 0x4f, 0xa5, 0x5c, 0xb6, 0x85, 0x75, 0x72, 0x12, 0x63, 0xf3, 0x29, - 0xe1, 0x9b, 0x24, 0x89, 0xa4, 0xba, 0x40, 0xe5, 0x1f, 0xa2, 0x0b, 0x54, 0x3f, 0xff, 0x9d, 0xc2, - 0x49, 0xf8, 0xef, 0xd8, 0x5f, 0x63, 0x57, 0x8d, 0x5e, 0x7e, 0x0a, 0x5c, 0xc8, 0x75, 0xf3, 0x52, - 0xb2, 
0x7b, 0xac, 0x2c, 0xd1, 0xa9, 0x0c, 0x6e, 0xe4, 0xe7, 0x2d, 0xb8, 0x98, 0xf2, 0x55, 0x1a, - 0x6b, 0xf2, 0x0c, 0x14, 0x9d, 0x4e, 0xd3, 0x55, 0x7b, 0x59, 0x7b, 0xc6, 0x5a, 0x10, 0xe5, 0x58, - 0x61, 0xa0, 0x25, 0x98, 0x26, 0xf7, 0xdb, 0x2e, 0x7f, 0x47, 0xd4, 0x6d, 0x1c, 0xf3, 0x3c, 0xb8, - 0xed, 0x72, 0x12, 0x88, 0xbb, 0xf1, 0x95, 0xfb, 0x77, 0x3e, 0xd3, 0xfd, 0xfb, 0x1f, 0x5a, 0x30, - 0xaa, 0xbc, 0x52, 0x1e, 0xfa, 0x68, 0x7f, 0x87, 0x39, 0xda, 0x8f, 0xf5, 0x18, 0xed, 0x8c, 0x61, - 0xfe, 0x3b, 0x39, 0xd5, 0xdf, 0x9a, 0x1f, 0x44, 0x03, 0xb0, 0x3c, 0x2f, 0x43, 0xb1, 0x1d, 0xf8, - 0x91, 0xdf, 0xf0, 0x5b, 0x82, 0xe3, 0xb9, 0x10, 0x47, 0x27, 0xe0, 0xe5, 0x47, 0xda, 0x6f, 0xac, - 0xb0, 0xd9, 0xe8, 0xf9, 0x41, 0x24, 0xb8, 0x8c, 0x78, 0xf4, 0xfc, 0x20, 0xc2, 0x0c, 0x82, 0x9a, - 0x00, 0x91, 0x13, 0x6c, 0x91, 0x88, 0x96, 0x89, 0x40, 0x27, 0xd9, 0x87, 0x47, 0x27, 0x72, 0x5b, - 0x73, 0xae, 0x17, 0x85, 0x51, 0x30, 0x57, 0xf5, 0xa2, 0xdb, 0x01, 0x17, 0xa0, 0xb4, 0x70, 0x03, - 0x8a, 0x16, 0xd6, 0xe8, 0x4a, 0x9f, 0x50, 0xd6, 0xc6, 0x90, 0xf9, 0x20, 0xbe, 0x26, 0xca, 0xb1, - 0xc2, 0xb0, 0x5f, 0x62, 0x57, 0x09, 0x1b, 0xa0, 0xe3, 0x45, 0x02, 0xf8, 0x7a, 0x51, 0x0d, 0x2d, - 0x7b, 0x0d, 0xab, 0xe8, 0xf1, 0x06, 0x7a, 0x9f, 0xdc, 0xb4, 0x61, 0xdd, 0xdf, 0x26, 0x0e, 0x4a, - 0x80, 0xbe, 0xb3, 0xcb, 0x4e, 0xe2, 0xd9, 0x3e, 0x57, 0xc0, 0x31, 0x2c, 0x23, 0x58, 0xc0, 0x6d, - 0x16, 0x8e, 0xb8, 0x5a, 0x13, 0x8b, 0x5c, 0x0b, 0xb8, 0x2d, 0x00, 0x38, 0xc6, 0x41, 0xf3, 0x42, - 0xfc, 0x2e, 0x18, 0x69, 0xf7, 0xa4, 0xf8, 0x2d, 0x3f, 0x5f, 0x93, 0xbf, 0x9f, 0x83, 0x51, 0x95, - 0x7e, 0xaf, 0xc6, 0xb3, 0x98, 0x89, 0xb0, 0x2f, 0xcb, 0x71, 0x31, 0xd6, 0x71, 0xd0, 0x3a, 0x4c, - 0x86, 0x5c, 0xf7, 0xa2, 0xa2, 0xfb, 0x71, 0x1d, 0xd6, 0x27, 0xa5, 0x7d, 0x45, 0xdd, 0x04, 0x1f, - 0xb1, 0x22, 0x7e, 0x74, 0x48, 0xc7, 0xce, 0x24, 0x09, 0xf4, 0x1a, 0x4c, 0xb4, 0xf4, 0x44, 0xf7, - 0x35, 0xa1, 0xe2, 0x52, 0x66, 0xca, 0x46, 0x1a, 0xfc, 0x1a, 0x4e, 0x60, 0x53, 0x4e, 0x49, 0x2f, - 0x11, 0x11, 0x29, 0x1d, 0x6f, 0x8b, 0x84, 0x22, 0x79, 0x18, 0xe3, 0x94, 0x6e, 0x65, 0xe0, 0xe0, - 0xcc, 0xda, 0xe8, 0x65, 0x18, 0x93, 0x9f, 0xaf, 0xb9, 0x2d, 0xc7, 0xc6, 0xf0, 0x1a, 0x0c, 0x1b, - 0x98, 0xe8, 0x1e, 0x9c, 0x93, 0xff, 0xd7, 0x03, 0x67, 0x73, 0xd3, 0x6d, 0x08, 0xaf, 0x71, 0xee, - 0x11, 0xb4, 0x20, 0x5d, 0x8c, 0x96, 0xd3, 0x90, 0x8e, 0x0e, 0xca, 0x97, 0xc5, 0xa8, 0xa5, 0xc2, - 0xd9, 0x24, 0xa6, 0xd3, 0x47, 0xab, 0x70, 0x66, 0x9b, 0x38, 0xad, 0x68, 0x7b, 0x69, 0x9b, 0x34, - 0x76, 0xe4, 0x26, 0x62, 0xce, 0xd0, 0x9a, 0x09, 0xf9, 0x8d, 0x6e, 0x14, 0x9c, 0x56, 0x0f, 0xbd, - 0x0d, 0x33, 0xed, 0xce, 0x46, 0xcb, 0x0d, 0xb7, 0xd7, 0xfc, 0x88, 0x99, 0x74, 0xa8, 0xec, 0x75, - 0xc2, 0x6b, 0x5a, 0x39, 0x82, 0xd7, 0x32, 0xf0, 0x70, 0x26, 0x05, 0xf4, 0x3e, 0x9c, 0x4b, 0x2c, - 0x06, 0xe1, 0xc3, 0x39, 0x91, 0x1d, 0xdf, 0xb7, 0x9e, 0x56, 0x41, 0xf8, 0x64, 0xa6, 0x81, 0x70, - 0x7a, 0x13, 0x1f, 0xcc, 0xd0, 0xe7, 0x3d, 0x5a, 0x59, 0x63, 0xca, 0xd0, 0x3b, 0x30, 0xa6, 0xaf, - 0x22, 0x71, 0xc1, 0x5c, 0x49, 0xe7, 0x59, 0xb4, 0xd5, 0xc6, 0x59, 0x3a, 0xb5, 0xa2, 0x74, 0x18, - 0x36, 0x28, 0xda, 0x04, 0xd2, 0xbf, 0x0f, 0xdd, 0x82, 0x62, 0xa3, 0xe5, 0x12, 0x2f, 0xaa, 0xd6, - 0x7a, 0x05, 0x19, 0x59, 0x12, 0x38, 0x62, 0xc0, 0x44, 0x40, 0x54, 0x5e, 0x86, 0x15, 0x05, 0xfb, - 0xd7, 0x72, 0x50, 0xee, 0x13, 0x5d, 0x37, 0xa1, 0x8f, 0xb6, 0x06, 0xd2, 0x47, 0x2f, 0xc8, 0x5c, - 0x7c, 0x6b, 0x09, 0x21, 0x3d, 0x91, 0x67, 0x2f, 0x16, 0xd5, 0x93, 0xf8, 0x03, 0xdb, 0x07, 0xeb, - 0x2a, 0xed, 0x42, 0x5f, 0x0b, 0x77, 0xe3, 0x29, 0x6b, 0x68, 0x70, 0x41, 0x24, 0xf3, 0x59, 0xc2, - 0xfe, 0x5a, 0x0e, 0xce, 0xa9, 
0x21, 0xfc, 0xe6, 0x1d, 0xb8, 0x3b, 0xdd, 0x03, 0x77, 0x02, 0x8f, - 0x3a, 0xf6, 0x6d, 0x18, 0xe6, 0x41, 0x5a, 0x06, 0x60, 0x80, 0x9e, 0x30, 0x23, 0x7a, 0xa9, 0x6b, - 0xda, 0x88, 0xea, 0xf5, 0x97, 0x2d, 0x98, 0x5c, 0x5f, 0xaa, 0xd5, 0xfd, 0xc6, 0x0e, 0x89, 0x16, - 0x38, 0xc3, 0x8a, 0x05, 0xff, 0x63, 0x3d, 0x20, 0x5f, 0x93, 0xc6, 0x31, 0x5d, 0x86, 0xc2, 0xb6, - 0x1f, 0x46, 0xc9, 0x17, 0xdf, 0x1b, 0x7e, 0x18, 0x61, 0x06, 0xb1, 0x7f, 0xc7, 0x82, 0x21, 0x96, - 0x41, 0xb6, 0x5f, 0x5a, 0xe3, 0x41, 0xbe, 0x0b, 0xbd, 0x08, 0xc3, 0x64, 0x73, 0x93, 0x34, 0x22, - 0x31, 0xab, 0xd2, 0x6d, 0x75, 0x78, 0x99, 0x95, 0xd2, 0x4b, 0x9f, 0x35, 0xc6, 0xff, 0x62, 0x81, - 0x8c, 0xde, 0x84, 0x52, 0xe4, 0xee, 0x92, 0x85, 0x66, 0x53, 0xbc, 0x99, 0x3d, 0x80, 0x97, 0xf0, - 0xba, 0x24, 0x80, 0x63, 0x5a, 0xf6, 0x57, 0x72, 0x00, 0x71, 0xa8, 0x81, 0x7e, 0x9f, 0xb8, 0xd8, - 0xf5, 0x9a, 0x72, 0x25, 0xe5, 0x35, 0x05, 0xc5, 0x04, 0x53, 0x9e, 0x52, 0xd4, 0x30, 0xe5, 0x07, - 0x1a, 0xa6, 0xc2, 0x71, 0x86, 0x69, 0x09, 0xa6, 0xe3, 0x50, 0x09, 0x66, 0xdc, 0x18, 0x26, 0xa4, - 0xac, 0x27, 0x81, 0xb8, 0x1b, 0xdf, 0x26, 0x70, 0x59, 0x46, 0xf0, 0x94, 0x77, 0x0d, 0x33, 0xc9, - 0x3c, 0x46, 0x86, 0xeb, 0xf8, 0xb9, 0x28, 0x97, 0xf9, 0x5c, 0xf4, 0xe3, 0x16, 0x9c, 0x4d, 0xb6, - 0xc3, 0x7c, 0xe4, 0xbe, 0x6c, 0xc1, 0x39, 0xf6, 0x68, 0xc6, 0x5a, 0xed, 0x7e, 0xa2, 0x7b, 0x21, - 0x3d, 0x84, 0x44, 0xef, 0x1e, 0xc7, 0xfe, 0xd1, 0xab, 0x69, 0xa4, 0x71, 0x7a, 0x8b, 0xf6, 0x97, - 0x2d, 0x38, 0x9f, 0x99, 0xb8, 0x08, 0x5d, 0x85, 0xa2, 0xd3, 0x76, 0xb9, 0x46, 0x4a, 0xec, 0x77, - 0x26, 0x3d, 0xd6, 0xaa, 0x5c, 0x1f, 0xa5, 0xa0, 0x2a, 0xa1, 0x62, 0x2e, 0x33, 0xa1, 0x62, 0xdf, - 0xfc, 0x88, 0xf6, 0xf7, 0x5b, 0x20, 0xdc, 0xa2, 0x06, 0x38, 0x64, 0xde, 0x92, 0xf9, 0x68, 0x8d, - 0xe0, 0xe9, 0x97, 0xb3, 0xfd, 0xc4, 0x44, 0xc8, 0x74, 0x75, 0xa9, 0x1b, 0x81, 0xd2, 0x0d, 0x5a, - 0x76, 0x13, 0x04, 0xb4, 0x42, 0x98, 0xce, 0xaa, 0x7f, 0x6f, 0xae, 0x01, 0x34, 0x19, 0xae, 0x96, - 0x95, 0x52, 0x5d, 0x21, 0x15, 0x05, 0xc1, 0x1a, 0x96, 0xfd, 0x6f, 0x73, 0x30, 0x2a, 0x83, 0x75, - 0x77, 0xbc, 0x41, 0x24, 0xcb, 0x63, 0x65, 0xef, 0x61, 0x69, 0x5c, 0x29, 0xe1, 0x5a, 0x2c, 0x90, - 0xc7, 0x69, 0x5c, 0x25, 0x00, 0xc7, 0x38, 0xe8, 0x29, 0x18, 0x09, 0x3b, 0x1b, 0x0c, 0x3d, 0xe1, - 0xc4, 0x53, 0xe7, 0xc5, 0x58, 0xc2, 0xd1, 0xe7, 0x60, 0x8a, 0xd7, 0x0b, 0xfc, 0xb6, 0xb3, 0xc5, - 0xd5, 0x9f, 0x43, 0xca, 0xfb, 0x76, 0x6a, 0x35, 0x01, 0x3b, 0x3a, 0x28, 0x9f, 0x4d, 0x96, 0x31, - 0xc5, 0x79, 0x17, 0x15, 0xf6, 0x18, 0xcf, 0x1b, 0xa1, 0xcb, 0xb4, 0xeb, 0x0d, 0x3f, 0x06, 0x61, - 0x1d, 0xcf, 0x7e, 0x07, 0x50, 0x77, 0xd8, 0x72, 0xf4, 0x3a, 0xb7, 0xc0, 0x72, 0x03, 0xd2, 0xec, - 0xa5, 0x48, 0xd7, 0x7d, 0x4c, 0xa5, 0xfd, 0x3d, 0xaf, 0x85, 0x55, 0x7d, 0xfb, 0xaf, 0xe6, 0x61, - 0x2a, 0xe9, 0x71, 0x88, 0x6e, 0xc0, 0x30, 0xbf, 0x23, 0x05, 0xf9, 0x1e, 0xef, 0xb4, 0x9a, 0x9f, - 0x22, 0x3b, 0x2d, 0xc4, 0x35, 0x2b, 0xea, 0xa3, 0xb7, 0x61, 0xb4, 0xe9, 0xdf, 0xf3, 0xee, 0x39, - 0x41, 0x73, 0xa1, 0x56, 0x15, 0xcb, 0x39, 0x95, 0xd5, 0xae, 0xc4, 0x68, 0xba, 0xef, 0x23, 0x7b, - 0x93, 0x88, 0x41, 0x58, 0x27, 0x87, 0xd6, 0x59, 0x28, 0xc6, 0x4d, 0x77, 0x6b, 0xd5, 0x69, 0xf7, - 0x32, 0xc7, 0x5d, 0x92, 0x48, 0x1a, 0xe5, 0x71, 0x11, 0xaf, 0x91, 0x03, 0x70, 0x4c, 0x08, 0x7d, - 0x37, 0x9c, 0x09, 0x33, 0xb4, 0x73, 0x59, 0x59, 0x2c, 0x7a, 0x29, 0xac, 0x16, 0x1f, 0xa5, 0x42, - 0x50, 0x9a, 0x1e, 0x2f, 0xad, 0x19, 0xfb, 0xd7, 0xcf, 0x80, 0xb1, 0x89, 0x8d, 0xa4, 0x46, 0xd6, - 0x09, 0x25, 0x35, 0xc2, 0x50, 0x24, 0xbb, 0xed, 0x68, 0xbf, 0xe2, 0x06, 0xbd, 0x92, 0xee, 0x2d, - 0x0b, 0x9c, 0x6e, 0x9a, 0x12, 0x82, 0x15, 0x9d, 0xf4, 
0xcc, 0x53, 0xf9, 0x0f, 0x31, 0xf3, 0x54, - 0xe1, 0x14, 0x33, 0x4f, 0xad, 0xc1, 0xc8, 0x96, 0x1b, 0x61, 0xd2, 0xf6, 0x05, 0x77, 0x9a, 0xba, - 0x0e, 0xaf, 0x73, 0x94, 0xee, 0x1c, 0x27, 0x02, 0x80, 0x25, 0x11, 0xf4, 0xba, 0xda, 0x81, 0xc3, - 0xd9, 0xc2, 0x5d, 0xf7, 0x83, 0x62, 0xea, 0x1e, 0x14, 0xf9, 0xa5, 0x46, 0x1e, 0x34, 0xbf, 0xd4, - 0x8a, 0xcc, 0x0a, 0x55, 0xcc, 0xb6, 0x9d, 0x67, 0x49, 0x9f, 0xfa, 0xe4, 0x82, 0xba, 0xab, 0x67, - 0xd2, 0x2a, 0x65, 0x9f, 0x04, 0x2a, 0x49, 0xd6, 0x80, 0xf9, 0xb3, 0xbe, 0xdf, 0x82, 0x73, 0xed, - 0xb4, 0xa4, 0x72, 0x22, 0x97, 0xd3, 0x8b, 0x03, 0x67, 0xcd, 0x33, 0x1a, 0x64, 0x52, 0x7e, 0x2a, - 0x1a, 0x4e, 0x6f, 0x8e, 0x0e, 0x74, 0xb0, 0xd1, 0x14, 0x09, 0xa0, 0x9e, 0xc8, 0x48, 0xc4, 0xd5, - 0x23, 0xfd, 0xd6, 0x7a, 0x4a, 0xd2, 0xa7, 0x8f, 0x67, 0x25, 0x7d, 0x1a, 0x38, 0xd5, 0xd3, 0xeb, - 0x2a, 0x05, 0xd7, 0x78, 0xf6, 0x52, 0xe2, 0x09, 0xb6, 0xfa, 0x26, 0xde, 0x7a, 0x5d, 0x25, 0xde, - 0xea, 0x11, 0x92, 0x8e, 0xa7, 0xd5, 0xea, 0x9b, 0x6e, 0x4b, 0x4b, 0x99, 0x35, 0x79, 0x32, 0x29, - 0xb3, 0x8c, 0xab, 0x86, 0x67, 0x6d, 0x7a, 0xba, 0xcf, 0x55, 0x63, 0xd0, 0xed, 0x7d, 0xd9, 0xf0, - 0xf4, 0x60, 0xd3, 0x0f, 0x94, 0x1e, 0xec, 0xae, 0x9e, 0x6e, 0x0b, 0xf5, 0xc9, 0x27, 0x45, 0x91, - 0x06, 0x4c, 0xb2, 0x75, 0x57, 0xbf, 0x00, 0xcf, 0x64, 0xd3, 0x55, 0xf7, 0x5c, 0x37, 0xdd, 0xd4, - 0x2b, 0xb0, 0x2b, 0x79, 0xd7, 0xd9, 0xd3, 0x49, 0xde, 0x75, 0xee, 0xc4, 0x93, 0x77, 0x3d, 0x72, - 0x0a, 0xc9, 0xbb, 0x1e, 0xfd, 0x50, 0x93, 0x77, 0xcd, 0x3c, 0x84, 0xe4, 0x5d, 0x6b, 0x71, 0xf2, - 0xae, 0xf3, 0xd9, 0x53, 0x92, 0x62, 0xd0, 0x9b, 0x91, 0xb2, 0xeb, 0x2e, 0x7b, 0xd5, 0xe7, 0x21, - 0x31, 0x44, 0xcc, 0xbc, 0xf4, 0x44, 0xc5, 0x69, 0x71, 0x33, 0xf8, 0x94, 0x28, 0x10, 0x8e, 0x49, - 0x51, 0xba, 0x71, 0x0a, 0xaf, 0xc7, 0x7a, 0xe8, 0x71, 0xd3, 0x34, 0x64, 0x3d, 0x12, 0x77, 0xbd, - 0xc6, 0x13, 0x77, 0x5d, 0xc8, 0x3e, 0xc9, 0x93, 0xd7, 0x9d, 0x99, 0xae, 0xeb, 0x07, 0x72, 0x70, - 0xa9, 0xf7, 0xbe, 0x88, 0xd5, 0x73, 0xb5, 0xf8, 0x39, 0x29, 0xa1, 0x9e, 0xe3, 0xb2, 0x55, 0x8c, - 0x35, 0x70, 0xdc, 0xa1, 0xeb, 0x30, 0xad, 0x2c, 0x81, 0x5b, 0x6e, 0x63, 0x5f, 0x4b, 0x80, 0xac, - 0x3c, 0x1e, 0xeb, 0x49, 0x04, 0xdc, 0x5d, 0x07, 0x2d, 0xc0, 0xa4, 0x51, 0x58, 0xad, 0x08, 0x19, - 0x4a, 0xe9, 0x03, 0xeb, 0x26, 0x18, 0x27, 0xf1, 0xed, 0x9f, 0xb6, 0xe0, 0xd1, 0x8c, 0xbc, 0x18, - 0x03, 0x87, 0xd5, 0xd9, 0x84, 0xc9, 0xb6, 0x59, 0xb5, 0x4f, 0xf4, 0x2d, 0x23, 0xfb, 0x86, 0xea, - 0x6b, 0x02, 0x80, 0x93, 0x44, 0x17, 0xaf, 0xfe, 0xe6, 0xef, 0x5d, 0xfa, 0xd8, 0x6f, 0xfd, 0xde, - 0xa5, 0x8f, 0xfd, 0xf6, 0xef, 0x5d, 0xfa, 0xd8, 0x5f, 0x38, 0xbc, 0x64, 0xfd, 0xe6, 0xe1, 0x25, - 0xeb, 0xb7, 0x0e, 0x2f, 0x59, 0xbf, 0x7d, 0x78, 0xc9, 0xfa, 0xdd, 0xc3, 0x4b, 0xd6, 0x57, 0x7e, - 0xff, 0xd2, 0xc7, 0xde, 0xca, 0xed, 0x3d, 0xf7, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xbb, 0x9b, - 0xc3, 0x9b, 0xb7, 0xea, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index b9d569f5f..b99d10442 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -161,7 +161,7 @@ message AzureFileVolumeSource { // Deprecated in 1.7, please use the bindings subresource of pods instead. message Binding { // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -218,6 +218,15 @@ message CSIPersistentVolumeSource { // secret object contains more than one secret, all secrets are passed. // +optional optional SecretReference nodePublishSecretRef = 8; + + // ControllerExpandSecretRef is a reference to the secret object containing + // sensitive information to pass to the CSI driver to complete the CSI + // ControllerExpandVolume call. + // This is an alpha field and requires enabling ExpandCSIVolumes feature gate. + // This field is optional, and may be empty if no secret is required. If the + // secret object contains more than one secret, all secrets are passed. + // +optional + optional SecretReference controllerExpandSecretRef = 9; } // Represents a source location of a volume to mount, managed by an external CSI driver @@ -266,7 +275,7 @@ message Capabilities { // Cephfs volumes do not support ownership management or SELinux relabeling. message CephFSPersistentVolumeSource { // Required: Monitors is a collection of Ceph monitors - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it repeated string monitors = 1; // Optional: Used as the mounted root, rather than the full Ceph tree, default is / @@ -274,23 +283,23 @@ message CephFSPersistentVolumeSource { optional string path = 2; // Optional: User is the rados user name, default is admin - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional string user = 3; // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional string secretFile = 4; // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional SecretReference secretRef = 5; // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional bool readOnly = 6; } @@ -299,7 +308,7 @@ message CephFSPersistentVolumeSource { // Cephfs volumes do not support ownership management or SELinux relabeling. 
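The controllerExpandSecretRef field added above can be populated on a CSI-backed PersistentVolume so that the external resizer has credentials for the ControllerExpandVolume call. A minimal Go sketch against the vendored corev1 types; the driver name, volume handle, and secret name are made-up placeholders, and the field is alpha and requires the ExpandCSIVolumes feature gate:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A PersistentVolume backed by a CSI driver; controllerExpandSecretRef points
	// the resizer at the secret it needs to complete ControllerExpandVolume.
	pv := corev1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "example-pv"},
		Spec: corev1.PersistentVolumeSpec{
			Capacity: corev1.ResourceList{
				corev1.ResourceStorage: resource.MustParse("10Gi"),
			},
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			PersistentVolumeSource: corev1.PersistentVolumeSource{
				CSI: &corev1.CSIPersistentVolumeSource{
					Driver:       "example.csi.driver.io", // hypothetical driver name
					VolumeHandle: "vol-0123456789",        // hypothetical backend volume ID
					ControllerExpandSecretRef: &corev1.SecretReference{
						Name:      "csi-expand-secret", // hypothetical secret
						Namespace: "kube-system",
					},
				},
			},
		},
	}
	fmt.Println(pv.Spec.CSI.ControllerExpandSecretRef.Name)
}
```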
message CephFSVolumeSource { // Required: Monitors is a collection of Ceph monitors - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it repeated string monitors = 1; // Optional: Used as the mounted root, rather than the full Ceph tree, default is / @@ -307,23 +316,23 @@ message CephFSVolumeSource { optional string path = 2; // Optional: User is the rados user name, default is admin - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional string user = 3; // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional string secretFile = 4; // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional LocalObjectReference secretRef = 5; // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional bool readOnly = 6; } @@ -333,20 +342,20 @@ message CephFSVolumeSource { // The volume must also be in the same region as the kubelet. // Cinder volumes support ownership management and SELinux relabeling. message CinderPersistentVolumeSource { - // volume id used to identify the volume in cinder - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // volume id used to identify the volume in cinder. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md optional string volumeID = 1; // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional string fsType = 2; // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional bool readOnly = 3; @@ -361,20 +370,20 @@ message CinderPersistentVolumeSource { // The volume must also be in the same region as the kubelet. // Cinder volumes support ownership management and SELinux relabeling. message CinderVolumeSource { - // volume id used to identify the volume in cinder - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // volume id used to identify the volume in cinder. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md optional string volumeID = 1; // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
- // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional string fsType = 2; // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional bool readOnly = 3; @@ -417,7 +426,7 @@ message ComponentCondition { // ComponentStatus (and ComponentStatusList) holds the cluster validation info. message ComponentStatus { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -431,7 +440,7 @@ message ComponentStatus { // Status of all the conditions for the component as a list of ComponentStatus objects. message ComponentStatusList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -442,7 +451,7 @@ message ComponentStatusList { // ConfigMap holds configuration data for pods to consume. message ConfigMap { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -487,14 +496,14 @@ message ConfigMapKeySelector { // The key to select. optional string key = 2; - // Specify whether the ConfigMap or it's key must be defined + // Specify whether the ConfigMap or its key must be defined // +optional optional bool optional = 3; } // ConfigMapList is a resource containing a list of ConfigMap objects. message ConfigMapList { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -547,7 +556,7 @@ message ConfigMapProjection { // +optional repeated KeyToPath items = 2; - // Specify whether the ConfigMap or it's keys must be defined + // Specify whether the ConfigMap or its keys must be defined // +optional optional bool optional = 4; } @@ -579,7 +588,7 @@ message ConfigMapVolumeSource { // +optional optional int32 defaultMode = 3; - // Specify whether the ConfigMap or it's keys must be defined + // Specify whether the ConfigMap or its keys must be defined // +optional optional bool optional = 4; } @@ -692,6 +701,17 @@ message Container { // +optional optional Probe readinessProbe = 11; + // StartupProbe indicates that the Pod has successfully initialized. + // If specified, no other probes are executed until this completes successfully. + // If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
+ // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + // when it might take a long time to load data or warm a cache, than during steady-state operation. + // This cannot be updated. + // This is an alpha feature enabled by the StartupProbe feature flag. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + optional Probe startupProbe = 22; + // Actions that the management system should take in response to container lifecycle events. // Cannot be updated. // +optional @@ -892,6 +912,13 @@ message ContainerStatus { // Container's ID in the format 'docker://'. // +optional optional string containerID = 8; + + // Specifies whether the container has passed its startup probe. + // Initialized as false, becomes true after startupProbe is considered successful. + // Resets to false when the container is restarted, or if kubelet loses state temporarily. + // Is always true when no startupProbe is defined. + // +optional + optional bool started = 9; } // DaemonEndpoint contains information about a single Daemon endpoint. @@ -992,7 +1019,8 @@ message EndpointAddress { // EndpointPort is a tuple that describes a single port. message EndpointPort { - // The name of this port (corresponds to ServicePort.Name). + // The name of this port. This must match the 'name' field in the + // corresponding ServicePort. // Must be a DNS_LABEL. // Optional only if one port is defined. // +optional @@ -1049,7 +1077,7 @@ message EndpointSubset { // ] message Endpoints { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -1067,7 +1095,7 @@ message Endpoints { // EndpointsList is a list of endpoints. message EndpointsList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -1132,10 +1160,197 @@ message EnvVarSource { optional SecretKeySelector secretKeyRef = 4; } +// An EphemeralContainer is a container that may be added temporarily to an existing pod for +// user-initiated activities such as debugging. Ephemeral containers have no resource or +// scheduling guarantees, and they will not be restarted when they exit or when a pod is +// removed or restarted. If an ephemeral container causes a pod to exceed its resource +// allocation, the pod may be evicted. +// Ephemeral containers may not be added by directly updating the pod spec. They must be added +// via the pod's ephemeralcontainers subresource, and they will appear in the pod spec +// once added. +// This is an alpha feature enabled by the EphemeralContainers feature flag. +message EphemeralContainer { + // Ephemeral containers have all of the fields of Container, plus additional fields + // specific to ephemeral containers. Fields in common with Container are in the + // following inlined struct so than an EphemeralContainer may easily be converted + // to a Container. + optional EphemeralContainerCommon ephemeralContainerCommon = 1; + + // If set, the name of the container from PodSpec that this ephemeral container targets. 
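The new startupProbe field shown above holds off liveness and readiness checking until a container has finished initializing, and the matching started flag then surfaces in the container status. A minimal Go sketch against the vendored corev1 types (this API generation still names the embedded probe handler struct Handler; the image and port are assumptions):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// Container with a startup probe: the kubelet runs no other probes until this
	// one succeeds; status.containerStatuses[].started flips to true afterwards.
	c := corev1.Container{
		Name:  "app",
		Image: "example.registry.local/app:1.0", // hypothetical image
		StartupProbe: &corev1.Probe{
			Handler: corev1.Handler{ // renamed ProbeHandler in much later API versions
				HTTPGet: &corev1.HTTPGetAction{
					Path: "/healthz",
					Port: intstr.FromInt(8080),
				},
			},
			// Allow up to 30 * 10s = 5 minutes of slow start-up before a restart.
			FailureThreshold: 30,
			PeriodSeconds:    10,
			SuccessThreshold: 1, // must be 1 for liveness and startup probes
		},
	}
	fmt.Println(c.StartupProbe.FailureThreshold)
}
```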
+ // The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + // If not set then the ephemeral container is run in whatever namespaces are shared + // for the pod. Note that the container runtime must support this feature. + // +optional + optional string targetContainerName = 2; +} + +// EphemeralContainerCommon is a copy of all fields in Container to be inlined in +// EphemeralContainer. This separate type allows easy conversion from EphemeralContainer +// to Container and allows separate documentation for the fields of EphemeralContainer. +// When a new field is added to Container it must be added here as well. +message EphemeralContainerCommon { + // Name of the ephemeral container specified as a DNS_LABEL. + // This name must be unique among all containers, init containers and ephemeral containers. + optional string name = 1; + + // Docker image name. + // More info: https://kubernetes.io/docs/concepts/containers/images + optional string image = 2; + + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + repeated string command = 3; + + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + repeated string args = 4; + + // Container's working directory. + // If not specified, the container runtime's default will be used, which + // might be configured in the container image. + // Cannot be updated. + // +optional + optional string workingDir = 5; + + // Ports are not allowed for ephemeral containers. + repeated ContainerPort ports = 6; + + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + repeated EnvFromSource envFrom = 19; + + // List of environment variables to set in the container. + // Cannot be updated. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated EnvVar env = 7; + + // Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + // already allocated to the pod. 
+ // +optional + optional ResourceRequirements resources = 8; + + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. + // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + repeated VolumeMount volumeMounts = 9; + + // volumeDevices is the list of block devices to be used by the container. + // This is a beta feature. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + repeated VolumeDevice volumeDevices = 21; + + // Probes are not allowed for ephemeral containers. + // +optional + optional Probe livenessProbe = 10; + + // Probes are not allowed for ephemeral containers. + // +optional + optional Probe readinessProbe = 11; + + // Probes are not allowed for ephemeral containers. + // +optional + optional Probe startupProbe = 22; + + // Lifecycle is not allowed for ephemeral containers. + // +optional + optional Lifecycle lifecycle = 12; + + // Optional: Path at which the file to which the container's termination message + // will be written is mounted into the container's filesystem. + // Message written is intended to be brief final status, such as an assertion failure message. + // Will be truncated by the node if greater than 4096 bytes. The total message length across + // all containers will be limited to 12kb. + // Defaults to /dev/termination-log. + // Cannot be updated. + // +optional + optional string terminationMessagePath = 13; + + // Indicate how the termination message should be populated. File will use the contents of + // terminationMessagePath to populate the container status message on both success and failure. + // FallbackToLogsOnError will use the last chunk of container log output if the termination + // message file is empty and the container exited with an error. + // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + // Defaults to File. + // Cannot be updated. + // +optional + optional string terminationMessagePolicy = 20; + + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + // +optional + optional string imagePullPolicy = 14; + + // SecurityContext is not allowed for ephemeral containers. + // +optional + optional SecurityContext securityContext = 15; + + // Whether this container should allocate a buffer for stdin in the container runtime. If this + // is not set, reads from stdin in the container will always result in EOF. + // Default is false. + // +optional + optional bool stdin = 16; + + // Whether the container runtime should close the stdin channel after it has been opened by + // a single attach. When stdin is true the stdin stream will remain open across multiple attach + // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + // first client attaches to stdin, and then remains open and accepts data until the client disconnects, + // at which time stdin is closed and remains closed until the container is restarted. If this + // flag is false, a container processes that reads from stdin will never receive an EOF. + // Default is false + // +optional + optional bool stdinOnce = 17; + + // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + // Default is false. 
+ // +optional + optional bool tty = 18; +} + +// A list of ephemeral containers used with the Pod ephemeralcontainers subresource. +message EphemeralContainers { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // A list of ephemeral containers associated with this pod. New ephemeral containers + // may be appended to this list, but existing ephemeral containers may not be removed + // or modified. + // +patchMergeKey=name + // +patchStrategy=merge + repeated EphemeralContainer ephemeralContainers = 2; +} + // Event is a report of an event somewhere in the cluster. message Event { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The object that this event is about. @@ -1200,7 +1415,7 @@ message Event { // EventList is a list of events. message EventList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -1218,6 +1433,7 @@ message EventSeries { optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; // State of this Series: Ongoing or Finished + // Deprecated. Planned removal for 1.18 optional string state = 3; } @@ -1408,22 +1624,22 @@ message GitRepoVolumeSource { // Glusterfs volumes do not support ownership management or SELinux relabeling. message GlusterfsPersistentVolumeSource { // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod optional string endpoints = 1; // Path is the Glusterfs volume path. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod optional string path = 2; // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod // +optional optional bool readOnly = 3; // EndpointsNamespace is the namespace that contains Glusterfs endpoint. // If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod // +optional optional string endpointsNamespace = 4; } @@ -1432,16 +1648,16 @@ message GlusterfsPersistentVolumeSource { // Glusterfs volumes do not support ownership management or SELinux relabeling. message GlusterfsVolumeSource { // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod optional string endpoints = 1; // Path is the Glusterfs volume path. 
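Ephemeral containers, defined in the messages above, are appended through the pod's ephemeralcontainers subresource rather than by editing the pod spec. A sketch of building that payload with the vendored Go types; the pod, namespace, and container names are placeholders, and the feature sits behind the alpha EphemeralContainers feature gate:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// An ephemeral debug container that shares the namespaces of the "app" container.
	dbg := corev1.EphemeralContainer{
		EphemeralContainerCommon: corev1.EphemeralContainerCommon{
			Name:    "debugger",
			Image:   "busybox:1.31",
			Command: []string{"sh"},
			Stdin:   true,
			TTY:     true,
		},
		TargetContainerName: "app", // hypothetical existing container in the pod
	}

	// Payload for the ephemeralcontainers subresource of pod "my-pod"; existing
	// entries may only be appended to, never removed or modified.
	payload := corev1.EphemeralContainers{
		ObjectMeta:          metav1.ObjectMeta{Name: "my-pod", Namespace: "default"},
		EphemeralContainers: []corev1.EphemeralContainer{dbg},
	}
	fmt.Println(len(payload.EphemeralContainers))
}
```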
- // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod optional string path = 2; // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod // +optional optional bool readOnly = 3; } @@ -1668,7 +1884,7 @@ message Lifecycle { optional Handler postStart = 1; // PreStop is called immediately before a container is terminated due to an - // API request or management event such as liveness probe failure, + // API request or management event such as liveness/startup probe failure, // preemption, resource contention, etc. The handler is not called if the // container crashes or exits. The reason for termination is passed to the // handler. The Pod's termination grace period countdown begins before the @@ -1684,12 +1900,12 @@ message Lifecycle { // LimitRange sets resource usage limits for each kind of resource in a Namespace. message LimitRange { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the limits enforced. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional LimitRangeSpec spec = 2; } @@ -1724,7 +1940,7 @@ message LimitRangeItem { // LimitRangeList is a list of LimitRange items. message LimitRangeList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -1742,7 +1958,7 @@ message LimitRangeSpec { // List holds a list of objects, which may not be known by the server. message List { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -1819,25 +2035,43 @@ message NFSVolumeSource { // Use of multiple namespaces is optional. message Namespace { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of the Namespace. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional NamespaceSpec spec = 2; // Status describes the current status of a Namespace. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional NamespaceStatus status = 3; } +// NamespaceCondition contains details about state of namespace. +message NamespaceCondition { + // Type of namespace controller condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + + // +optional + optional string reason = 5; + + // +optional + optional string message = 6; +} + // NamespaceList is a list of Namespaces. message NamespaceList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -1860,25 +2094,31 @@ message NamespaceStatus { // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ // +optional optional string phase = 1; + + // Represents the latest available observations of a namespace's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated NamespaceCondition conditions = 2; } // Node is a worker node in Kubernetes. // Each node will have a unique identifier in the cache (i.e. in etcd). message Node { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of a node. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional NodeSpec spec = 2; // Most recently observed status of the node. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional NodeStatus status = 3; } @@ -2006,7 +2246,7 @@ message NodeDaemonEndpoints { // NodeList is the whole list of all Nodes which have been registered with master. message NodeList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -2074,6 +2314,13 @@ message NodeSpec { // +optional optional string podCIDR = 1; + // podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this + // field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for + // each of IPv4 and IPv6. 
+ // +optional + // +patchStrategy=merge + repeated string podCIDRs = 7; + // ID of the node assigned by the cloud provider in the format: :// // +optional optional string providerID = 3; @@ -2126,6 +2373,9 @@ message NodeStatus { // List of addresses reachable to the node. // Queried from cloud provider, if available. // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses + // Note: This field is declared as mergeable, but the merge key is not sufficiently + // unique, which can cause data corruption when it is merged. Callers should instead + // use a full-replacement patch. See http://pr.k8s.io/79391 for an example. // +optional // +patchMergeKey=type // +patchStrategy=merge @@ -2208,7 +2458,7 @@ message ObjectFieldSelector { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object message ObjectReference { // Kind of the referent. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional string kind = 1; @@ -2232,7 +2482,7 @@ message ObjectReference { optional string apiVersion = 5; // Specific resourceVersion to which this reference is made, if any. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency // +optional optional string resourceVersion = 6; @@ -2253,7 +2503,7 @@ message ObjectReference { // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes message PersistentVolume { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -2274,7 +2524,7 @@ message PersistentVolume { // PersistentVolumeClaim is a user's request for and claim to a persistent volume message PersistentVolumeClaim { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -2318,7 +2568,7 @@ message PersistentVolumeClaimCondition { // PersistentVolumeClaimList is a list of PersistentVolumeClaim items. message PersistentVolumeClaimList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -2412,7 +2662,7 @@ message PersistentVolumeClaimVolumeSource { // PersistentVolumeList is a list of PersistentVolume items. message PersistentVolumeList { // Standard list metadata. 
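The plural podCIDRs field above enables dual-stack nodes; its first entry must repeat the legacy singular podCIDR, and at most one range per IP family is allowed. A small Go sketch with assumed example CIDRs:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Dual-stack node: podCIDRs carries one IPv4 and one IPv6 range, and its
	// 0th entry mirrors the older podCIDR field.
	node := corev1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "worker-0"}, // hypothetical node name
		Spec: corev1.NodeSpec{
			PodCIDR:  "10.244.1.0/24",
			PodCIDRs: []string{"10.244.1.0/24", "fd00:10:244:1::/64"},
		},
	}
	fmt.Println(node.Spec.PodCIDRs)
}
```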
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -2446,7 +2696,7 @@ message PersistentVolumeSource { // Glusterfs represents a Glusterfs volume that is attached to a host and // exposed to the pod. Provisioned by an admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional optional GlusterfsPersistentVolumeSource glusterfs = 4; @@ -2456,7 +2706,7 @@ message PersistentVolumeSource { optional NFSVolumeSource nfs = 5; // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional optional RBDPersistentVolumeSource rbd = 6; @@ -2465,8 +2715,8 @@ message PersistentVolumeSource { // +optional optional ISCSIPersistentVolumeSource iscsi = 7; - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // Cinder represents a cinder volume attached and mounted on kubelets host machine. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional CinderPersistentVolumeSource cinder = 8; @@ -2519,7 +2769,7 @@ message PersistentVolumeSource { optional LocalVolumeSource local = 20; // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod - // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md + // More info: https://examples.k8s.io/volumes/storageos/README.md // +optional optional StorageOSPersistentVolumeSource storageos = 21; @@ -2613,12 +2863,12 @@ message PhotonPersistentDiskVolumeSource { // by clients and scheduled onto hosts. message Pod { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the pod. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional PodSpec spec = 2; @@ -2626,7 +2876,7 @@ message Pod { // This data may not be up to date. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional PodStatus status = 3; } @@ -2832,15 +3082,23 @@ message PodExecOptions { repeated string command = 6; } +// IP address information for entries in the (plural) PodIPs field. +// Each entry includes: +// IP: An IP address allocated to the pod. Routable at least within the cluster. +message PodIP { + // ip is an IP address (IPv4 or IPv6) assigned to the pod + optional string ip = 1; +} + // PodList is a list of Pods. message PodList { // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of pods. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md repeated Pod items = 2; } @@ -2927,6 +3185,12 @@ message PodSecurityContext { // +optional optional SELinuxOptions seLinuxOptions = 1; + // The Windows specific settings applied to all containers. + // If unspecified, the options within a container's SecurityContext will be used. + // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + optional WindowsSecurityContextOptions windowsOptions = 8; + // The UID to run the entrypoint of the container process. // Defaults to user specified in image metadata if unspecified. // May also be set in SecurityContext. If set in both SecurityContext and @@ -2998,7 +3262,7 @@ message PodSpec { // init container fails, the pod is considered to have failed and is handled according // to its restartPolicy. The name for an init container or normal container must be // unique among all containers. - // Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. + // Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. // The resourceRequirements of an init container are taken into account during scheduling // by finding the highest request/limit for each resource type, and then using the max of // of that value or the sum of the normal containers. Limits are applied to init containers @@ -3018,6 +3282,16 @@ message PodSpec { // +patchStrategy=merge repeated Container containers = 2; + // List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + // pod to perform user-initiated actions such as debugging. This list cannot be specified when + // creating a pod, and it cannot be modified by updating the pod spec. In order to add an + // ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + // This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated EphemeralContainer ephemeralContainers = 34; + // Restart policy for all containers within the pod. // One of Always, OnFailure, Never. // Default to Always. @@ -3185,7 +3459,7 @@ message PodSpec { // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an // empty definition that uses the default runtime handler. // More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md - // This is an alpha feature and may change in the future. + // This is a beta feature as of Kubernetes v1.14. // +optional optional string runtimeClassName = 29; @@ -3194,6 +3468,37 @@ message PodSpec { // Optional: Defaults to true. // +optional optional bool enableServiceLinks = 30; + + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. 
+ // +optional + optional string preemptionPolicy = 31; + + // Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + // This field will be autopopulated at admission time by the RuntimeClass admission controller. If + // the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + // The RuntimeClass admission controller will reject Pod create requests which have the overhead already + // set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + // defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + // More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md + // This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature. + // +optional + map overhead = 32; + + // TopologySpreadConstraints describes how a group of pods ought to spread across topology + // domains. Scheduler will schedule pods in a way which abides by the constraints. + // This field is alpha-level and is only honored by clusters that enables the EvenPodsSpread + // feature. + // All topologySpreadConstraints are ANDed. + // +optional + // +patchMergeKey=topologyKey + // +patchStrategy=merge + // +listType=map + // +listMapKey=topologyKey + // +listMapKey=whenUnsatisfiable + repeated TopologySpreadConstraint topologySpreadConstraints = 33; } // PodStatus represents information about the status of a pod. Status may trail the actual @@ -3256,6 +3561,14 @@ message PodStatus { // +optional optional string podIP = 6; + // podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must + // match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list + // is empty if no IPs have been allocated yet. + // +optional + // +patchStrategy=merge + // +patchMergeKey=ip + repeated PodIP podIPs = 12; + // RFC 3339 date and time at which the object was acknowledged by the Kubelet. // This is before the Kubelet pulled the container image(s) for the pod. // +optional @@ -3278,12 +3591,17 @@ message PodStatus { // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md // +optional optional string qosClass = 9; + + // Status for any ephemeral containers that have run in this pod. + // This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature. + // +optional + repeated ContainerStatus ephemeralContainerStatuses = 13; } // PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded message PodStatusResult { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -3291,7 +3609,7 @@ message PodStatusResult { // This data may not be up to date. // Populated by the system. // Read-only. 
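The pod-spec additions above (preemptionPolicy, overhead, topologySpreadConstraints) and the plural podIPs status field are plain struct fields on the vendored types. A hedged Go sketch: the zone key, label selector, image, and resource figures are illustrative, and each field is honored only when its feature gate (NonPreemptingPriority, PodOverhead, EvenPodsSpread) is enabled:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	never := corev1.PreemptNever // "Never": this pod will not preempt lower-priority pods

	spec := corev1.PodSpec{
		Containers: []corev1.Container{
			{Name: "app", Image: "example.registry.local/app:1.0"}, // hypothetical image
		},
		// Spread replicas evenly across zones; a skew greater than 1 blocks scheduling.
		TopologySpreadConstraints: []corev1.TopologySpreadConstraint{{
			MaxSkew:           1,
			TopologyKey:       "topology.kubernetes.io/zone",
			WhenUnsatisfiable: corev1.DoNotSchedule,
			LabelSelector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"app": "etcd"},
			},
		}},
		// RuntimeClass overhead the scheduler adds on top of the container requests.
		Overhead: corev1.ResourceList{
			corev1.ResourceCPU:    resource.MustParse("250m"),
			corev1.ResourceMemory: resource.MustParse("64Mi"),
		},
		PreemptionPolicy: &never,
	}
	fmt.Println(spec.TopologySpreadConstraints[0].TopologyKey)
}
```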
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional PodStatus status = 2; } @@ -3299,12 +3617,12 @@ message PodStatusResult { // PodTemplate describes a template for creating copies of a predefined pod. message PodTemplate { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Template defines the pods that will be created from this pod template. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional PodTemplateSpec template = 2; } @@ -3312,7 +3630,7 @@ message PodTemplate { // PodTemplateList is a list of PodTemplates. message PodTemplateList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -3323,12 +3641,12 @@ message PodTemplateList { // PodTemplateSpec describes the data a pod should have when created from a template message PodTemplateSpec { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the pod. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional PodSpec spec = 2; } @@ -3408,7 +3726,7 @@ message Probe { optional int32 periodSeconds = 4; // Minimum consecutive successes for the probe to be considered successful after having failed. - // Defaults to 1. Must be 1 for liveness. Minimum value is 1. + // Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. // +optional optional int32 successThreshold = 5; @@ -3468,11 +3786,11 @@ message QuobyteVolumeSource { // RBD volumes support ownership management and SELinux relabeling. message RBDPersistentVolumeSource { // A collection of Ceph monitors. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it repeated string monitors = 1; // The rados image name. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it optional string image = 2; // Filesystem type of the volume that you want to mount. @@ -3485,32 +3803,32 @@ message RBDPersistentVolumeSource { // The rados pool name. // Default is rbd. 
- // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional string pool = 4; // The rados user name. // Default is admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional string user = 5; // Keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional string keyring = 6; // SecretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional SecretReference secretRef = 7; // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional bool readOnly = 8; } @@ -3519,11 +3837,11 @@ message RBDPersistentVolumeSource { // RBD volumes support ownership management and SELinux relabeling. message RBDVolumeSource { // A collection of Ceph monitors. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it repeated string monitors = 1; // The rados image name. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it optional string image = 2; // Filesystem type of the volume that you want to mount. @@ -3536,32 +3854,32 @@ message RBDVolumeSource { // The rados pool name. // Default is rbd. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional string pool = 4; // The rados user name. // Default is admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional string user = 5; // Keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional string keyring = 6; // SecretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional LocalObjectReference secretRef = 7; // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. 
- // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional bool readOnly = 8; } @@ -3569,7 +3887,7 @@ message RBDVolumeSource { // RangeAllocation is not a public type. message RangeAllocation { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -3584,12 +3902,12 @@ message RangeAllocation { message ReplicationController { // If the Labels of a ReplicationController are empty, they are defaulted to // be the same as the Pod(s) that the replication controller manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the replication controller. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ReplicationControllerSpec spec = 2; @@ -3597,7 +3915,7 @@ message ReplicationController { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ReplicationControllerStatus status = 3; } @@ -3626,7 +3944,7 @@ message ReplicationControllerCondition { // ReplicationControllerList is a collection of replication controllers. message ReplicationControllerList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -3712,17 +4030,17 @@ message ResourceFieldSelector { // ResourceQuota sets aggregate quota restrictions enforced per namespace message ResourceQuota { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the desired quota. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ResourceQuotaSpec spec = 2; // Status defines the actual enforced quota and its current usage. 
- // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ResourceQuotaStatus status = 3; } @@ -3730,7 +4048,7 @@ message ResourceQuota { // ResourceQuotaList is a list of ResourceQuota items. message ResourceQuotaList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -3926,7 +4244,7 @@ message ScopedResourceSelectorRequirement { // the Data field must be less than MaxSecretSize bytes. message Secret { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -3972,7 +4290,7 @@ message SecretKeySelector { // The key of the secret to select from. Must be a valid secret key. optional string key = 2; - // Specify whether the Secret or it's key must be defined + // Specify whether the Secret or its key must be defined // +optional optional bool optional = 3; } @@ -3980,7 +4298,7 @@ message SecretKeySelector { // SecretList is a list of Secret. message SecretList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -4054,7 +4372,7 @@ message SecretVolumeSource { // +optional optional int32 defaultMode = 3; - // Specify whether the Secret or it's keys must be defined + // Specify whether the Secret or its keys must be defined // +optional optional bool optional = 4; } @@ -4081,6 +4399,12 @@ message SecurityContext { // +optional optional SELinuxOptions seLinuxOptions = 3; + // The Windows specific settings applied to all containers. + // If unspecified, the options from the PodSecurityContext will be used. + // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + optional WindowsSecurityContextOptions windowsOptions = 10; + // The UID to run the entrypoint of the container process. // Defaults to user specified in image metadata if unspecified. // May also be set in PodSecurityContext. If set in both SecurityContext and @@ -4138,19 +4462,19 @@ message SerializedReference { // will answer requests sent through the proxy. message Service { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of a service. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ServiceSpec spec = 2; // Most recently observed status of the service. 
// Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ServiceStatus status = 3; } @@ -4161,7 +4485,7 @@ message Service { // * a set of secrets message ServiceAccount { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -4188,7 +4512,7 @@ message ServiceAccount { // ServiceAccountList is a list of ServiceAccount objects message ServiceAccountList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -4226,7 +4550,7 @@ message ServiceAccountTokenProjection { // ServiceList holds a list of services. message ServiceList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -4237,8 +4561,9 @@ message ServiceList { // ServicePort contains information on service's port. message ServicePort { // The name of this port within the service. This must be a DNS_LABEL. - // All ports within a ServiceSpec must have unique names. This maps to - // the 'Name' field in EndpointPort objects. + // All ports within a ServiceSpec must have unique names. When considering + // the endpoints for a Service, this must match the 'name' field in the + // EndpointPort. // Optional if only one ServicePort is defined on this service. // +optional optional string name = 1; @@ -4398,6 +4723,16 @@ message ServiceSpec { // sessionAffinityConfig contains the configurations of session affinity. // +optional optional SessionAffinityConfig sessionAffinityConfig = 14; + + // ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. + // IPv6). If a specific IP family is requested, the clusterIP field will be allocated from that family, if it is + // available in the cluster. If no IP family is requested, the cluster's primary IP family will be used. + // Other IP fields (loadBalancerIP, loadBalancerSourceRanges, externalIPs) and controllers which + // allocate external load-balancers should use the same IP family. Endpoints for this Service will be of + // this family. This field is immutable after creation. Assigning a ServiceIPFamily not available in the + // cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment. + // +optional + optional string ipFamily = 15; } // ServiceStatus represents the current status of a service. @@ -4576,6 +4911,59 @@ message TopologySelectorTerm { repeated TopologySelectorLabelRequirement matchLabelExpressions = 1; } +// TopologySpreadConstraint specifies how to spread matching pods among the given topology. 
+message TopologySpreadConstraint { + // MaxSkew describes the degree to which pods may be unevenly distributed. + // It's the maximum permitted difference between the number of matching pods in + // any two topology domains of a given topology type. + // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + // labelSelector spread as 1/1/0: + // +-------+-------+-------+ + // | zone1 | zone2 | zone3 | + // +-------+-------+-------+ + // | P | P | | + // +-------+-------+-------+ + // - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; + // scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) + // violate MaxSkew(1). + // - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + // It's a required field. Default value is 1 and 0 is not allowed. + optional int32 maxSkew = 1; + + // TopologyKey is the key of node labels. Nodes that have a label with this key + // and identical values are considered to be in the same topology. + // We consider each as a "bucket", and try to put balanced number + // of pods into each bucket. + // It's a required field. + optional string topologyKey = 2; + + // WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + // the spread constraint. + // - DoNotSchedule (default) tells the scheduler not to schedule it + // - ScheduleAnyway tells the scheduler to still schedule it + // It's considered as "Unsatisfiable" if and only if placing incoming pod on any + // topology violates "MaxSkew". + // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + // labelSelector spread as 3/1/1: + // +-------+-------+-------+ + // | zone1 | zone2 | zone3 | + // +-------+-------+-------+ + // | P P P | P | P | + // +-------+-------+-------+ + // If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + // to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + // MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + // won't make it *more* imbalanced. + // It's a required field. + optional string whenUnsatisfiable = 3; + + // LabelSelector is used to find matching pods. + // Pods that match this label selector are counted to determine the number of pods + // in their corresponding topology domain. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 4; +} + // TypedLocalObjectReference contains enough information to let you locate the // typed referenced object inside the same namespace. message TypedLocalObjectReference { @@ -4644,7 +5032,7 @@ message VolumeMount { // Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. // Defaults to "" (volume's root). // SubPathExpr and SubPath are mutually exclusive. - // This field is alpha in 1.14. + // This field is beta in 1.15. // +optional optional string subPathExpr = 6; } @@ -4724,12 +5112,12 @@ message VolumeSource { // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md + // More info: https://examples.k8s.io/volumes/iscsi/README.md // +optional optional ISCSIVolumeSource iscsi = 8; // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. 
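The TopologySpreadConstraint message introduced above (mirrored by the Go struct later in this diff) expresses even spreading through maxSkew, topologyKey, whenUnsatisfiable and an optional labelSelector. Below is a minimal sketch of a PodSpec that keeps the per-zone difference at one pod; the zone label key is illustrative, and the feature is alpha behind the EvenPodsSpread gate.

package main

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// spreadAcrossZones returns a PodSpec whose matching pods may differ by at
// most one replica between any two zones; pods that cannot satisfy this stay
// Pending because WhenUnsatisfiable is DoNotSchedule.
func spreadAcrossZones(labels map[string]string, containers []corev1.Container) corev1.PodSpec {
	return corev1.PodSpec{
		Containers: containers,
		TopologySpreadConstraints: []corev1.TopologySpreadConstraint{{
			MaxSkew:           1,
			TopologyKey:       "topology.kubernetes.io/zone", // example node label key
			WhenUnsatisfiable: corev1.DoNotSchedule,
			LabelSelector:     &metav1.LabelSelector{MatchLabels: labels},
		}},
	}
}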
- // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional optional GlusterfsVolumeSource glusterfs = 9; @@ -4740,7 +5128,7 @@ message VolumeSource { optional PersistentVolumeClaimVolumeSource persistentVolumeClaim = 10; // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional optional RBDVolumeSource rbd = 11; @@ -4749,8 +5137,8 @@ message VolumeSource { // +optional optional FlexVolumeSource flexVolume = 12; - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // Cinder represents a cinder volume attached and mounted on kubelets host machine. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional CinderVolumeSource cinder = 13; @@ -4843,3 +5231,26 @@ message WeightedPodAffinityTerm { optional PodAffinityTerm podAffinityTerm = 2; } +// WindowsSecurityContextOptions contain Windows-specific options and credentials. +message WindowsSecurityContextOptions { + // GMSACredentialSpecName is the name of the GMSA credential spec to use. + // This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag. + // +optional + optional string gmsaCredentialSpecName = 1; + + // GMSACredentialSpec is where the GMSA admission webhook + // (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + // GMSA credential spec named by the GMSACredentialSpecName field. + // This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag. + // +optional + optional string gmsaCredentialSpec = 2; + + // The UserName in Windows to run the entrypoint of the container process. + // Defaults to the user specified in image metadata if unspecified. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // This field is alpha-level and it is only honored by servers that enable the WindowsRunAsUserName feature flag. + // +optional + optional string runAsUserName = 3; +} + diff --git a/vendor/k8s.io/api/core/v1/register.go b/vendor/k8s.io/api/core/v1/register.go index 1aac0cb41..8da1ddead 100644 --- a/vendor/k8s.io/api/core/v1/register.go +++ b/vendor/k8s.io/api/core/v1/register.go @@ -88,6 +88,7 @@ func addKnownTypes(scheme *runtime.Scheme) error { &RangeAllocation{}, &ConfigMap{}, &ConfigMapList{}, + &EphemeralContainers{}, ) // Add common types diff --git a/vendor/k8s.io/api/core/v1/taint.go b/vendor/k8s.io/api/core/v1/taint.go index 7b606a309..db71bd2fd 100644 --- a/vendor/k8s.io/api/core/v1/taint.go +++ b/vendor/k8s.io/api/core/v1/taint.go @@ -24,8 +24,14 @@ func (t *Taint) MatchTaint(taintToMatch *Taint) bool { return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect } -// taint.ToString() converts taint struct to string in format key=value:effect or key:effect. +// taint.ToString() converts taint struct to string in format '=:', '=:', ':', or ''. 
func (t *Taint) ToString() string { + if len(t.Effect) == 0 { + if len(t.Value) == 0 { + return fmt.Sprintf("%v", t.Key) + } + return fmt.Sprintf("%v=%v:", t.Key, t.Value) + } if len(t.Value) == 0 { return fmt.Sprintf("%v:%v", t.Key, t.Effect) } diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index 3af134400..fcd455402 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -87,11 +87,11 @@ type VolumeSource struct { NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"` // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md + // More info: https://examples.k8s.io/volumes/iscsi/README.md // +optional ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"` // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"` // PersistentVolumeClaimVolumeSource represents a reference to a @@ -100,15 +100,15 @@ type VolumeSource struct { // +optional PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"` // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"` // FlexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. // +optional FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // Cinder represents a cinder volume attached and mounted on kubelets host machine. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"` // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime @@ -192,7 +192,7 @@ type PersistentVolumeSource struct { HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"` // Glusterfs represents a Glusterfs volume that is attached to a host and // exposed to the pod. Provisioned by an admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional Glusterfs *GlusterfsPersistentVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"` // NFS represents an NFS mount on the host. Provisioned by an admin. @@ -200,15 +200,15 @@ type PersistentVolumeSource struct { // +optional NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"` // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. 
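The taint.go change above teaches ToString to handle taints whose effect and/or value is empty. Below is a short sketch of the four string shapes the updated method now produces; the key and value are made up.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	full := corev1.Taint{Key: "dedicated", Value: "etcd", Effect: corev1.TaintEffectNoSchedule}
	fmt.Println(full.ToString()) // dedicated=etcd:NoSchedule

	noValue := corev1.Taint{Key: "dedicated", Effect: corev1.TaintEffectNoSchedule}
	fmt.Println(noValue.ToString()) // dedicated:NoSchedule

	// The two cases below exercise the new early-return branches added above.
	noEffect := corev1.Taint{Key: "dedicated", Value: "etcd"}
	fmt.Println(noEffect.ToString()) // dedicated=etcd:

	keyOnly := corev1.Taint{Key: "dedicated"}
	fmt.Println(keyOnly.ToString()) // dedicated
}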
- // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional RBD *RBDPersistentVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"` // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. // +optional ISCSI *ISCSIPersistentVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"` - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // Cinder represents a cinder volume attached and mounted on kubelets host machine. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional Cinder *CinderPersistentVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"` // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime @@ -248,7 +248,7 @@ type PersistentVolumeSource struct { // +optional Local *LocalVolumeSource `json:"local,omitempty" protobuf:"bytes,20,opt,name=local"` // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod - // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md + // More info: https://examples.k8s.io/volumes/storageos/README.md // +optional StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"` // CSI represents storage that is handled by an external CSI driver (Beta feature). @@ -275,7 +275,7 @@ const ( type PersistentVolume struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -390,7 +390,7 @@ type PersistentVolumeStatus struct { type PersistentVolumeList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of persistent volumes. @@ -405,7 +405,7 @@ type PersistentVolumeList struct { type PersistentVolumeClaim struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -427,7 +427,7 @@ type PersistentVolumeClaim struct { type PersistentVolumeClaimList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // A list of persistent volume claims. 
@@ -625,16 +625,16 @@ type EmptyDirVolumeSource struct { // Glusterfs volumes do not support ownership management or SELinux relabeling. type GlusterfsVolumeSource struct { // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"` // Path is the Glusterfs volume path. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod Path string `json:"path" protobuf:"bytes,2,opt,name=path"` // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` } @@ -643,22 +643,22 @@ type GlusterfsVolumeSource struct { // Glusterfs volumes do not support ownership management or SELinux relabeling. type GlusterfsPersistentVolumeSource struct { // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"` // Path is the Glusterfs volume path. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod Path string `json:"path" protobuf:"bytes,2,opt,name=path"` // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` // EndpointsNamespace is the namespace that contains Glusterfs endpoint. // If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod // +optional EndpointsNamespace *string `json:"endpointsNamespace,omitempty" protobuf:"bytes,4,opt,name=endpointsNamespace"` } @@ -667,10 +667,10 @@ type GlusterfsPersistentVolumeSource struct { // RBD volumes support ownership management and SELinux relabeling. type RBDVolumeSource struct { // A collection of Ceph monitors. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // The rados image name. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"` // Filesystem type of the volume that you want to mount. 
// Tip: Ensure that the filesystem type is supported by the host operating system. @@ -681,28 +681,28 @@ type RBDVolumeSource struct { FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` // The rados pool name. // Default is rbd. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` // The rados user name. // Default is admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` // Keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` // SecretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"` // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"` } @@ -711,10 +711,10 @@ type RBDVolumeSource struct { // RBD volumes support ownership management and SELinux relabeling. type RBDPersistentVolumeSource struct { // A collection of Ceph monitors. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // The rados image name. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"` // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. @@ -725,28 +725,28 @@ type RBDPersistentVolumeSource struct { FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` // The rados pool name. // Default is rbd. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` // The rados user name. // Default is admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` // Keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. 
- // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` // SecretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"` // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"` } @@ -756,18 +756,18 @@ type RBDPersistentVolumeSource struct { // The volume must also be in the same region as the kubelet. // Cinder volumes support ownership management and SELinux relabeling. type CinderVolumeSource struct { - // volume id used to identify the volume in cinder - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // volume id used to identify the volume in cinder. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` // Optional: points to a secret object containing parameters used to connect @@ -781,18 +781,18 @@ type CinderVolumeSource struct { // The volume must also be in the same region as the kubelet. // Cinder volumes support ownership management and SELinux relabeling. type CinderPersistentVolumeSource struct { - // volume id used to identify the volume in cinder - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // volume id used to identify the volume in cinder. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. 
- // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` // Optional: points to a secret object containing parameters used to connect @@ -805,26 +805,26 @@ type CinderPersistentVolumeSource struct { // Cephfs volumes do not support ownership management or SELinux relabeling. type CephFSVolumeSource struct { // Required: Monitors is a collection of Ceph monitors - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // Optional: Used as the mounted root, rather than the full Ceph tree, default is / // +optional Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` // Optional: User is the rados user name, default is admin - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"` // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` } @@ -844,26 +844,26 @@ type SecretReference struct { // Cephfs volumes do not support ownership management or SELinux relabeling. 
type CephFSPersistentVolumeSource struct { // Required: Monitors is a collection of Ceph monitors - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // Optional: Used as the mounted root, rather than the full Ceph tree, default is / // +optional Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` // Optional: User is the rados user name, default is admin - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"` // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` } @@ -1094,7 +1094,7 @@ type SecretVolumeSource struct { // mode, like fsGroup, and the result can be other mode bits set. // +optional DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"bytes,3,opt,name=defaultMode"` - // Specify whether the Secret or it's keys must be defined + // Specify whether the Secret or its keys must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` } @@ -1520,7 +1520,7 @@ type ConfigMapVolumeSource struct { // mode, like fsGroup, and the result can be other mode bits set. // +optional DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,3,opt,name=defaultMode"` - // Specify whether the ConfigMap or it's keys must be defined + // Specify whether the ConfigMap or its keys must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` } @@ -1547,7 +1547,7 @@ type ConfigMapProjection struct { // relative and may not contain the '..' path or start with '..'. // +optional Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` - // Specify whether the ConfigMap or it's keys must be defined + // Specify whether the ConfigMap or its keys must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` } @@ -1692,6 +1692,15 @@ type CSIPersistentVolumeSource struct { // secret object contains more than one secret, all secrets are passed. 
// +optional NodePublishSecretRef *SecretReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,8,opt,name=nodePublishSecretRef"` + + // ControllerExpandSecretRef is a reference to the secret object containing + // sensitive information to pass to the CSI driver to complete the CSI + // ControllerExpandVolume call. + // This is an alpha field and requires enabling ExpandCSIVolumes feature gate. + // This field is optional, and may be empty if no secret is required. If the + // secret object contains more than one secret, all secrets are passed. + // +optional + ControllerExpandSecretRef *SecretReference `json:"controllerExpandSecretRef,omitempty" protobuf:"bytes,9,opt,name=controllerExpandSecretRef"` } // Represents a source location of a volume to mount, managed by an external CSI driver @@ -1775,7 +1784,7 @@ type VolumeMount struct { // Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. // Defaults to "" (volume's root). // SubPathExpr and SubPath are mutually exclusive. - // This field is alpha in 1.14. + // This field is beta in 1.15. // +optional SubPathExpr string `json:"subPathExpr,omitempty" protobuf:"bytes,6,opt,name=subPathExpr"` } @@ -1880,7 +1889,7 @@ type ConfigMapKeySelector struct { LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` // The key to select. Key string `json:"key" protobuf:"bytes,2,opt,name=key"` - // Specify whether the ConfigMap or it's key must be defined + // Specify whether the ConfigMap or its key must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"` } @@ -1891,7 +1900,7 @@ type SecretKeySelector struct { LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` // The key of the secret to select from. Must be a valid secret key. Key string `json:"key" protobuf:"bytes,2,opt,name=key"` - // Specify whether the Secret or it's key must be defined + // Specify whether the Secret or its key must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"` } @@ -2016,7 +2025,7 @@ type Probe struct { // +optional PeriodSeconds int32 `json:"periodSeconds,omitempty" protobuf:"varint,4,opt,name=periodSeconds"` // Minimum consecutive successes for the probe to be considered successful after having failed. - // Defaults to 1. Must be 1 for liveness. Minimum value is 1. + // Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. // +optional SuccessThreshold int32 `json:"successThreshold,omitempty" protobuf:"varint,5,opt,name=successThreshold"` // Minimum consecutive failures for the probe to be considered failed after having succeeded. @@ -2037,6 +2046,16 @@ const ( PullIfNotPresent PullPolicy = "IfNotPresent" ) +// PreemptionPolicy describes a policy for if/when to preempt a pod. +type PreemptionPolicy string + +const ( + // PreemptLowerPriority means that pod can preempt other pods with lower priority. + PreemptLowerPriority PreemptionPolicy = "PreemptLowerPriority" + // PreemptNever means that pod never preempts other pods with lower priority. + PreemptNever PreemptionPolicy = "Never" +) + // TerminationMessagePolicy describes how termination messages are retrieved from a container. 
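The PreemptionPolicy type and its PreemptLowerPriority and Never constants are introduced above; the matching PodSpec field follows later in this diff. Below is a minimal sketch of marking a pod as non-preempting, assuming a cluster with the alpha NonPreemptingPriority feature gate enabled.

package main

import (
	corev1 "k8s.io/api/core/v1"
)

// nonPreempting marks the given PodSpec so that, even at high priority, its
// pod never preempts lower-priority pods (PreemptionPolicy: Never).
func nonPreempting(spec *corev1.PodSpec) {
	policy := corev1.PreemptNever
	spec.PreemptionPolicy = &policy
}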
type TerminationMessagePolicy string @@ -2177,6 +2196,16 @@ type Container struct { // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` + // StartupProbe indicates that the Pod has successfully initialized. + // If specified, no other probes are executed until this completes successfully. + // If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + // when it might take a long time to load data or warm a cache, than during steady-state operation. + // This cannot be updated. + // This is an alpha feature enabled by the StartupProbe feature flag. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + StartupProbe *Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"` // Actions that the management system should take in response to container lifecycle events. // Cannot be updated. // +optional @@ -2263,7 +2292,7 @@ type Lifecycle struct { // +optional PostStart *Handler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"` // PreStop is called immediately before a container is terminated due to an - // API request or management event such as liveness probe failure, + // API request or management event such as liveness/startup probe failure, // preemption, resource contention, etc. The handler is not called if the // container crashes or exits. The reason for termination is passed to the // handler. The Pod's termination grace period countdown begins before the @@ -2371,6 +2400,12 @@ type ContainerStatus struct { // Container's ID in the format 'docker://'. // +optional ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"` + // Specifies whether the container has passed its startup probe. + // Initialized as false, becomes true after startupProbe is considered successful. + // Resets to false when the container is restarted, or if kubelet loses state temporarily. + // Is always true when no startupProbe is defined. + // +optional + Started *bool `json:"started,omitempty" protobuf:"varint,9,opt,name=started"` } // PodPhase is a label for the condition of a pod at the current time. @@ -2401,18 +2436,22 @@ type PodConditionType string // These are valid conditions of pod. const ( - // PodScheduled represents status of the scheduling process for this pod. - PodScheduled PodConditionType = "PodScheduled" + // ContainersReady indicates whether all containers in the pod are ready. + ContainersReady PodConditionType = "ContainersReady" + // PodInitialized means that all init containers in the pod have started successfully. + PodInitialized PodConditionType = "Initialized" // PodReady means the pod is able to service requests and should be added to the // load balancing pools of all matching services. PodReady PodConditionType = "Ready" - // PodInitialized means that all init containers in the pod have started successfully. - PodInitialized PodConditionType = "Initialized" + // PodScheduled represents status of the scheduling process for this pod. + PodScheduled PodConditionType = "PodScheduled" +) + +// These are reasons for a pod's transition to a condition. 
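The Container changes above add a StartupProbe that suppresses the other probes until it succeeds, plus a Started flag on ContainerStatus. Below is a minimal sketch that pairs a patient startup probe with a stricter liveness probe; it assumes the 1.16-era API in which the probe handler is the embedded Handler field, and the image, path and port are placeholders.

package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// slowStartingContainer allows up to 30 * 10s = 5 minutes for first startup;
// the liveness probe only takes effect once the startup probe has succeeded.
func slowStartingContainer() corev1.Container {
	healthz := corev1.Handler{
		HTTPGet: &corev1.HTTPGetAction{Path: "/healthz", Port: intstr.FromInt(8080)},
	}
	return corev1.Container{
		Name:  "app",
		Image: "example.registry/app:latest",
		StartupProbe: &corev1.Probe{
			Handler:          healthz,
			PeriodSeconds:    10,
			FailureThreshold: 30,
		},
		LivenessProbe: &corev1.Probe{
			Handler:          healthz,
			PeriodSeconds:    10,
			FailureThreshold: 3,
		},
	}
}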
+const ( // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler // can't schedule the pod right now, for example due to insufficient resources in the cluster. PodReasonUnschedulable = "Unschedulable" - // ContainersReady indicates whether all containers in the pod are ready. - ContainersReady PodConditionType = "ContainersReady" ) // PodCondition contains details for the current condition of this pod. @@ -2802,7 +2841,7 @@ type PodSpec struct { // init container fails, the pod is considered to have failed and is handled according // to its restartPolicy. The name for an init container or normal container must be // unique among all containers. - // Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. + // Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. // The resourceRequirements of an init container are taken into account during scheduling // by finding the highest request/limit for each resource type, and then using the max of // of that value or the sum of the normal containers. Limits are applied to init containers @@ -2820,6 +2859,15 @@ type PodSpec struct { // +patchMergeKey=name // +patchStrategy=merge Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"` + // List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + // pod to perform user-initiated actions such as debugging. This list cannot be specified when + // creating a pod, and it cannot be modified by updating the pod spec. In order to add an + // ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + // This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + EphemeralContainers []EphemeralContainer `json:"ephemeralContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,34,rep,name=ephemeralContainers"` // Restart policy for all containers within the pod. // One of Always, OnFailure, Never. // Default to Always. @@ -2953,7 +3001,6 @@ type PodSpec struct { // configuration based on DNSPolicy. // +optional DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,26,opt,name=dnsConfig"` - // If specified, all readiness gates will be evaluated for pod readiness. // A pod is ready when all its containers are ready AND // all conditions specified in the readiness gates have status equal to "True" @@ -2965,7 +3012,7 @@ type PodSpec struct { // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an // empty definition that uses the default runtime handler. // More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md - // This is an alpha feature and may change in the future. + // This is a beta feature as of Kubernetes v1.14. // +optional RuntimeClassName *string `json:"runtimeClassName,omitempty" protobuf:"bytes,29,opt,name=runtimeClassName"` // EnableServiceLinks indicates whether information about services should be injected into pod's @@ -2973,6 +3020,95 @@ type PodSpec struct { // Optional: Defaults to true. // +optional EnableServiceLinks *bool `json:"enableServiceLinks,omitempty" protobuf:"varint,30,opt,name=enableServiceLinks"` + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. 
+ // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + PreemptionPolicy *PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,31,opt,name=preemptionPolicy"` + // Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + // This field will be autopopulated at admission time by the RuntimeClass admission controller. If + // the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + // The RuntimeClass admission controller will reject Pod create requests which have the overhead already + // set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + // defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + // More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md + // This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature. + // +optional + Overhead ResourceList `json:"overhead,omitempty" protobuf:"bytes,32,opt,name=overhead"` + // TopologySpreadConstraints describes how a group of pods ought to spread across topology + // domains. Scheduler will schedule pods in a way which abides by the constraints. + // This field is alpha-level and is only honored by clusters that enables the EvenPodsSpread + // feature. + // All topologySpreadConstraints are ANDed. + // +optional + // +patchMergeKey=topologyKey + // +patchStrategy=merge + // +listType=map + // +listMapKey=topologyKey + // +listMapKey=whenUnsatisfiable + TopologySpreadConstraints []TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty" patchStrategy:"merge" patchMergeKey:"topologyKey" protobuf:"bytes,33,opt,name=topologySpreadConstraints"` +} + +type UnsatisfiableConstraintAction string + +const ( + // DoNotSchedule instructs the scheduler not to schedule the pod + // when constraints are not satisfied. + DoNotSchedule UnsatisfiableConstraintAction = "DoNotSchedule" + // ScheduleAnyway instructs the scheduler to schedule the pod + // even if constraints are not satisfied. + ScheduleAnyway UnsatisfiableConstraintAction = "ScheduleAnyway" +) + +// TopologySpreadConstraint specifies how to spread matching pods among the given topology. +type TopologySpreadConstraint struct { + // MaxSkew describes the degree to which pods may be unevenly distributed. + // It's the maximum permitted difference between the number of matching pods in + // any two topology domains of a given topology type. + // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + // labelSelector spread as 1/1/0: + // +-------+-------+-------+ + // | zone1 | zone2 | zone3 | + // +-------+-------+-------+ + // | P | P | | + // +-------+-------+-------+ + // - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; + // scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) + // violate MaxSkew(1). + // - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + // It's a required field. Default value is 1 and 0 is not allowed. + MaxSkew int32 `json:"maxSkew" protobuf:"varint,1,opt,name=maxSkew"` + // TopologyKey is the key of node labels. Nodes that have a label with this key + // and identical values are considered to be in the same topology. 
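For the preemptionPolicy and overhead fields introduced above, a hedged sketch of a non-preempting pod spec; note that overhead is normally populated by the RuntimeClass admission controller rather than by users, and all values and names here are illustrative.

package examples

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// nonPreemptingPodSpec sketches a pod that opts out of preempting lower-priority
// pods and carries a RuntimeClass-style resource overhead.
func nonPreemptingPodSpec() corev1.PodSpec {
	never := corev1.PreemptionPolicy("Never")
	return corev1.PodSpec{
		Containers:       []corev1.Container{{Name: "batch", Image: "example/batch:latest"}},
		PreemptionPolicy: &never, // honored only with the NonPreemptingPriority feature gate
		Overhead: corev1.ResourceList{ // usually set by the RuntimeClass admission controller
			corev1.ResourceCPU:    resource.MustParse("250m"),
			corev1.ResourceMemory: resource.MustParse("120Mi"),
		},
	}
}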
+ // We consider each as a "bucket", and try to put balanced number + // of pods into each bucket. + // It's a required field. + TopologyKey string `json:"topologyKey" protobuf:"bytes,2,opt,name=topologyKey"` + // WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + // the spread constraint. + // - DoNotSchedule (default) tells the scheduler not to schedule it + // - ScheduleAnyway tells the scheduler to still schedule it + // It's considered as "Unsatisfiable" if and only if placing incoming pod on any + // topology violates "MaxSkew". + // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + // labelSelector spread as 3/1/1: + // +-------+-------+-------+ + // | zone1 | zone2 | zone3 | + // +-------+-------+-------+ + // | P P P | P | P | + // +-------+-------+-------+ + // If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + // to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + // MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + // won't make it *more* imbalanced. + // It's a required field. + WhenUnsatisfiable UnsatisfiableConstraintAction `json:"whenUnsatisfiable" protobuf:"bytes,3,opt,name=whenUnsatisfiable,casttype=UnsatisfiableConstraintAction"` + // LabelSelector is used to find matching pods. + // Pods that match this label selector are counted to determine the number of pods + // in their corresponding topology domain. + // +optional + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,4,opt,name=labelSelector"` } const ( @@ -3000,6 +3136,11 @@ type PodSecurityContext struct { // takes precedence for that container. // +optional SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,1,opt,name=seLinuxOptions"` + // The Windows specific settings applied to all containers. + // If unspecified, the options within a container's SecurityContext will be used. + // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + WindowsOptions *WindowsSecurityContextOptions `json:"windowsOptions,omitempty" protobuf:"bytes,8,opt,name=windowsOptions"` // The UID to run the entrypoint of the container process. // Defaults to user specified in image metadata if unspecified. // May also be set in SecurityContext. If set in both SecurityContext and @@ -3085,6 +3226,175 @@ type PodDNSConfigOption struct { Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` } +// IP address information for entries in the (plural) PodIPs field. +// Each entry includes: +// IP: An IP address allocated to the pod. Routable at least within the cluster. +type PodIP struct { + // ip is an IP address (IPv4 or IPv6) assigned to the pod + IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` +} + +// EphemeralContainerCommon is a copy of all fields in Container to be inlined in +// EphemeralContainer. This separate type allows easy conversion from EphemeralContainer +// to Container and allows separate documentation for the fields of EphemeralContainer. +// When a new field is added to Container it must be added here as well. +type EphemeralContainerCommon struct { + // Name of the ephemeral container specified as a DNS_LABEL. + // This name must be unique among all containers, init containers and ephemeral containers. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Docker image name. 
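The MaxSkew/TopologyKey/WhenUnsatisfiable semantics described above translate into a constraint like the following sketch; the zone label and app selector are illustrative and cluster-dependent.

package examples

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// zoneSpreadConstraint spreads pods labeled app=web across zones with a maximum
// skew of one, and refuses to schedule a pod that would violate that skew.
func zoneSpreadConstraint() corev1.TopologySpreadConstraint {
	return corev1.TopologySpreadConstraint{
		MaxSkew:           1,
		TopologyKey:       "topology.kubernetes.io/zone",
		WhenUnsatisfiable: corev1.DoNotSchedule,
		LabelSelector:     &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
	}
}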
+ // More info: https://kubernetes.io/docs/concepts/containers/images + Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` + // Container's working directory. + // If not specified, the container runtime's default will be used, which + // might be configured in the container image. + // Cannot be updated. + // +optional + WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` + // Ports are not allowed for ephemeral containers. + Ports []ContainerPort `json:"ports,omitempty" protobuf:"bytes,6,rep,name=ports"` + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` + // List of environment variables to set in the container. + // Cannot be updated. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` + // Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + // already allocated to the pod. + // +optional + Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. + // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` + // volumeDevices is the list of block devices to be used by the container. + // This is a beta feature. 
+ // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` + // Probes are not allowed for ephemeral containers. + // +optional + LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"` + // Probes are not allowed for ephemeral containers. + // +optional + ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` + // Probes are not allowed for ephemeral containers. + // +optional + StartupProbe *Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"` + // Lifecycle is not allowed for ephemeral containers. + // +optional + Lifecycle *Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"` + // Optional: Path at which the file to which the container's termination message + // will be written is mounted into the container's filesystem. + // Message written is intended to be brief final status, such as an assertion failure message. + // Will be truncated by the node if greater than 4096 bytes. The total message length across + // all containers will be limited to 12kb. + // Defaults to /dev/termination-log. + // Cannot be updated. + // +optional + TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"` + // Indicate how the termination message should be populated. File will use the contents of + // terminationMessagePath to populate the container status message on both success and failure. + // FallbackToLogsOnError will use the last chunk of container log output if the termination + // message file is empty and the container exited with an error. + // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + // Defaults to File. + // Cannot be updated. + // +optional + TerminationMessagePolicy TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"` + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + // +optional + ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` + // SecurityContext is not allowed for ephemeral containers. + // +optional + SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` + + // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) + // and shouldn't be used for general purpose containers. + + // Whether this container should allocate a buffer for stdin in the container runtime. If this + // is not set, reads from stdin in the container will always result in EOF. + // Default is false. + // +optional + Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"` + // Whether the container runtime should close the stdin channel after it has been opened by + // a single attach. When stdin is true the stdin stream will remain open across multiple attach + // sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the + // first client attaches to stdin, and then remains open and accepts data until the client disconnects, + // at which time stdin is closed and remains closed until the container is restarted. If this + // flag is false, a container process that reads from stdin will never receive an EOF. + // Default is false + // +optional + StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"` + // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + // Default is false. + // +optional + TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"` +} + +// EphemeralContainerCommon converts to Container. All fields must be kept in sync between +// these two types. +var _ = Container(EphemeralContainerCommon{}) + +// An EphemeralContainer is a container that may be added temporarily to an existing pod for +// user-initiated activities such as debugging. Ephemeral containers have no resource or +// scheduling guarantees, and they will not be restarted when they exit or when a pod is +// removed or restarted. If an ephemeral container causes a pod to exceed its resource +// allocation, the pod may be evicted. +// Ephemeral containers may not be added by directly updating the pod spec. They must be added +// via the pod's ephemeralcontainers subresource, and they will appear in the pod spec +// once added. +// This is an alpha feature enabled by the EphemeralContainers feature flag. +type EphemeralContainer struct { + // Ephemeral containers have all of the fields of Container, plus additional fields + // specific to ephemeral containers. Fields in common with Container are in the + // following inlined struct so that an EphemeralContainer may easily be converted + // to a Container. + EphemeralContainerCommon `json:",inline" protobuf:"bytes,1,req"` + + // If set, the name of the container from PodSpec that this ephemeral container targets. + // The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + // If not set then the ephemeral container is run in whatever namespaces are shared + // for the pod. Note that the container runtime must support this feature. + // +optional + TargetContainerName string `json:"targetContainerName,omitempty" protobuf:"bytes,2,opt,name=targetContainerName"` +} + // PodStatus represents information about the status of a pod. Status may trail the actual // state of a system, especially if the node that hosts the pod cannot contact the control // plane. @@ -3140,6 +3450,14 @@ type PodStatus struct { // +optional PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"` + // podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must + // match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list + // is empty if no IPs have been allocated yet. + // +optional + // +patchStrategy=merge + // +patchMergeKey=ip + PodIPs []PodIP `json:"podIPs,omitempty" protobuf:"bytes,12,rep,name=podIPs" patchStrategy:"merge" patchMergeKey:"ip"` + // RFC 3339 date and time at which the object was acknowledged by the Kubelet. // This is before the Kubelet pulled the container image(s) for the pod.
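To make the EphemeralContainer shape above concrete, here is a sketch of an interactive debug container; the image, names and target container are illustrative, and the object would be submitted through the pod's ephemeralcontainers subresource rather than the pod spec.

package examples

import corev1 "k8s.io/api/core/v1"

// debugContainer builds an interactive ephemeral container that shares the
// namespaces of the pod's "app" container, runtime support permitting.
func debugContainer() corev1.EphemeralContainer {
	return corev1.EphemeralContainer{
		EphemeralContainerCommon: corev1.EphemeralContainerCommon{
			Name:    "debugger",
			Image:   "busybox:1.31",
			Command: []string{"sh"},
			Stdin:   true,
			TTY:     true,
		},
		TargetContainerName: "app",
	}
}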
// +optional @@ -3161,6 +3479,10 @@ type PodStatus struct { // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md // +optional QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"` + // Status for any ephemeral containers that have run in this pod. + // This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature. + // +optional + EphemeralContainerStatuses []ContainerStatus `json:"ephemeralContainerStatuses,omitempty" protobuf:"bytes,13,rep,name=ephemeralContainerStatuses"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -3169,19 +3491,21 @@ type PodStatus struct { type PodStatusResult struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Most recently observed status of the pod. // This data may not be up to date. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status PodStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` } // +genclient +// +genclient:method=GetEphemeralContainers,verb=get,subresource=ephemeralcontainers,result=EphemeralContainers +// +genclient:method=UpdateEphemeralContainers,verb=update,subresource=ephemeralcontainers,input=EphemeralContainers,result=EphemeralContainers // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Pod is a collection of containers that can run on a host. This resource is created @@ -3189,12 +3513,12 @@ type PodStatusResult struct { type Pod struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the pod. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -3202,7 +3526,7 @@ type Pod struct { // This data may not be up to date. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status PodStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -3213,24 +3537,24 @@ type Pod struct { type PodList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of pods. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md Items []Pod `json:"items" protobuf:"bytes,2,rep,name=items"` } // PodTemplateSpec describes the data a pod should have when created from a template type PodTemplateSpec struct { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the pod. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -3242,12 +3566,12 @@ type PodTemplateSpec struct { type PodTemplate struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Template defines the pods that will be created from this pod template. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Template PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` } @@ -3258,7 +3582,7 @@ type PodTemplate struct { type PodTemplateList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3370,12 +3694,12 @@ type ReplicationController struct { // If the Labels of a ReplicationController are empty, they are defaulted to // be the same as the Pod(s) that the replication controller manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the specification of the desired behavior of the replication controller. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec ReplicationControllerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -3383,7 +3707,7 @@ type ReplicationController struct { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status ReplicationControllerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -3394,7 +3718,7 @@ type ReplicationController struct { type ReplicationControllerList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3495,6 +3819,17 @@ type LoadBalancerIngress struct { Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` } +// IPFamily represents the IP Family (IPv4 or IPv6). This type is used +// to express the family of an IP expressed by a type (i.e. service.Spec.IPFamily) +type IPFamily string + +const ( + // IPv4Protocol indicates that this IP is IPv4 protocol + IPv4Protocol IPFamily = "IPv4" + // IPv6Protocol indicates that this IP is IPv6 protocol + IPv6Protocol IPFamily = "IPv6" +) + // ServiceSpec describes the attributes that a user creates on a service. type ServiceSpec struct { // The list of ports that are exposed by this service. @@ -3610,13 +3945,24 @@ type ServiceSpec struct { // sessionAffinityConfig contains the configurations of session affinity. // +optional SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"` + + // ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. + // IPv6). If a specific IP family is requested, the clusterIP field will be allocated from that family, if it is + // available in the cluster. If no IP family is requested, the cluster's primary IP family will be used. + // Other IP fields (loadBalancerIP, loadBalancerSourceRanges, externalIPs) and controllers which + // allocate external load-balancers should use the same IP family. Endpoints for this Service will be of + // this family. This field is immutable after creation. Assigning a ServiceIPFamily not available in the + // cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment. + // +optional + IPFamily *IPFamily `json:"ipFamily,omitempty" protobuf:"bytes,15,opt,name=ipFamily,Configcasttype=IPFamily"` } // ServicePort contains information on service's port. type ServicePort struct { // The name of this port within the service. This must be a DNS_LABEL. - // All ports within a ServiceSpec must have unique names. This maps to - // the 'Name' field in EndpointPort objects. + // All ports within a ServiceSpec must have unique names. When considering + // the endpoints for a Service, this must match the 'name' field in the + // EndpointPort. 
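A sketch of the new ipFamily field in use: requesting an IPv6 cluster IP for a Service. This is only meaningful in clusters that actually offer an IPv6 family; the selector and ports are illustrative.

package examples

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// ipv6ServiceSpec asks for the Service's clusterIP to be allocated from the
// cluster's IPv6 range, per the ipFamily field documented above.
func ipv6ServiceSpec() corev1.ServiceSpec {
	family := corev1.IPv6Protocol
	return corev1.ServiceSpec{
		Selector: map[string]string{"app": "web"},
		IPFamily: &family,
		Ports: []corev1.ServicePort{{
			Name:       "http",
			Port:       80,
			TargetPort: intstr.FromInt(8080),
		}},
	}
}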
// Optional if only one ServicePort is defined on this service. // +optional Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` @@ -3659,19 +4005,19 @@ type ServicePort struct { type Service struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of a service. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec ServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Most recently observed status of the service. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status ServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -3688,7 +4034,7 @@ const ( type ServiceList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3706,7 +4052,7 @@ type ServiceList struct { type ServiceAccount struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3736,7 +4082,7 @@ type ServiceAccount struct { type ServiceAccountList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3763,7 +4109,7 @@ type ServiceAccountList struct { type Endpoints struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3825,7 +4171,8 @@ type EndpointAddress struct { // EndpointPort is a tuple that describes a single port. type EndpointPort struct { - // The name of this port (corresponds to ServicePort.Name). + // The name of this port. This must match the 'name' field in the + // corresponding ServicePort. // Must be a DNS_LABEL. // Optional only if one port is defined. 
// +optional @@ -3847,7 +4194,7 @@ type EndpointPort struct { type EndpointsList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3860,6 +4207,14 @@ type NodeSpec struct { // PodCIDR represents the pod IP range assigned to the node. // +optional PodCIDR string `json:"podCIDR,omitempty" protobuf:"bytes,1,opt,name=podCIDR"` + + // podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this + // field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for + // each of IPv4 and IPv6. + // +optional + // +patchStrategy=merge + PodCIDRs []string `json:"podCIDRs,omitempty" protobuf:"bytes,7,opt,name=podCIDRs" patchStrategy:"merge"` + // ID of the node assigned by the cloud provider in the format: :// // +optional ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"` @@ -4041,6 +4396,9 @@ type NodeStatus struct { // List of addresses reachable to the node. // Queried from cloud provider, if available. // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses + // Note: This field is declared as mergeable, but the merge key is not sufficiently + // unique, which can cause data corruption when it is merged. Callers should instead + // use a full-replacement patch. See http://pr.k8s.io/79391 for an example. // +optional // +patchMergeKey=type // +patchStrategy=merge @@ -4140,9 +4498,6 @@ type NodeConditionType string const ( // NodeReady means kubelet is healthy and ready to accept pods. NodeReady NodeConditionType = "Ready" - // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk - // space on the node. - NodeOutOfDisk NodeConditionType = "OutOfDisk" // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory. NodeMemoryPressure NodeConditionType = "MemoryPressure" // NodeDiskPressure means the kubelet is under pressure due to insufficient available disk. @@ -4233,19 +4588,19 @@ type ResourceList map[ResourceName]resource.Quantity type Node struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of a node. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec NodeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Most recently observed status of the node. // Populated by the system. // Read-only. 
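A sketch of the dual-stack podCIDRs field added to NodeSpec above; as documented, the first entry must match the singular podCIDR field. The CIDRs here are illustrative.

package examples

import corev1 "k8s.io/api/core/v1"

// dualStackNodeSpec carries one IPv4 and one IPv6 pod range; the 0th entry of
// PodCIDRs mirrors the legacy PodCIDR field.
func dualStackNodeSpec() corev1.NodeSpec {
	return corev1.NodeSpec{
		PodCIDR:  "10.244.1.0/24",
		PodCIDRs: []string{"10.244.1.0/24", "fd00:10:244:1::/64"},
	}
}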
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status NodeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -4256,7 +4611,7 @@ type Node struct { type NodeList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4287,6 +4642,12 @@ type NamespaceStatus struct { // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ // +optional Phase NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NamespacePhase"` + + // Represents the latest available observations of a namespace's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []NamespaceCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` } type NamespacePhase string @@ -4299,6 +4660,32 @@ const ( NamespaceTerminating NamespacePhase = "Terminating" ) +type NamespaceConditionType string + +// These are valid conditions of a namespace. +const ( + // NamespaceDeletionDiscoveryFailure contains information about namespace deleter errors during resource discovery. + NamespaceDeletionDiscoveryFailure NamespaceConditionType = "NamespaceDeletionDiscoveryFailure" + // NamespaceDeletionContentFailure contains information about namespace deleter errors during deletion of resources. + NamespaceDeletionContentFailure NamespaceConditionType = "NamespaceDeletionContentFailure" + // NamespaceDeletionGVParsingFailure contains information about namespace deleter errors parsing GV for legacy types. + NamespaceDeletionGVParsingFailure NamespaceConditionType = "NamespaceDeletionGroupVersionParsingFailure" +) + +// NamespaceCondition contains details about state of namespace. +type NamespaceCondition struct { + // Type of namespace controller condition. + Type NamespaceConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NamespaceConditionType"` + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` +} + // +genclient // +genclient:nonNamespaced // +genclient:skipVerbs=deleteCollection @@ -4309,17 +4696,17 @@ const ( type Namespace struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of the Namespace. 
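One way the new namespace deletion conditions above might be consumed, for example when diagnosing a namespace stuck in Terminating; the helper name is ours.

package examples

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// reportDeletionProblems prints any of the namespace-deleter failure conditions
// defined above that are currently True on the namespace.
func reportDeletionProblems(ns *corev1.Namespace) {
	for _, cond := range ns.Status.Conditions {
		switch cond.Type {
		case corev1.NamespaceDeletionDiscoveryFailure,
			corev1.NamespaceDeletionContentFailure,
			corev1.NamespaceDeletionGVParsingFailure:
			if cond.Status == corev1.ConditionTrue {
				fmt.Printf("namespace %s: %s: %s\n", ns.Name, cond.Type, cond.Message)
			}
		}
	}
}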
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec NamespaceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status describes the current status of a Namespace. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status NamespaceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -4330,7 +4717,7 @@ type Namespace struct { type NamespaceList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4346,7 +4733,7 @@ type NamespaceList struct { type Binding struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4354,6 +4741,22 @@ type Binding struct { Target ObjectReference `json:"target" protobuf:"bytes,2,opt,name=target"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// A list of ephemeral containers used with the Pod ephemeralcontainers subresource. +type EphemeralContainers struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // A list of ephemeral containers associated with this pod. New ephemeral containers + // may be appended to this list, but existing ephemeral containers may not be removed + // or modified. + // +patchMergeKey=name + // +patchStrategy=merge + EphemeralContainers []EphemeralContainer `json:"ephemeralContainers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=ephemeralContainers"` +} + // Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. // +k8s:openapi-gen=false type Preconditions struct { @@ -4537,7 +4940,7 @@ type ServiceProxyOptions struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type ObjectReference struct { // Kind of the referent. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` // Namespace of the referent. @@ -4556,7 +4959,7 @@ type ObjectReference struct { // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,5,opt,name=apiVersion"` // Specific resourceVersion to which this reference is made, if any. 
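A sketch of how the EphemeralContainers wrapper defined above might be assembled before being sent to a pod's ephemeralcontainers subresource (via the generated client methods referenced by the genclient tags earlier); the helper itself is illustrative.

package examples

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// withDebugContainer returns the subresource payload that appends one ephemeral
// container to the pod's existing list; existing entries may not be removed or modified.
func withDebugContainer(pod *corev1.Pod, debug corev1.EphemeralContainer) *corev1.EphemeralContainers {
	return &corev1.EphemeralContainers{
		ObjectMeta:          metav1.ObjectMeta{Name: pod.Name, Namespace: pod.Namespace},
		EphemeralContainers: append(pod.Spec.EphemeralContainers, debug),
	}
}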
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency // +optional ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` @@ -4631,7 +5034,7 @@ const ( type Event struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` // The object that this event is about. @@ -4701,6 +5104,7 @@ type EventSeries struct { // Time of the last occurrence observed LastObservedTime metav1.MicroTime `json:"lastObservedTime,omitempty" protobuf:"bytes,2,name=lastObservedTime"` // State of this Series: Ongoing or Finished + // Deprecated. Planned removal for 1.18 State EventSeriesState `json:"state,omitempty" protobuf:"bytes,3,name=state"` } @@ -4718,7 +5122,7 @@ const ( type EventList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4778,12 +5182,12 @@ type LimitRangeSpec struct { type LimitRange struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the limits enforced. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec LimitRangeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -4794,7 +5198,7 @@ type LimitRange struct { type LimitRangeList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4934,17 +5338,17 @@ type ResourceQuotaStatus struct { type ResourceQuota struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the desired quota. 
- // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec ResourceQuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status defines the actual enforced quota and its current usage. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status ResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -4955,7 +5359,7 @@ type ResourceQuota struct { type ResourceQuotaList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4972,7 +5376,7 @@ type ResourceQuotaList struct { type Secret struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5089,7 +5493,7 @@ const ( type SecretList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5105,7 +5509,7 @@ type SecretList struct { type ConfigMap struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5134,7 +5538,7 @@ type ConfigMap struct { type ConfigMapList struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5176,7 +5580,7 @@ type ComponentCondition struct { type ComponentStatus struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5193,7 +5597,7 @@ type ComponentStatus struct { type ComponentStatusList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5267,6 +5671,11 @@ type SecurityContext struct { // PodSecurityContext, the value specified in SecurityContext takes precedence. // +optional SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,3,opt,name=seLinuxOptions"` + // The Windows specific settings applied to all containers. + // If unspecified, the options from the PodSecurityContext will be used. + // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + WindowsOptions *WindowsSecurityContextOptions `json:"windowsOptions,omitempty" protobuf:"bytes,10,opt,name=windowsOptions"` // The UID to run the entrypoint of the container process. // Defaults to user specified in image metadata if unspecified. // May also be set in PodSecurityContext. If set in both SecurityContext and @@ -5337,13 +5746,36 @@ type SELinuxOptions struct { Level string `json:"level,omitempty" protobuf:"bytes,4,opt,name=level"` } +// WindowsSecurityContextOptions contain Windows-specific options and credentials. +type WindowsSecurityContextOptions struct { + // GMSACredentialSpecName is the name of the GMSA credential spec to use. + // This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag. + // +optional + GMSACredentialSpecName *string `json:"gmsaCredentialSpecName,omitempty" protobuf:"bytes,1,opt,name=gmsaCredentialSpecName"` + + // GMSACredentialSpec is where the GMSA admission webhook + // (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + // GMSA credential spec named by the GMSACredentialSpecName field. + // This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag. + // +optional + GMSACredentialSpec *string `json:"gmsaCredentialSpec,omitempty" protobuf:"bytes,2,opt,name=gmsaCredentialSpec"` + + // The UserName in Windows to run the entrypoint of the container process. + // Defaults to the user specified in image metadata if unspecified. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // This field is alpha-level and it is only honored by servers that enable the WindowsRunAsUserName feature flag. + // +optional + RunAsUserName *string `json:"runAsUserName,omitempty" protobuf:"bytes,3,opt,name=runAsUserName"` +} + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // RangeAllocation is not a public type. type RangeAllocation struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
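A sketch of the WindowsSecurityContextOptions added above, set on a container-level SecurityContext; the credential spec name and user are illustrative, and the fields are only honored with the WindowsGMSA / WindowsRunAsUserName feature gates enabled.

package examples

import corev1 "k8s.io/api/core/v1"

// windowsSecurityContext names a GMSA credential spec and a Windows user for the
// container entrypoint, per the fields documented above.
func windowsSecurityContext() *corev1.SecurityContext {
	gmsa := "webapp-gmsa"            // illustrative GMSA credential spec name
	user := "ContainerAdministrator" // illustrative Windows user
	return &corev1.SecurityContext{
		WindowsOptions: &corev1.WindowsSecurityContextOptions{
			GMSACredentialSpecName: &gmsa,
			RunAsUserName:          &user,
		},
	}
}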
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 2c5b04f29..35b8389a7 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -108,7 +108,7 @@ func (AzureFileVolumeSource) SwaggerDoc() map[string]string { var map_Binding = map[string]string{ "": "Binding ties one object to another; for example, a pod is bound to a node by a scheduler. Deprecated in 1.7, please use the bindings subresource of pods instead.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "target": "The target object that you want to bind to the standard object.", } @@ -126,6 +126,7 @@ var map_CSIPersistentVolumeSource = map[string]string{ "controllerPublishSecretRef": "ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", "nodeStageSecretRef": "NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", "nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "controllerExpandSecretRef": "ControllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This is an alpha field and requires enabling ExpandCSIVolumes feature gate. This field is optional, and may be empty if no secret is required. 
If the secret object contains more than one secret, all secrets are passed.", } func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -157,12 +158,12 @@ func (Capabilities) SwaggerDoc() map[string]string { var map_CephFSPersistentVolumeSource = map[string]string{ "": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", - "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", "path": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", - "user": "Optional: User is the rados user name, default is admin More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "user": "Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", } func (CephFSPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -171,12 +172,12 @@ func (CephFSPersistentVolumeSource) SwaggerDoc() map[string]string { var map_CephFSVolumeSource = map[string]string{ "": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", - "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", "path": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", - "user": "Optional: User is the rados user name, default is admin More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. 
More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "user": "Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", } func (CephFSVolumeSource) SwaggerDoc() map[string]string { @@ -185,9 +186,9 @@ func (CephFSVolumeSource) SwaggerDoc() map[string]string { var map_CinderPersistentVolumeSource = map[string]string{ "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", - "volumeID": "volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "volumeID": "volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "secretRef": "Optional: points to a secret object containing parameters used to connect to OpenStack.", } @@ -197,9 +198,9 @@ func (CinderPersistentVolumeSource) SwaggerDoc() map[string]string { var map_CinderVolumeSource = map[string]string{ "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", - "volumeID": "volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. 
More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "volumeID": "volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "secretRef": "Optional: points to a secret object containing parameters used to connect to OpenStack.", } @@ -230,7 +231,7 @@ func (ComponentCondition) SwaggerDoc() map[string]string { var map_ComponentStatus = map[string]string{ "": "ComponentStatus (and ComponentStatusList) holds the cluster validation info.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "conditions": "List of component conditions observed", } @@ -240,7 +241,7 @@ func (ComponentStatus) SwaggerDoc() map[string]string { var map_ComponentStatusList = map[string]string{ "": "Status of all the conditions for the component as a list of ComponentStatus objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of ComponentStatus objects.", } @@ -250,7 +251,7 @@ func (ComponentStatusList) SwaggerDoc() map[string]string { var map_ConfigMap = map[string]string{ "": "ConfigMap holds configuration data for pods to consume.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "data": "Data contains the configuration data. Each key must consist of alphanumeric characters, '-', '_' or '.'. Values with non-UTF-8 byte sequences must use the BinaryData field. The keys stored in Data must not overlap with the keys in the BinaryData field, this is enforced during validation process.", "binaryData": "BinaryData contains the binary data. Each key must consist of alphanumeric characters, '-', '_' or '.'. BinaryData can contain byte sequences that are not in the UTF-8 range. The keys stored in BinaryData must not overlap with the ones in the Data field, this is enforced during validation process. 
Using this field will require 1.10+ apiserver and kubelet.", } @@ -271,7 +272,7 @@ func (ConfigMapEnvSource) SwaggerDoc() map[string]string { var map_ConfigMapKeySelector = map[string]string{ "": "Selects a key from a ConfigMap.", "key": "The key to select.", - "optional": "Specify whether the ConfigMap or it's key must be defined", + "optional": "Specify whether the ConfigMap or its key must be defined", } func (ConfigMapKeySelector) SwaggerDoc() map[string]string { @@ -280,7 +281,7 @@ func (ConfigMapKeySelector) SwaggerDoc() map[string]string { var map_ConfigMapList = map[string]string{ "": "ConfigMapList is a resource containing a list of ConfigMap objects.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of ConfigMaps.", } @@ -304,7 +305,7 @@ func (ConfigMapNodeConfigSource) SwaggerDoc() map[string]string { var map_ConfigMapProjection = map[string]string{ "": "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.", "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", - "optional": "Specify whether the ConfigMap or it's keys must be defined", + "optional": "Specify whether the ConfigMap or its keys must be defined", } func (ConfigMapProjection) SwaggerDoc() map[string]string { @@ -315,7 +316,7 @@ var map_ConfigMapVolumeSource = map[string]string{ "": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.", "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - "optional": "Specify whether the ConfigMap or it's keys must be defined", + "optional": "Specify whether the ConfigMap or its keys must be defined", } func (ConfigMapVolumeSource) SwaggerDoc() map[string]string { @@ -337,6 +338,7 @@ var map_Container = map[string]string{ "volumeDevices": "volumeDevices is the list of block devices to be used by the container. This is a beta feature.", "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "startupProbe": "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is an alpha feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", "terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", @@ -429,6 +431,7 @@ var map_ContainerStatus = map[string]string{ "image": "The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images", "imageID": "ImageID of the container's image.", "containerID": "Container's ID in the format 'docker://'.", + "started": "Specifies whether the container has passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. Is always true when no startupProbe is defined.", } func (ContainerStatus) SwaggerDoc() map[string]string { @@ -499,7 +502,7 @@ func (EndpointAddress) SwaggerDoc() map[string]string { var map_EndpointPort = map[string]string{ "": "EndpointPort is a tuple that describes a single port.", - "name": "The name of this port (corresponds to ServicePort.Name). 
Must be a DNS_LABEL. Optional only if one port is defined.", + "name": "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.", "port": "The port number of the endpoint.", "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", } @@ -521,7 +524,7 @@ func (EndpointSubset) SwaggerDoc() map[string]string { var map_Endpoints = map[string]string{ "": "Endpoints is a collection of endpoints that implement the actual service. Example:\n Name: \"mysvc\",\n Subsets: [\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n },\n {\n Addresses: [{\"ip\": \"10.10.3.3\"}],\n Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n },\n ]", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "subsets": "The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service.", } @@ -531,7 +534,7 @@ func (Endpoints) SwaggerDoc() map[string]string { var map_EndpointsList = map[string]string{ "": "EndpointsList is a list of endpoints.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of endpoints.", } @@ -573,9 +576,57 @@ func (EnvVarSource) SwaggerDoc() map[string]string { return map_EnvVarSource } +var map_EphemeralContainer = map[string]string{ + "": "An EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a pod is removed or restarted. If an ephemeral container causes a pod to exceed its resource allocation, the pod may be evicted. Ephemeral containers may not be added by directly updating the pod spec. They must be added via the pod's ephemeralcontainers subresource, and they will appear in the pod spec once added. This is an alpha feature enabled by the EphemeralContainers feature flag.", + "targetContainerName": "If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container is run in whatever namespaces are shared for the pod. Note that the container runtime must support this feature.", +} + +func (EphemeralContainer) SwaggerDoc() map[string]string { + return map_EphemeralContainer +} + +var map_EphemeralContainerCommon = map[string]string{ + "": "EphemeralContainerCommon is a copy of all fields in Container to be inlined in EphemeralContainer. 
This separate type allows easy conversion from EphemeralContainer to Container and allows separate documentation for the fields of EphemeralContainer. When a new field is added to Container it must be added here as well.", + "name": "Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers.", + "image": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images", + "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + "ports": "Ports are not allowed for ephemeral containers.", + "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + "env": "List of environment variables to set in the container. Cannot be updated.", + "resources": "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.", + "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", + "volumeDevices": "volumeDevices is the list of block devices to be used by the container. This is a beta feature.", + "livenessProbe": "Probes are not allowed for ephemeral containers.", + "readinessProbe": "Probes are not allowed for ephemeral containers.", + "startupProbe": "Probes are not allowed for ephemeral containers.", + "lifecycle": "Lifecycle is not allowed for ephemeral containers.", + "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. 
Cannot be updated.", + "terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + "imagePullPolicy": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + "securityContext": "SecurityContext is not allowed for ephemeral containers.", + "stdin": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + "stdinOnce": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", + "tty": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", +} + +func (EphemeralContainerCommon) SwaggerDoc() map[string]string { + return map_EphemeralContainerCommon +} + +var map_EphemeralContainers = map[string]string{ + "": "A list of ephemeral containers used with the Pod ephemeralcontainers subresource.", + "ephemeralContainers": "A list of ephemeral containers associated with this pod. New ephemeral containers may be appended to this list, but existing ephemeral containers may not be removed or modified.", +} + +func (EphemeralContainers) SwaggerDoc() map[string]string { + return map_EphemeralContainers +} + var map_Event = map[string]string{ "": "Event is a report of an event somewhere in the cluster.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "involvedObject": "The object that this event is about.", "reason": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.", "message": "A human-readable description of the status of this operation.", @@ -598,7 +649,7 @@ func (Event) SwaggerDoc() map[string]string { var map_EventList = map[string]string{ "": "EventList is a list of events.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of events", } @@ -610,7 +661,7 @@ var map_EventSeries = map[string]string{ "": "EventSeries contain information on series of events, i.e. 
thing that was/is happening continuously for some time.", "count": "Number of occurrences in this series up to the last heartbeat time", "lastObservedTime": "Time of the last occurrence observed", - "state": "State of this Series: Ongoing or Finished", + "state": "State of this Series: Ongoing or Finished Deprecated. Planned removal for 1.18", } func (EventSeries) SwaggerDoc() map[string]string { @@ -710,10 +761,10 @@ func (GitRepoVolumeSource) SwaggerDoc() map[string]string { var map_GlusterfsPersistentVolumeSource = map[string]string{ "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", - "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - "path": "Path is the Glusterfs volume path. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - "endpointsNamespace": "EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", + "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "path": "Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "endpointsNamespace": "EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", } func (GlusterfsPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -722,9 +773,9 @@ func (GlusterfsPersistentVolumeSource) SwaggerDoc() map[string]string { var map_GlusterfsVolumeSource = map[string]string{ "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", - "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - "path": "Path is the Glusterfs volume path. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", + "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "path": "Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", } func (GlusterfsVolumeSource) SwaggerDoc() map[string]string { @@ -837,7 +888,7 @@ func (KeyToPath) SwaggerDoc() map[string]string { var map_Lifecycle = map[string]string{ "": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.", "postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", - "preStop": "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", + "preStop": "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", } func (Lifecycle) SwaggerDoc() map[string]string { @@ -846,8 +897,8 @@ func (Lifecycle) SwaggerDoc() map[string]string { var map_LimitRange = map[string]string{ "": "LimitRange sets resource usage limits for each kind of resource in a Namespace.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the limits enforced. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the limits enforced. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (LimitRange) SwaggerDoc() map[string]string { @@ -870,7 +921,7 @@ func (LimitRangeItem) SwaggerDoc() map[string]string { var map_LimitRangeList = map[string]string{ "": "LimitRangeList is a list of LimitRange items.", - "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "Items is a list of LimitRange objects. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", } @@ -938,18 +989,28 @@ func (NFSVolumeSource) SwaggerDoc() map[string]string { var map_Namespace = map[string]string{ "": "Namespace provides a scope for Names. Use of multiple namespaces is optional.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the behavior of the Namespace. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status describes the current status of a Namespace. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the behavior of the Namespace. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status describes the current status of a Namespace. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Namespace) SwaggerDoc() map[string]string { return map_Namespace } +var map_NamespaceCondition = map[string]string{ + "": "NamespaceCondition contains details about state of namespace.", + "type": "Type of namespace controller condition.", + "status": "Status of the condition, one of True, False, Unknown.", +} + +func (NamespaceCondition) SwaggerDoc() map[string]string { + return map_NamespaceCondition +} + var map_NamespaceList = map[string]string{ "": "NamespaceList is a list of Namespaces.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "Items is the list of Namespace objects in the list. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/", } @@ -967,8 +1028,9 @@ func (NamespaceSpec) SwaggerDoc() map[string]string { } var map_NamespaceStatus = map[string]string{ - "": "NamespaceStatus is information about the current status of a Namespace.", - "phase": "Phase is the current lifecycle phase of the namespace. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/", + "": "NamespaceStatus is information about the current status of a Namespace.", + "phase": "Phase is the current lifecycle phase of the namespace. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/", + "conditions": "Represents the latest available observations of a namespace's current state.", } func (NamespaceStatus) SwaggerDoc() map[string]string { @@ -977,9 +1039,9 @@ func (NamespaceStatus) SwaggerDoc() map[string]string { var map_Node = map[string]string{ "": "Node is a worker node in Kubernetes. Each node will have a unique identifier in the cache (i.e. in etcd).", - "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the behavior of a node. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Most recently observed status of the node. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the behavior of a node. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the node. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Node) SwaggerDoc() map[string]string { @@ -1052,7 +1114,7 @@ func (NodeDaemonEndpoints) SwaggerDoc() map[string]string { var map_NodeList = map[string]string{ "": "NodeList is the whole list of all Nodes which have been registered with master.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of nodes", } @@ -1111,6 +1173,7 @@ func (NodeSelectorTerm) SwaggerDoc() map[string]string { var map_NodeSpec = map[string]string{ "": "NodeSpec describes the attributes that a node is created with.", "podCIDR": "PodCIDR represents the pod IP range assigned to the node.", + "podCIDRs": "podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for each of IPv4 and IPv6.", "providerID": "ID of the node assigned by the cloud provider in the format: ://", "unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration", "taints": "If specified, the node's taints.", @@ -1128,7 +1191,7 @@ var map_NodeStatus = map[string]string{ "allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.", "phase": "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.", "conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition", - "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses", + "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See http://pr.k8s.io/79391 for an example.", "daemonEndpoints": "Endpoints of daemons running on the Node.", "nodeInfo": "Set of ids/uuids to uniquely identify the node. 
More info: https://kubernetes.io/docs/concepts/nodes/node/#info", "images": "List of container images on this node", @@ -1171,12 +1234,12 @@ func (ObjectFieldSelector) SwaggerDoc() map[string]string { var map_ObjectReference = map[string]string{ "": "ObjectReference contains enough information to let you inspect or modify the referred object.", - "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "namespace": "Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/", "name": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "uid": "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids", "apiVersion": "API version of the referent.", - "resourceVersion": "Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", + "resourceVersion": "Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", "fieldPath": "If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.", } @@ -1186,7 +1249,7 @@ func (ObjectReference) SwaggerDoc() map[string]string { var map_PersistentVolume = map[string]string{ "": "PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes", "status": "Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes", } @@ -1197,7 +1260,7 @@ func (PersistentVolume) SwaggerDoc() map[string]string { var map_PersistentVolumeClaim = map[string]string{ "": "PersistentVolumeClaim is a user's request for and claim to a persistent volume", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", "status": "Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", } @@ -1220,7 +1283,7 @@ func (PersistentVolumeClaimCondition) SwaggerDoc() map[string]string { var map_PersistentVolumeClaimList = map[string]string{ "": "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "A list of persistent volume claims. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", } @@ -1267,7 +1330,7 @@ func (PersistentVolumeClaimVolumeSource) SwaggerDoc() map[string]string { var map_PersistentVolumeList = map[string]string{ "": "PersistentVolumeList is a list of PersistentVolume items.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of persistent volumes. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes", } @@ -1280,11 +1343,11 @@ var map_PersistentVolumeSource = map[string]string{ "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", "hostPath": "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", - "glusterfs": "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", + "glusterfs": "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://examples.k8s.io/volumes/glusterfs/README.md", "nfs": "NFS represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", + "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. 
More info: https://examples.k8s.io/volumes/rbd/README.md", "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.", - "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running", @@ -1297,7 +1360,7 @@ var map_PersistentVolumeSource = map[string]string{ "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", "local": "Local represents directly-attached storage with node affinity", - "storageos": "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md", + "storageos": "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://examples.k8s.io/volumes/storageos/README.md", "csi": "CSI represents storage that is handled by an external CSI driver (Beta feature).", } @@ -1344,9 +1407,9 @@ func (PhotonPersistentDiskVolumeSource) SwaggerDoc() map[string]string { var map_Pod = map[string]string{ "": "Pod is a collection of containers that can run on a host. This resource is created by clients and scheduled onto hosts.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Pod) SwaggerDoc() map[string]string { @@ -1445,10 +1508,19 @@ func (PodExecOptions) SwaggerDoc() map[string]string { return map_PodExecOptions } +var map_PodIP = map[string]string{ + "": "IP address information for entries in the (plural) PodIPs field. Each entry includes:\n IP: An IP address allocated to the pod. 
Routable at least within the cluster.", + "ip": "ip is an IP address (IPv4 or IPv6) assigned to the pod", +} + +func (PodIP) SwaggerDoc() map[string]string { + return map_PodIP +} + var map_PodList = map[string]string{ "": "PodList is a list of Pods.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "List of pods. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of pods. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md", } func (PodList) SwaggerDoc() map[string]string { @@ -1501,6 +1573,7 @@ func (PodReadinessGate) SwaggerDoc() map[string]string { var map_PodSecurityContext = map[string]string{ "": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", + "windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", @@ -1525,8 +1598,9 @@ func (PodSignature) SwaggerDoc() map[string]string { var map_PodSpec = map[string]string{ "": "PodSpec is a description of a pod.", "volumes": "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", - "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. 
The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", + "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", "containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.", + "ephemeralContainers": "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.", "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy", "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", "activeDeadlineSeconds": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.", @@ -1552,8 +1626,11 @@ var map_PodSpec = map[string]string{ "priority": "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", "dnsConfig": "Specifies the DNS parameters of a pod. 
Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", "readinessGates": "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md", - "runtimeClassName": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is an alpha feature and may change in the future.", + "runtimeClassName": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14.", "enableServiceLinks": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.", + "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.", + "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.", + "topologySpreadConstraints": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. This field is alpha-level and is only honored by clusters that enables the EvenPodsSpread feature. All topologySpreadConstraints are ANDed.", } func (PodSpec) SwaggerDoc() map[string]string { @@ -1561,18 +1638,20 @@ func (PodSpec) SwaggerDoc() map[string]string { } var map_PodStatus = map[string]string{ - "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.", - "phase": "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. 
The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase", - "conditions": "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", - "message": "A human readable message indicating details about why the pod is in this condition.", - "reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'", - "nominatedNodeName": "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.", - "hostIP": "IP address of the host to which the pod is assigned. Empty if not yet scheduled.", - "podIP": "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", - "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", - "initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", - "containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", - "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md", + "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.", + "phase": "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. 
The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase", + "conditions": "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", + "message": "A human readable message indicating details about why the pod is in this condition.", + "reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'", + "nominatedNodeName": "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.", + "hostIP": "IP address of the host to which the pod is assigned. Empty if not yet scheduled.", + "podIP": "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", + "podIPs": "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.", + "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", + "initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", + "containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", + "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md", + "ephemeralContainerStatuses": "Status for any ephemeral containers that have run in this pod. This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature.", } func (PodStatus) SwaggerDoc() map[string]string { @@ -1581,8 +1660,8 @@ func (PodStatus) SwaggerDoc() map[string]string { var map_PodStatusResult = map[string]string{ "": "PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (PodStatusResult) SwaggerDoc() map[string]string { @@ -1591,8 +1670,8 @@ func (PodStatusResult) SwaggerDoc() map[string]string { var map_PodTemplate = map[string]string{ "": "PodTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "template": "Template defines the pods that will be created from this pod template. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "template": "Template defines the pods that will be created from this pod template. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (PodTemplate) SwaggerDoc() map[string]string { @@ -1601,7 +1680,7 @@ func (PodTemplate) SwaggerDoc() map[string]string { var map_PodTemplateList = map[string]string{ "": "PodTemplateList is a list of PodTemplates.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of pod templates", } @@ -1611,8 +1690,8 @@ func (PodTemplateList) SwaggerDoc() map[string]string { var map_PodTemplateSpec = map[string]string{ "": "PodTemplateSpec describes the data a pod should have when created from a template", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (PodTemplateSpec) SwaggerDoc() map[string]string { @@ -1666,7 +1745,7 @@ var map_Probe = map[string]string{ "initialDelaySeconds": "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "timeoutSeconds": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "periodSeconds": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.", - "successThreshold": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.", + "successThreshold": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.", "failureThreshold": "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.", } @@ -1700,14 +1779,14 @@ func (QuobyteVolumeSource) SwaggerDoc() map[string]string { var map_RBDPersistentVolumeSource = map[string]string{ "": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", - "monitors": "A collection of Ceph monitors. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "image": "The rados image name. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "monitors": "A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "image": "The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", - "pool": "The rados pool name. Default is rbd. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "user": "The rados user name. Default is admin. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "pool": "The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "user": "The rados user name. Default is admin. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", } func (RBDPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -1716,14 +1795,14 @@ func (RBDPersistentVolumeSource) SwaggerDoc() map[string]string { var map_RBDVolumeSource = map[string]string{ "": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", - "monitors": "A collection of Ceph monitors. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "image": "The rados image name. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "monitors": "A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "image": "The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", - "pool": "The rados pool name. Default is rbd. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "user": "The rados user name. Default is admin. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "pool": "The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "user": "The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", } func (RBDVolumeSource) SwaggerDoc() map[string]string { @@ -1732,7 +1811,7 @@ func (RBDVolumeSource) SwaggerDoc() map[string]string { var map_RangeAllocation = map[string]string{ "": "RangeAllocation is not a public type.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "range": "Range is string that identifies the range represented by 'data'.", "data": "Data is a bit array containing all allocated addresses in the previous segment.", } @@ -1743,9 +1822,9 @@ func (RangeAllocation) SwaggerDoc() map[string]string { var map_ReplicationController = map[string]string{ "": "ReplicationController represents the configuration of a replication controller.", - "metadata": "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the specification of the desired behavior of the replication controller. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the replication controller. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (ReplicationController) SwaggerDoc() map[string]string { @@ -1767,7 +1846,7 @@ func (ReplicationControllerCondition) SwaggerDoc() map[string]string { var map_ReplicationControllerList = map[string]string{ "": "ReplicationControllerList is a collection of replication controllers.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of replication controllers. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", } @@ -1814,9 +1893,9 @@ func (ResourceFieldSelector) SwaggerDoc() map[string]string { var map_ResourceQuota = map[string]string{ "": "ResourceQuota sets aggregate quota restrictions enforced per namespace", - "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the desired quota. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status defines the actual enforced quota and its current usage. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the desired quota. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status defines the actual enforced quota and its current usage. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (ResourceQuota) SwaggerDoc() map[string]string { @@ -1825,7 +1904,7 @@ func (ResourceQuota) SwaggerDoc() map[string]string { var map_ResourceQuotaList = map[string]string{ "": "ResourceQuotaList is a list of ResourceQuota items.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "Items is a list of ResourceQuota objects. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", } @@ -1934,7 +2013,7 @@ func (ScopedResourceSelectorRequirement) SwaggerDoc() map[string]string { var map_Secret = map[string]string{ "": "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "data": "Data contains the secret data. Each key must consist of alphanumeric characters, '-', '_' or '.'. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4", "stringData": "stringData allows specifying non-binary secret data in string form. It is provided as a write-only convenience method. All keys and values are merged into the data field on write, overwriting any existing values. It is never output when reading from the API.", "type": "Used to facilitate programmatic handling of secret data.", @@ -1956,7 +2035,7 @@ func (SecretEnvSource) SwaggerDoc() map[string]string { var map_SecretKeySelector = map[string]string{ "": "SecretKeySelector selects a key of a Secret.", "key": "The key of the secret to select from. Must be a valid secret key.", - "optional": "Specify whether the Secret or it's key must be defined", + "optional": "Specify whether the Secret or its key must be defined", } func (SecretKeySelector) SwaggerDoc() map[string]string { @@ -1965,7 +2044,7 @@ func (SecretKeySelector) SwaggerDoc() map[string]string { var map_SecretList = map[string]string{ "": "SecretList is a list of Secret.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "Items is a list of secret objects. More info: https://kubernetes.io/docs/concepts/configuration/secret", } @@ -1998,7 +2077,7 @@ var map_SecretVolumeSource = map[string]string{ "secretName": "Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", "items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - "optional": "Specify whether the Secret or it's keys must be defined", + "optional": "Specify whether the Secret or its keys must be defined", } func (SecretVolumeSource) SwaggerDoc() map[string]string { @@ -2010,6 +2089,7 @@ var map_SecurityContext = map[string]string{ "capabilities": "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.", "privileged": "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.", "seLinuxOptions": "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", @@ -2033,9 +2113,9 @@ func (SerializedReference) SwaggerDoc() map[string]string { var map_Service = map[string]string{ "": "Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the behavior of a service. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Most recently observed status of the service. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the behavior of a service. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the service. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Service) SwaggerDoc() map[string]string { @@ -2044,7 +2124,7 @@ func (Service) SwaggerDoc() map[string]string { var map_ServiceAccount = map[string]string{ "": "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "secrets": "Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: https://kubernetes.io/docs/concepts/configuration/secret", "imagePullSecrets": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod", "automountServiceAccountToken": "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.", @@ -2056,7 +2136,7 @@ func (ServiceAccount) SwaggerDoc() map[string]string { var map_ServiceAccountList = map[string]string{ "": "ServiceAccountList is a list of ServiceAccount objects", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of ServiceAccounts. 
More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", } @@ -2077,7 +2157,7 @@ func (ServiceAccountTokenProjection) SwaggerDoc() map[string]string { var map_ServiceList = map[string]string{ "": "ServiceList holds a list of services.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of services", } @@ -2087,7 +2167,7 @@ func (ServiceList) SwaggerDoc() map[string]string { var map_ServicePort = map[string]string{ "": "ServicePort contains information on service's port.", - "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. This maps to the 'Name' field in EndpointPort objects. Optional if only one ServicePort is defined on this service.", + "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.", "protocol": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.", "port": "The port that will be exposed by this service.", "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service", @@ -2122,6 +2202,7 @@ var map_ServiceSpec = map[string]string{ "healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. If not specified, HealthCheckNodePort is created by the service api backend with the allocated nodePort. Will use user-specified nodePort value if specified by the client. Only effects when Type is set to LoadBalancer and ExternalTrafficPolicy is set to Local.", "publishNotReadyAddresses": "publishNotReadyAddresses, when set to true, indicates that DNS implementations must publish the notReadyAddresses of subsets for the Endpoints associated with the Service. The default value is false. The primary use case for setting this field is to use a StatefulSet's Headless Service to propagate SRV records for its Pods without respect to their readiness for purpose of peer discovery.", "sessionAffinityConfig": "sessionAffinityConfig contains the configurations of session affinity.", + "ipFamily": "ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. IPv6). If a specific IP family is requested, the clusterIP field will be allocated from that family, if it is available in the cluster. If no IP family is requested, the cluster's primary IP family will be used. Other IP fields (loadBalancerIP, loadBalancerSourceRanges, externalIPs) and controllers which allocate external load-balancers should use the same IP family. Endpoints for this Service will be of this family. This field is immutable after creation. 
Assigning a ServiceIPFamily not available in the cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment.", } func (ServiceSpec) SwaggerDoc() map[string]string { @@ -2236,6 +2317,18 @@ func (TopologySelectorTerm) SwaggerDoc() map[string]string { return map_TopologySelectorTerm } +var map_TopologySpreadConstraint = map[string]string{ + "": "TopologySpreadConstraint specifies how to spread matching pods among the given topology.", + "maxSkew": "MaxSkew describes the degree to which pods may be unevenly distributed. It's the maximum permitted difference between the number of matching pods in any two topology domains of a given topology type. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: ", + "topologyKey": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a \"bucket\", and try to put balanced number of pods into each bucket. It's a required field.", + "whenUnsatisfiable": "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it - ScheduleAnyway tells the scheduler to still schedule it It's considered as \"Unsatisfiable\" if and only if placing incoming pod on any topology violates \"MaxSkew\". For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: ", + "labelSelector": "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.", +} + +func (TopologySpreadConstraint) SwaggerDoc() map[string]string { + return map_TopologySpreadConstraint +} + var map_TypedLocalObjectReference = map[string]string{ "": "TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.", "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", @@ -2273,7 +2366,7 @@ var map_VolumeMount = map[string]string{ "mountPath": "Path within the container at which the volume should be mounted. Must not contain ':'.", "subPath": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).", "mountPropagation": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.", - "subPathExpr": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is alpha in 1.14.", + "subPathExpr": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive. 
This field is beta in 1.15.", } func (VolumeMount) SwaggerDoc() map[string]string { @@ -2310,12 +2403,12 @@ var map_VolumeSource = map[string]string{ "gitRepo": "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", "secret": "Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", "nfs": "NFS represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md", - "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", + "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", + "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", "persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", - "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", + "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", - "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume", @@ -2359,4 +2452,15 @@ func (WeightedPodAffinityTerm) SwaggerDoc() map[string]string { return map_WeightedPodAffinityTerm } +var map_WindowsSecurityContextOptions = map[string]string{ + "": "WindowsSecurityContextOptions contain Windows-specific options and credentials.", + "gmsaCredentialSpecName": "GMSACredentialSpecName is the name of the GMSA credential spec to use. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.", + "gmsaCredentialSpec": "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. 
This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.", + "runAsUserName": "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. This field is alpha-level and it is only honored by servers that enable the WindowsRunAsUserName feature flag.", +} + +func (WindowsSecurityContextOptions) SwaggerDoc() map[string]string { + return map_WindowsSecurityContextOptions +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/core/v1/well_known_labels.go b/vendor/k8s.io/api/core/v1/well_known_labels.go index 4497760d3..3287fb51f 100644 --- a/vendor/k8s.io/api/core/v1/well_known_labels.go +++ b/vendor/k8s.io/api/core/v1/well_known_labels.go @@ -33,4 +33,10 @@ const ( // LabelNamespaceNodeRestriction is a forbidden label namespace that kubelets may not self-set when the NodeRestriction admission plugin is enabled LabelNamespaceNodeRestriction = "node-restriction.kubernetes.io" + + // IsHeadlessService is added by Controller to an Endpoint denoting if its parent + // Service is Headless. The existence of this label can be used further by other + // controllers and kube-proxy to check if the Endpoint objects should be replicated when + // using Headless Services + IsHeadlessService = "service.kubernetes.io/headless" ) diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index 9a580c079..fd47019c0 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -237,6 +237,11 @@ func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource *out = new(SecretReference) **out = **in } + if in.ControllerExpandSecretRef != nil { + in, out := &in.ControllerExpandSecretRef, &out.ControllerExpandSecretRef + *out = new(SecretReference) + **out = **in + } return } @@ -480,7 +485,7 @@ func (in *ComponentStatus) DeepCopyObject() runtime.Object { func (in *ComponentStatusList) DeepCopyInto(out *ComponentStatusList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ComponentStatus, len(*in)) @@ -605,7 +610,7 @@ func (in *ConfigMapKeySelector) DeepCopy() *ConfigMapKeySelector { func (in *ConfigMapList) DeepCopyInto(out *ConfigMapList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ConfigMap, len(*in)) @@ -768,6 +773,11 @@ func (in *Container) DeepCopyInto(out *Container) { *out = new(Probe) (*in).DeepCopyInto(*out) } + if in.StartupProbe != nil { + in, out := &in.StartupProbe, &out.StartupProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } if in.Lifecycle != nil { in, out := &in.Lifecycle, &out.Lifecycle *out = new(Lifecycle) @@ -915,6 +925,11 @@ func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) { *out = *in in.State.DeepCopyInto(&out.State) in.LastTerminationState.DeepCopyInto(&out.LastTerminationState) + if in.Started != nil { + in, out := &in.Started, &out.Started + *out = new(bool) + **out = **in + } return } @@ -1161,7 +1176,7 @@ func (in *Endpoints) DeepCopyObject() runtime.Object { func (in *EndpointsList) 
DeepCopyInto(out *EndpointsList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Endpoints, len(*in)) @@ -1273,6 +1288,139 @@ func (in *EnvVarSource) DeepCopy() *EnvVarSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralContainer) DeepCopyInto(out *EphemeralContainer) { + *out = *in + in.EphemeralContainerCommon.DeepCopyInto(&out.EphemeralContainerCommon) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralContainer. +func (in *EphemeralContainer) DeepCopy() *EphemeralContainer { + if in == nil { + return nil + } + out := new(EphemeralContainer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralContainerCommon) DeepCopyInto(out *EphemeralContainerCommon) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ContainerPort, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeDevices != nil { + in, out := &in.VolumeDevices, &out.VolumeDevices + *out = make([]VolumeDevice, len(*in)) + copy(*out, *in) + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + if in.StartupProbe != nil { + in, out := &in.StartupProbe, &out.StartupProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(Lifecycle) + (*in).DeepCopyInto(*out) + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(SecurityContext) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralContainerCommon. +func (in *EphemeralContainerCommon) DeepCopy() *EphemeralContainerCommon { + if in == nil { + return nil + } + out := new(EphemeralContainerCommon) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EphemeralContainers) DeepCopyInto(out *EphemeralContainers) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.EphemeralContainers != nil { + in, out := &in.EphemeralContainers, &out.EphemeralContainers + *out = make([]EphemeralContainer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralContainers. +func (in *EphemeralContainers) DeepCopy() *EphemeralContainers { + if in == nil { + return nil + } + out := new(EphemeralContainers) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EphemeralContainers) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Event) DeepCopyInto(out *Event) { *out = *in @@ -1318,7 +1466,7 @@ func (in *Event) DeepCopyObject() runtime.Object { func (in *EventList) DeepCopyInto(out *EventList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Event, len(*in)) @@ -1875,7 +2023,7 @@ func (in *LimitRangeItem) DeepCopy() *LimitRangeItem { func (in *LimitRangeList) DeepCopyInto(out *LimitRangeList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]LimitRange, len(*in)) @@ -1931,7 +2079,7 @@ func (in *LimitRangeSpec) DeepCopy() *LimitRangeSpec { func (in *List) DeepCopyInto(out *List) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]runtime.RawExtension, len(*in)) @@ -2056,7 +2204,7 @@ func (in *Namespace) DeepCopyInto(out *Namespace) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) return } @@ -2078,11 +2226,28 @@ func (in *Namespace) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespaceCondition) DeepCopyInto(out *NamespaceCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceCondition. +func (in *NamespaceCondition) DeepCopy() *NamespaceCondition { + if in == nil { + return nil + } + out := new(NamespaceCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NamespaceList) DeepCopyInto(out *NamespaceList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Namespace, len(*in)) @@ -2135,6 +2300,13 @@ func (in *NamespaceSpec) DeepCopy() *NamespaceSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NamespaceStatus) DeepCopyInto(out *NamespaceStatus) { *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]NamespaceCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -2311,7 +2483,7 @@ func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints { func (in *NodeList) DeepCopyInto(out *NodeList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Node, len(*in)) @@ -2465,6 +2637,11 @@ func (in *NodeSelectorTerm) DeepCopy() *NodeSelectorTerm { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodeSpec) DeepCopyInto(out *NodeSpec) { *out = *in + if in.PodCIDRs != nil { + in, out := &in.PodCIDRs, &out.PodCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } if in.Taints != nil { in, out := &in.Taints, &out.Taints *out = make([]Taint, len(*in)) @@ -2690,7 +2867,7 @@ func (in *PersistentVolumeClaimCondition) DeepCopy() *PersistentVolumeClaimCondi func (in *PersistentVolumeClaimList) DeepCopyInto(out *PersistentVolumeClaimList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PersistentVolumeClaim, len(*in)) @@ -2816,7 +2993,7 @@ func (in *PersistentVolumeClaimVolumeSource) DeepCopy() *PersistentVolumeClaimVo func (in *PersistentVolumeList) DeepCopyInto(out *PersistentVolumeList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PersistentVolume, len(*in)) @@ -3293,11 +3470,27 @@ func (in *PodExecOptions) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodIP) DeepCopyInto(out *PodIP) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodIP. +func (in *PodIP) DeepCopy() *PodIP { + if in == nil { + return nil + } + out := new(PodIP) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PodList) DeepCopyInto(out *PodList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Pod, len(*in)) @@ -3449,6 +3642,11 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) { *out = new(SELinuxOptions) **out = **in } + if in.WindowsOptions != nil { + in, out := &in.WindowsOptions, &out.WindowsOptions + *out = new(WindowsSecurityContextOptions) + (*in).DeepCopyInto(*out) + } if in.RunAsUser != nil { in, out := &in.RunAsUser, &out.RunAsUser *out = new(int64) @@ -3537,6 +3735,13 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.EphemeralContainers != nil { + in, out := &in.EphemeralContainers, &out.EphemeralContainers + *out = make([]EphemeralContainer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.TerminationGracePeriodSeconds != nil { in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds *out = new(int64) @@ -3618,6 +3823,25 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { *out = new(bool) **out = **in } + if in.PreemptionPolicy != nil { + in, out := &in.PreemptionPolicy, &out.PreemptionPolicy + *out = new(PreemptionPolicy) + **out = **in + } + if in.Overhead != nil { + in, out := &in.Overhead, &out.Overhead + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -3641,6 +3865,11 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.PodIPs != nil { + in, out := &in.PodIPs, &out.PodIPs + *out = make([]PodIP, len(*in)) + copy(*out, *in) + } if in.StartTime != nil { in, out := &in.StartTime, &out.StartTime *out = (*in).DeepCopy() @@ -3659,6 +3888,13 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.EphemeralContainerStatuses != nil { + in, out := &in.EphemeralContainerStatuses, &out.EphemeralContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -3730,7 +3966,7 @@ func (in *PodTemplate) DeepCopyObject() runtime.Object { func (in *PodTemplateList) DeepCopyInto(out *PodTemplateList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodTemplate, len(*in)) @@ -4042,7 +4278,7 @@ func (in *ReplicationControllerCondition) DeepCopy() *ReplicationControllerCondi func (in *ReplicationControllerList) DeepCopyInto(out *ReplicationControllerList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ReplicationController, len(*in)) @@ -4198,7 +4434,7 @@ func (in *ResourceQuota) DeepCopyObject() runtime.Object { func (in *ResourceQuotaList) DeepCopyInto(out *ResourceQuotaList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ResourceQuota, len(*in)) @@ -4518,7 +4754,7 @@ func (in 
*SecretKeySelector) DeepCopy() *SecretKeySelector { func (in *SecretList) DeepCopyInto(out *SecretList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Secret, len(*in)) @@ -4643,6 +4879,11 @@ func (in *SecurityContext) DeepCopyInto(out *SecurityContext) { *out = new(SELinuxOptions) **out = **in } + if in.WindowsOptions != nil { + in, out := &in.WindowsOptions, &out.WindowsOptions + *out = new(WindowsSecurityContextOptions) + (*in).DeepCopyInto(*out) + } if in.RunAsUser != nil { in, out := &in.RunAsUser, &out.RunAsUser *out = new(int64) @@ -4785,7 +5026,7 @@ func (in *ServiceAccount) DeepCopyObject() runtime.Object { func (in *ServiceAccountList) DeepCopyInto(out *ServiceAccountList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ServiceAccount, len(*in)) @@ -4839,7 +5080,7 @@ func (in *ServiceAccountTokenProjection) DeepCopy() *ServiceAccountTokenProjecti func (in *ServiceList) DeepCopyInto(out *ServiceList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Service, len(*in)) @@ -4940,6 +5181,11 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { *out = new(SessionAffinityConfig) (*in).DeepCopyInto(*out) } + if in.IPFamily != nil { + in, out := &in.IPFamily, &out.IPFamily + *out = new(IPFamily) + **out = **in + } return } @@ -5151,6 +5397,27 @@ func (in *TopologySelectorTerm) DeepCopy() *TopologySelectorTerm { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopologySpreadConstraint) DeepCopyInto(out *TopologySpreadConstraint) { + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySpreadConstraint. +func (in *TopologySpreadConstraint) DeepCopy() *TopologySpreadConstraint { + if in == nil { + return nil + } + out := new(TopologySpreadConstraint) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TypedLocalObjectReference) DeepCopyInto(out *TypedLocalObjectReference) { *out = *in @@ -5471,3 +5738,34 @@ func (in *WeightedPodAffinityTerm) DeepCopy() *WeightedPodAffinityTerm { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsSecurityContextOptions) DeepCopyInto(out *WindowsSecurityContextOptions) { + *out = *in + if in.GMSACredentialSpecName != nil { + in, out := &in.GMSACredentialSpecName, &out.GMSACredentialSpecName + *out = new(string) + **out = **in + } + if in.GMSACredentialSpec != nil { + in, out := &in.GMSACredentialSpec, &out.GMSACredentialSpec + *out = new(string) + **out = **in + } + if in.RunAsUserName != nil { + in, out := &in.RunAsUserName, &out.RunAsUserName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsSecurityContextOptions. 
+func (in *WindowsSecurityContextOptions) DeepCopy() *WindowsSecurityContextOptions { + if in == nil { + return nil + } + out := new(WindowsSecurityContextOptions) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/discovery/v1alpha1/doc.go b/vendor/k8s.io/api/discovery/v1alpha1/doc.go new file mode 100644 index 000000000..ffd6b0b54 --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +groupName=discovery.k8s.io + +package v1alpha1 // import "k8s.io/api/discovery/v1alpha1" diff --git a/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go b/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go new file mode 100644 index 000000000..34f6b28d7 --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go @@ -0,0 +1,1689 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/discovery/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
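Editorial note: the vendored deepcopy additions above (WindowsSecurityContextOptions, EphemeralContainers, TopologySpreadConstraint, and the ListMeta changes) follow the usual deepcopy-gen contract: every pointer, slice, and map field gets freshly allocated storage in the copy. A minimal sketch of relying on that contract, using the vendored k8s.io/api/core/v1 package; the values are illustrative assumptions and are not part of this diff.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	user := "ContainerAdministrator"
	orig := &corev1.PodSecurityContext{
		// WindowsOptions is one of the pointer fields the generated
		// DeepCopyInto above allocates a new value for.
		WindowsOptions: &corev1.WindowsSecurityContextOptions{RunAsUserName: &user},
	}

	// DeepCopy returns a fully independent object, so mutating the copy
	// leaves the original untouched.
	cp := orig.DeepCopy()
	other := "NT AUTHORITY\\SYSTEM"
	cp.WindowsOptions.RunAsUserName = &other

	fmt.Println(*orig.WindowsOptions.RunAsUserName) // ContainerAdministrator
	fmt.Println(*cp.WindowsOptions.RunAsUserName)   // NT AUTHORITY\SYSTEM
}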
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *Endpoint) Reset() { *m = Endpoint{} } +func (*Endpoint) ProtoMessage() {} +func (*Endpoint) Descriptor() ([]byte, []int) { + return fileDescriptor_772f83c5b34e07a5, []int{0} +} +func (m *Endpoint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Endpoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_Endpoint.Merge(m, src) +} +func (m *Endpoint) XXX_Size() int { + return m.Size() +} +func (m *Endpoint) XXX_DiscardUnknown() { + xxx_messageInfo_Endpoint.DiscardUnknown(m) +} + +var xxx_messageInfo_Endpoint proto.InternalMessageInfo + +func (m *EndpointConditions) Reset() { *m = EndpointConditions{} } +func (*EndpointConditions) ProtoMessage() {} +func (*EndpointConditions) Descriptor() ([]byte, []int) { + return fileDescriptor_772f83c5b34e07a5, []int{1} +} +func (m *EndpointConditions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointConditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointConditions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointConditions.Merge(m, src) +} +func (m *EndpointConditions) XXX_Size() int { + return m.Size() +} +func (m *EndpointConditions) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointConditions.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointConditions proto.InternalMessageInfo + +func (m *EndpointPort) Reset() { *m = EndpointPort{} } +func (*EndpointPort) ProtoMessage() {} +func (*EndpointPort) Descriptor() ([]byte, []int) { + return fileDescriptor_772f83c5b34e07a5, []int{2} +} +func (m *EndpointPort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointPort.Merge(m, src) +} +func (m *EndpointPort) XXX_Size() int { + return m.Size() +} +func (m *EndpointPort) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointPort.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointPort proto.InternalMessageInfo + +func (m *EndpointSlice) Reset() { *m = EndpointSlice{} } +func (*EndpointSlice) ProtoMessage() {} +func (*EndpointSlice) Descriptor() ([]byte, []int) { + return fileDescriptor_772f83c5b34e07a5, []int{3} +} +func (m *EndpointSlice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointSlice) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointSlice.Merge(m, src) +} +func (m *EndpointSlice) XXX_Size() int { + return m.Size() +} +func (m *EndpointSlice) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointSlice.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointSlice proto.InternalMessageInfo + +func (m *EndpointSliceList) Reset() { *m = EndpointSliceList{} } +func (*EndpointSliceList) ProtoMessage() {} +func (*EndpointSliceList) Descriptor() ([]byte, []int) { + 
return fileDescriptor_772f83c5b34e07a5, []int{4} +} +func (m *EndpointSliceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointSliceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointSliceList.Merge(m, src) +} +func (m *EndpointSliceList) XXX_Size() int { + return m.Size() +} +func (m *EndpointSliceList) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointSliceList.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointSliceList proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Endpoint)(nil), "k8s.io.api.discovery.v1alpha1.Endpoint") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.discovery.v1alpha1.Endpoint.TopologyEntry") + proto.RegisterType((*EndpointConditions)(nil), "k8s.io.api.discovery.v1alpha1.EndpointConditions") + proto.RegisterType((*EndpointPort)(nil), "k8s.io.api.discovery.v1alpha1.EndpointPort") + proto.RegisterType((*EndpointSlice)(nil), "k8s.io.api.discovery.v1alpha1.EndpointSlice") + proto.RegisterType((*EndpointSliceList)(nil), "k8s.io.api.discovery.v1alpha1.EndpointSliceList") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/discovery/v1alpha1/generated.proto", fileDescriptor_772f83c5b34e07a5) +} + +var fileDescriptor_772f83c5b34e07a5 = []byte{ + // 728 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4b, 0x6f, 0xd3, 0x4a, + 0x14, 0x8e, 0x9b, 0x5a, 0xb2, 0x27, 0x8d, 0xd4, 0x8e, 0xee, 0x22, 0xca, 0xbd, 0xd7, 0x8e, 0xc2, + 0x82, 0x48, 0x85, 0x31, 0xa9, 0x28, 0xaa, 0x60, 0x43, 0x8d, 0xca, 0x43, 0xe2, 0x11, 0x86, 0x2e, + 0x10, 0x62, 0xc1, 0xc4, 0x9e, 0x3a, 0x26, 0x89, 0xc7, 0xb2, 0x27, 0x91, 0xb2, 0xe3, 0x27, 0x20, + 0xf1, 0x77, 0x58, 0xb2, 0xe8, 0xb2, 0xcb, 0xae, 0x0c, 0x35, 0xff, 0xa2, 0x2b, 0x34, 0xe3, 0x57, + 0x4a, 0x78, 0x64, 0x37, 0xe7, 0x9b, 0xf3, 0x7d, 0xe7, 0x9c, 0x6f, 0xce, 0x80, 0x87, 0xe3, 0x83, + 0x18, 0xf9, 0xcc, 0x1a, 0xcf, 0x86, 0x34, 0x0a, 0x28, 0xa7, 0xb1, 0x35, 0xa7, 0x81, 0xcb, 0x22, + 0x2b, 0xbf, 0x20, 0xa1, 0x6f, 0xb9, 0x7e, 0xec, 0xb0, 0x39, 0x8d, 0x16, 0xd6, 0xbc, 0x4f, 0x26, + 0xe1, 0x88, 0xf4, 0x2d, 0x8f, 0x06, 0x34, 0x22, 0x9c, 0xba, 0x28, 0x8c, 0x18, 0x67, 0xf0, 0xff, + 0x2c, 0x1d, 0x91, 0xd0, 0x47, 0x65, 0x3a, 0x2a, 0xd2, 0xdb, 0x37, 0x3d, 0x9f, 0x8f, 0x66, 0x43, + 0xe4, 0xb0, 0xa9, 0xe5, 0x31, 0x8f, 0x59, 0x92, 0x35, 0x9c, 0x9d, 0xc8, 0x48, 0x06, 0xf2, 0x94, + 0xa9, 0xb5, 0xbb, 0x4b, 0xc5, 0x1d, 0x16, 0x51, 0x6b, 0xbe, 0x52, 0xb1, 0x7d, 0xbb, 0xca, 0x99, + 0x12, 0x67, 0xe4, 0x07, 0xa2, 0xbf, 0x70, 0xec, 0x09, 0x20, 0xb6, 0xa6, 0x94, 0x93, 0x5f, 0xb1, + 0xac, 0xdf, 0xb1, 0xa2, 0x59, 0xc0, 0xfd, 0x29, 0x5d, 0x21, 0xdc, 0xf9, 0x1b, 0x21, 0x76, 0x46, + 0x74, 0x4a, 0x7e, 0xe6, 0x75, 0x3f, 0xd7, 0x81, 0x76, 0x14, 0xb8, 0x21, 0xf3, 0x03, 0x0e, 0x77, + 0x81, 0x4e, 0x5c, 0x37, 0xa2, 0x71, 0x4c, 0xe3, 0x96, 0xd2, 0xa9, 0xf7, 0x74, 0xbb, 0x99, 0x26, + 0xa6, 0x7e, 0x58, 0x80, 0xb8, 0xba, 0x87, 0x14, 0x00, 0x87, 0x05, 0xae, 0xcf, 0x7d, 0x16, 0xc4, + 0xad, 0x8d, 0x8e, 0xd2, 0x6b, 0xec, 0xf5, 0xd1, 0x1f, 0xfd, 0x45, 0x45, 0xa5, 0x07, 0x25, 0xd1, + 0x86, 0xa7, 0x89, 0x59, 0x4b, 0x13, 0x13, 0x54, 0x18, 0x5e, 0x12, 0x86, 0x3d, 0xa0, 0x8d, 0x58, + 0xcc, 0x03, 0x32, 0xa5, 0xad, 0x7a, 0x47, 0xe9, 0xe9, 0xf6, 0x56, 0x9a, 0x98, 0xda, 0xe3, 0x1c, + 0xc3, 0xe5, 0x2d, 0x1c, 0x00, 0x9d, 0x93, 0xc8, 0xa3, 
0x1c, 0xd3, 0x93, 0xd6, 0xa6, 0xec, 0xe7, + 0xda, 0x72, 0x3f, 0xe2, 0x85, 0xd0, 0xbc, 0x8f, 0x5e, 0x0c, 0xdf, 0x53, 0x47, 0x24, 0xd1, 0x88, + 0x06, 0x0e, 0xcd, 0x46, 0x3c, 0x2e, 0x98, 0xb8, 0x12, 0x81, 0x0e, 0xd0, 0x38, 0x0b, 0xd9, 0x84, + 0x79, 0x8b, 0x96, 0xda, 0xa9, 0xf7, 0x1a, 0x7b, 0xfb, 0x6b, 0x0e, 0x88, 0x8e, 0x73, 0xde, 0x51, + 0xc0, 0xa3, 0x85, 0xbd, 0x9d, 0x0f, 0xa9, 0x15, 0x30, 0x2e, 0x85, 0xdb, 0xf7, 0x40, 0xf3, 0x4a, + 0x32, 0xdc, 0x06, 0xf5, 0x31, 0x5d, 0xb4, 0x14, 0x31, 0x2c, 0x16, 0x47, 0xf8, 0x0f, 0x50, 0xe7, + 0x64, 0x32, 0xa3, 0xd2, 0x65, 0x1d, 0x67, 0xc1, 0xdd, 0x8d, 0x03, 0xa5, 0xbb, 0x0f, 0xe0, 0xaa, + 0xa7, 0xd0, 0x04, 0x6a, 0x44, 0x89, 0x9b, 0x69, 0x68, 0xb6, 0x9e, 0x26, 0xa6, 0x8a, 0x05, 0x80, + 0x33, 0xbc, 0xfb, 0x49, 0x01, 0x5b, 0x05, 0x6f, 0xc0, 0x22, 0x0e, 0xff, 0x03, 0x9b, 0xd2, 0x61, + 0x59, 0xd4, 0xd6, 0xd2, 0xc4, 0xdc, 0x7c, 0x2e, 0xdc, 0x95, 0x28, 0x7c, 0x04, 0x34, 0xb9, 0x2d, + 0x0e, 0x9b, 0x64, 0x2d, 0xd8, 0xbb, 0x62, 0x98, 0x41, 0x8e, 0x5d, 0x26, 0xe6, 0xbf, 0xab, 0x3f, + 0x01, 0x15, 0xd7, 0xb8, 0x24, 0x8b, 0x32, 0x21, 0x8b, 0xb8, 0x7c, 0x48, 0x35, 0x2b, 0x23, 0xca, + 0x63, 0x89, 0x76, 0xbf, 0x6e, 0x80, 0x66, 0xd1, 0xd5, 0xab, 0x89, 0xef, 0x50, 0xf8, 0x0e, 0x68, + 0xe2, 0x87, 0xb8, 0x84, 0x13, 0xd9, 0x5a, 0x63, 0xef, 0xd6, 0xd2, 0x03, 0x94, 0x8b, 0x8e, 0xc2, + 0xb1, 0x27, 0x80, 0x18, 0x89, 0xec, 0xea, 0x8d, 0x9f, 0x51, 0x4e, 0xaa, 0x05, 0xab, 0x30, 0x5c, + 0xaa, 0xc2, 0xfb, 0xa0, 0x91, 0xaf, 0xf4, 0xf1, 0x22, 0xa4, 0x72, 0x6d, 0x74, 0xdb, 0x48, 0x13, + 0xb3, 0x71, 0x58, 0xc1, 0x97, 0x57, 0x43, 0xbc, 0x4c, 0x81, 0xaf, 0x81, 0x4e, 0xf3, 0xa6, 0xc5, + 0x37, 0x10, 0x5b, 0x72, 0x7d, 0xcd, 0x2d, 0xb1, 0x77, 0xf2, 0xde, 0xf4, 0x02, 0x89, 0x71, 0x25, + 0x06, 0x07, 0x40, 0x15, 0xbe, 0xc4, 0xad, 0xba, 0x54, 0xdd, 0x5d, 0x53, 0x55, 0x38, 0x6a, 0x37, + 0x73, 0x65, 0x55, 0x44, 0x31, 0xce, 0x84, 0xba, 0x5f, 0x14, 0xb0, 0x73, 0xc5, 0xe1, 0xa7, 0x7e, + 0xcc, 0xe1, 0xdb, 0x15, 0x97, 0xd1, 0x7a, 0x2e, 0x0b, 0xb6, 0xf4, 0xb8, 0xdc, 0xef, 0x02, 0x59, + 0x72, 0xf8, 0x25, 0x50, 0x7d, 0x4e, 0xa7, 0x85, 0x37, 0x37, 0xd6, 0x9c, 0x42, 0xb6, 0x57, 0x8d, + 0xf1, 0x44, 0x48, 0xe0, 0x4c, 0xc9, 0x46, 0xa7, 0x17, 0x46, 0xed, 0xec, 0xc2, 0xa8, 0x9d, 0x5f, + 0x18, 0xb5, 0x0f, 0xa9, 0xa1, 0x9c, 0xa6, 0x86, 0x72, 0x96, 0x1a, 0xca, 0x79, 0x6a, 0x28, 0xdf, + 0x52, 0x43, 0xf9, 0xf8, 0xdd, 0xa8, 0xbd, 0xd1, 0x0a, 0xcd, 0x1f, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x04, 0x9d, 0x1a, 0x33, 0x3e, 0x06, 0x00, 0x00, +} + +func (m *Endpoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Topology) > 0 { + keysForTopology := make([]string, 0, len(m.Topology)) + for k := range m.Topology { + keysForTopology = append(keysForTopology, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForTopology) + for iNdEx := len(keysForTopology) - 1; iNdEx >= 0; iNdEx-- { + v := m.Topology[string(keysForTopology[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForTopology[iNdEx]) + copy(dAtA[i:], keysForTopology[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForTopology[iNdEx]))) + i-- + dAtA[i] = 0xa + i 
= encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if m.TargetRef != nil { + { + size, err := m.TargetRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Hostname != nil { + i -= len(*m.Hostname) + copy(dAtA[i:], *m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Hostname))) + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Conditions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Addresses) > 0 { + for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Addresses[iNdEx]) + copy(dAtA[i:], m.Addresses[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Addresses[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *EndpointConditions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointConditions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointConditions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Ready != nil { + i-- + if *m.Ready { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EndpointPort) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointPort) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x18 + } + if m.Protocol != nil { + i -= len(*m.Protocol) + copy(dAtA[i:], *m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) + i-- + dAtA[i] = 0x12 + } + if m.Name != nil { + i -= len(*m.Name) + copy(dAtA[i:], *m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EndpointSlice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointSlice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AddressType != nil { + i -= len(*m.AddressType) + copy(dAtA[i:], *m.AddressType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AddressType))) + i-- + dAtA[i] = 0x22 + } + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Endpoints) > 0 { + for iNdEx := len(m.Endpoints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err 
:= m.Endpoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EndpointSliceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointSliceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Endpoint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Addresses) > 0 { + for _, s := range m.Addresses { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Conditions.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Hostname != nil { + l = len(*m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TargetRef != nil { + l = m.TargetRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Topology) > 0 { + for k, v := range m.Topology { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *EndpointConditions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Ready != nil { + n += 2 + } + return n +} + +func (m *EndpointPort) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Name != nil { + l = len(*m.Name) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Protocol != nil { + l = len(*m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } + return n +} + +func (m *EndpointSlice) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Endpoints) > 0 { + for _, e := range m.Endpoints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AddressType != nil { + l = len(*m.AddressType) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *EndpointSliceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 
{ + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Endpoint) String() string { + if this == nil { + return "nil" + } + keysForTopology := make([]string, 0, len(this.Topology)) + for k := range this.Topology { + keysForTopology = append(keysForTopology, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForTopology) + mapStringForTopology := "map[string]string{" + for _, k := range keysForTopology { + mapStringForTopology += fmt.Sprintf("%v: %v,", k, this.Topology[k]) + } + mapStringForTopology += "}" + s := strings.Join([]string{`&Endpoint{`, + `Addresses:` + fmt.Sprintf("%v", this.Addresses) + `,`, + `Conditions:` + strings.Replace(strings.Replace(this.Conditions.String(), "EndpointConditions", "EndpointConditions", 1), `&`, ``, 1) + `,`, + `Hostname:` + valueToStringGenerated(this.Hostname) + `,`, + `TargetRef:` + strings.Replace(fmt.Sprintf("%v", this.TargetRef), "ObjectReference", "v1.ObjectReference", 1) + `,`, + `Topology:` + mapStringForTopology + `,`, + `}`, + }, "") + return s +} +func (this *EndpointConditions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointConditions{`, + `Ready:` + valueToStringGenerated(this.Ready) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointPort{`, + `Name:` + valueToStringGenerated(this.Name) + `,`, + `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointSlice) String() string { + if this == nil { + return "nil" + } + repeatedStringForEndpoints := "[]Endpoint{" + for _, f := range this.Endpoints { + repeatedStringForEndpoints += strings.Replace(strings.Replace(f.String(), "Endpoint", "Endpoint", 1), `&`, ``, 1) + "," + } + repeatedStringForEndpoints += "}" + repeatedStringForPorts := "[]EndpointPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "EndpointPort", "EndpointPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + s := strings.Join([]string{`&EndpointSlice{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Endpoints:` + repeatedStringForEndpoints + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `AddressType:` + valueToStringGenerated(this.AddressType) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointSliceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]EndpointSlice{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "EndpointSlice", "EndpointSlice", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&EndpointSliceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) 
+} +func (m *Endpoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Endpoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Endpoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Conditions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Hostname = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetRef == nil { + m.TargetRef = &v1.ObjectReference{} + } + if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { 
+ return fmt.Errorf("proto: wrong wireType = %d for field Topology", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Topology == nil { + m.Topology = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Topology[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointConditions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointConditions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointConditions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Ready = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointPort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Name = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + m.Protocol = &s + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + default: + iNdEx = preIndex + 
skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointSlice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointSlice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointSlice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Endpoints = append(m.Endpoints, Endpoint{}) + if err := m.Endpoints[len(m.Endpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, EndpointPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AddressType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := AddressType(dAtA[iNdEx:postIndex]) + m.AddressType = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointSliceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointSliceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointSliceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, EndpointSlice{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/api/discovery/v1alpha1/generated.proto b/vendor/k8s.io/api/discovery/v1alpha1/generated.proto new file mode 100644 index 000000000..f14f37fd0 --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/generated.proto @@ -0,0 +1,148 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.discovery.v1alpha1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// Endpoint represents a single logical "backend" implementing a service. +message Endpoint { + // addresses of this endpoint. The contents of this field are interpreted + // according to the corresponding EndpointSlice addressType field. This + // allows for cases like dual-stack (IPv4 and IPv6) networking. Consumers + // (e.g. kube-proxy) must handle different types of addresses in the context + // of their own capabilities. 
This must contain at least one address but no + // more than 100. + // +listType=set + repeated string addresses = 1; + + // conditions contains information about the current status of the endpoint. + optional EndpointConditions conditions = 2; + + // hostname of this endpoint. This field may be used by consumers of + // endpoints to distinguish endpoints from each other (e.g. in DNS names). + // Multiple endpoints which use the same hostname should be considered + // fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) + // validation. + // +optional + optional string hostname = 3; + + // targetRef is a reference to a Kubernetes object that represents this + // endpoint. + // +optional + optional k8s.io.api.core.v1.ObjectReference targetRef = 4; + + // topology contains arbitrary topology information associated with the + // endpoint. These key/value pairs must conform with the label format. + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // Topology may include a maximum of 16 key/value pairs. This includes, but + // is not limited to the following well known keys: + // * kubernetes.io/hostname: the value indicates the hostname of the node + // where the endpoint is located. This should match the corresponding + // node label. + // * topology.kubernetes.io/zone: the value indicates the zone where the + // endpoint is located. This should match the corresponding node label. + // * topology.kubernetes.io/region: the value indicates the region where the + // endpoint is located. This should match the corresponding node label. + // +optional + map topology = 5; +} + +// EndpointConditions represents the current condition of an endpoint. +message EndpointConditions { + // ready indicates that this endpoint is prepared to receive traffic, + // according to whatever system is managing the endpoint. A nil value + // indicates an unknown state. In most cases consumers should interpret this + // unknown state as ready. + // +optional + optional bool ready = 1; +} + +// EndpointPort represents a Port used by an EndpointSlice +message EndpointPort { + // The name of this port. All ports in an EndpointSlice must have a unique + // name. If the EndpointSlice is dervied from a Kubernetes service, this + // corresponds to the Service.ports[].name. + // Name must either be an empty string or pass IANA_SVC_NAME validation: + // * must be no more than 15 characters long + // * may contain only [-a-z0-9] + // * must contain at least one letter [a-z] + // * it must not start or end with a hyphen, nor contain adjacent hyphens + // Default is empty string. + optional string name = 1; + + // The IP protocol for this port. + // Must be UDP, TCP, or SCTP. + // Default is TCP. + optional string protocol = 2; + + // The port number of the endpoint. + // If this is not specified, ports are not restricted and must be + // interpreted in the context of the specific consumer. + optional int32 port = 3; +} + +// EndpointSlice represents a subset of the endpoints that implement a service. +// For a given service there may be multiple EndpointSlice objects, selected by +// labels, which must be joined to produce the full set of endpoints. +message EndpointSlice { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // addressType specifies the type of address carried by this EndpointSlice. + // All addresses in this slice must be the same type. 
+ // Default is IP + // +optional + optional string addressType = 4; + + // endpoints is a list of unique endpoints in this slice. Each slice may + // include a maximum of 1000 endpoints. + // +listType=atomic + repeated Endpoint endpoints = 2; + + // ports specifies the list of network ports exposed by each endpoint in + // this slice. Each port must have a unique name. When ports is empty, it + // indicates that there are no defined ports. When a port is defined with a + // nil port value, it indicates "all ports". Each slice may include a + // maximum of 100 ports. + // +optional + // +listType=atomic + repeated EndpointPort ports = 3; +} + +// EndpointSliceList represents a list of endpoint slices +message EndpointSliceList { + // Standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of endpoint slices + // +listType=set + repeated EndpointSlice items = 2; +} + diff --git a/vendor/k8s.io/api/discovery/v1alpha1/register.go b/vendor/k8s.io/api/discovery/v1alpha1/register.go new file mode 100644 index 000000000..55b73f992 --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "discovery.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &EndpointSlice{}, + &EndpointSliceList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/discovery/v1alpha1/types.go b/vendor/k8s.io/api/discovery/v1alpha1/types.go new file mode 100644 index 000000000..0de6082ae --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/types.go @@ -0,0 +1,144 @@ +/* +Copyright 2019 The Kubernetes Authors. 
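Editorial note: register.go above wires the new discovery.k8s.io/v1alpha1 kinds into an apimachinery scheme. A minimal usage sketch follows; the package alias and the final print statement are assumptions for illustration and are not part of this diff.

package main

import (
	"fmt"

	discoveryv1alpha1 "k8s.io/api/discovery/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme registers EndpointSlice and EndpointSliceList under
	// discovery.k8s.io/v1alpha1 and adds the shared meta/v1 types.
	if err := discoveryv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	gvk := discoveryv1alpha1.SchemeGroupVersion.WithKind("EndpointSlice")
	fmt.Println(scheme.Recognizes(gvk)) // true once registration succeeded
}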
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EndpointSlice represents a subset of the endpoints that implement a service. +// For a given service there may be multiple EndpointSlice objects, selected by +// labels, which must be joined to produce the full set of endpoints. +type EndpointSlice struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // addressType specifies the type of address carried by this EndpointSlice. + // All addresses in this slice must be the same type. + // Default is IP + // +optional + AddressType *AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"` + // endpoints is a list of unique endpoints in this slice. Each slice may + // include a maximum of 1000 endpoints. + // +listType=atomic + Endpoints []Endpoint `json:"endpoints" protobuf:"bytes,2,rep,name=endpoints"` + // ports specifies the list of network ports exposed by each endpoint in + // this slice. Each port must have a unique name. When ports is empty, it + // indicates that there are no defined ports. When a port is defined with a + // nil port value, it indicates "all ports". Each slice may include a + // maximum of 100 ports. + // +optional + // +listType=atomic + Ports []EndpointPort `json:"ports" protobuf:"bytes,3,rep,name=ports"` +} + +// AddressType represents the type of address referred to by an endpoint. +type AddressType string + +const ( + // AddressTypeIP represents an IP Address. + AddressTypeIP = AddressType("IP") +) + +// Endpoint represents a single logical "backend" implementing a service. +type Endpoint struct { + // addresses of this endpoint. The contents of this field are interpreted + // according to the corresponding EndpointSlice addressType field. This + // allows for cases like dual-stack (IPv4 and IPv6) networking. Consumers + // (e.g. kube-proxy) must handle different types of addresses in the context + // of their own capabilities. This must contain at least one address but no + // more than 100. + // +listType=set + Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"` + // conditions contains information about the current status of the endpoint. + Conditions EndpointConditions `json:"conditions,omitempty" protobuf:"bytes,2,opt,name=conditions"` + // hostname of this endpoint. This field may be used by consumers of + // endpoints to distinguish endpoints from each other (e.g. in DNS names). + // Multiple endpoints which use the same hostname should be considered + // fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) + // validation. 
+ // +optional + Hostname *string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` + // targetRef is a reference to a Kubernetes object that represents this + // endpoint. + // +optional + TargetRef *v1.ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,4,opt,name=targetRef"` + // topology contains arbitrary topology information associated with the + // endpoint. These key/value pairs must conform with the label format. + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // Topology may include a maximum of 16 key/value pairs. This includes, but + // is not limited to the following well known keys: + // * kubernetes.io/hostname: the value indicates the hostname of the node + // where the endpoint is located. This should match the corresponding + // node label. + // * topology.kubernetes.io/zone: the value indicates the zone where the + // endpoint is located. This should match the corresponding node label. + // * topology.kubernetes.io/region: the value indicates the region where the + // endpoint is located. This should match the corresponding node label. + // +optional + Topology map[string]string `json:"topology,omitempty" protobuf:"bytes,5,opt,name=topology"` +} + +// EndpointConditions represents the current condition of an endpoint. +type EndpointConditions struct { + // ready indicates that this endpoint is prepared to receive traffic, + // according to whatever system is managing the endpoint. A nil value + // indicates an unknown state. In most cases consumers should interpret this + // unknown state as ready. + // +optional + Ready *bool `json:"ready,omitempty" protobuf:"bytes,1,name=ready"` +} + +// EndpointPort represents a Port used by an EndpointSlice +type EndpointPort struct { + // The name of this port. All ports in an EndpointSlice must have a unique + // name. If the EndpointSlice is dervied from a Kubernetes service, this + // corresponds to the Service.ports[].name. + // Name must either be an empty string or pass IANA_SVC_NAME validation: + // * must be no more than 15 characters long + // * may contain only [-a-z0-9] + // * must contain at least one letter [a-z] + // * it must not start or end with a hyphen, nor contain adjacent hyphens + // Default is empty string. + Name *string `json:"name,omitempty" protobuf:"bytes,1,name=name"` + // The IP protocol for this port. + // Must be UDP, TCP, or SCTP. + // Default is TCP. + Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,name=protocol"` + // The port number of the endpoint. + // If this is not specified, ports are not restricted and must be + // interpreted in the context of the specific consumer. + Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EndpointSliceList represents a list of endpoint slices +type EndpointSliceList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. 
+ // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of endpoint slices + // +listType=set + Items []EndpointSlice `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000..a524bcd68 --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,85 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Endpoint = map[string]string{ + "": "Endpoint represents a single logical \"backend\" implementing a service.", + "addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. This allows for cases like dual-stack (IPv4 and IPv6) networking. Consumers (e.g. kube-proxy) must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100.", + "conditions": "conditions contains information about the current status of the endpoint.", + "hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) validation.", + "targetRef": "targetRef is a reference to a Kubernetes object that represents this endpoint.", + "topology": "topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node\n where the endpoint is located. This should match the corresponding\n node label.\n* topology.kubernetes.io/zone: the value indicates the zone where the\n endpoint is located. This should match the corresponding node label.\n* topology.kubernetes.io/region: the value indicates the region where the\n endpoint is located. 
This should match the corresponding node label.", +} + +func (Endpoint) SwaggerDoc() map[string]string { + return map_Endpoint +} + +var map_EndpointConditions = map[string]string{ + "": "EndpointConditions represents the current condition of an endpoint.", + "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready.", +} + +func (EndpointConditions) SwaggerDoc() map[string]string { + return map_EndpointConditions +} + +var map_EndpointPort = map[string]string{ + "": "EndpointPort represents a Port used by an EndpointSlice", + "name": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass IANA_SVC_NAME validation: * must be no more than 15 characters long * may contain only [-a-z0-9] * must contain at least one letter [a-z] * it must not start or end with a hyphen, nor contain adjacent hyphens Default is empty string.", + "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", + "port": "The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", +} + +func (EndpointPort) SwaggerDoc() map[string]string { + return map_EndpointPort +} + +var map_EndpointSlice = map[string]string{ + "": "EndpointSlice represents a subset of the endpoints that implement a service. For a given service there may be multiple EndpointSlice objects, selected by labels, which must be joined to produce the full set of endpoints.", + "metadata": "Standard object's metadata.", + "addressType": "addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. Default is IP", + "endpoints": "endpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints.", + "ports": "ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates \"all ports\". Each slice may include a maximum of 100 ports.", +} + +func (EndpointSlice) SwaggerDoc() map[string]string { + return map_EndpointSlice +} + +var map_EndpointSliceList = map[string]string{ + "": "EndpointSliceList represents a list of endpoint slices", + "metadata": "Standard list metadata.", + "items": "List of endpoint slices", +} + +func (EndpointSliceList) SwaggerDoc() map[string]string { + return map_EndpointSliceList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/discovery/v1alpha1/well_known_labels.go b/vendor/k8s.io/api/discovery/v1alpha1/well_known_labels.go new file mode 100644 index 000000000..850cd2059 --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/well_known_labels.go @@ -0,0 +1,22 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +const ( + // LabelServiceName is used to indicate the name of a Kubernetes service. + LabelServiceName = "kubernetes.io/service-name" +) diff --git a/vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..e424fccf3 --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,195 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Endpoint) DeepCopyInto(out *Endpoint) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.Conditions.DeepCopyInto(&out.Conditions) + if in.Hostname != nil { + in, out := &in.Hostname, &out.Hostname + *out = new(string) + **out = **in + } + if in.TargetRef != nil { + in, out := &in.TargetRef, &out.TargetRef + *out = new(v1.ObjectReference) + **out = **in + } + if in.Topology != nil { + in, out := &in.Topology, &out.Topology + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. +func (in *Endpoint) DeepCopy() *Endpoint { + if in == nil { + return nil + } + out := new(Endpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointConditions) DeepCopyInto(out *EndpointConditions) { + *out = *in + if in.Ready != nil { + in, out := &in.Ready, &out.Ready + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointConditions. +func (in *EndpointConditions) DeepCopy() *EndpointConditions { + if in == nil { + return nil + } + out := new(EndpointConditions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointPort) DeepCopyInto(out *EndpointPort) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(v1.Protocol) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort. +func (in *EndpointPort) DeepCopy() *EndpointPort { + if in == nil { + return nil + } + out := new(EndpointPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointSlice) DeepCopyInto(out *EndpointSlice) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.AddressType != nil { + in, out := &in.AddressType, &out.AddressType + *out = new(AddressType) + **out = **in + } + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]Endpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]EndpointPort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSlice. +func (in *EndpointSlice) DeepCopy() *EndpointSlice { + if in == nil { + return nil + } + out := new(EndpointSlice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EndpointSlice) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointSliceList) DeepCopyInto(out *EndpointSliceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EndpointSlice, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSliceList. +func (in *EndpointSliceList) DeepCopy() *EndpointSliceList { + if in == nil { + return nil + } + out := new(EndpointSliceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EndpointSliceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/api/events/v1beta1/generated.pb.go b/vendor/k8s.io/api/events/v1beta1/generated.pb.go index bb0c881b5..0e9a8e783 100644 --- a/vendor/k8s.io/api/events/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/events/v1beta1/generated.pb.go @@ -17,29 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto - - It has these top-level messages: - Event - EventList - EventSeries -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v11 "k8s.io/api/core/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -52,27 +44,159 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *Event) Reset() { *m = Event{} } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *Event) Reset() { *m = Event{} } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { + return fileDescriptor_4f97f691c32a5ac8, []int{0} +} +func (m *Event) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Event.Merge(m, src) +} +func (m *Event) XXX_Size() int { + return m.Size() +} +func (m *Event) XXX_DiscardUnknown() { + xxx_messageInfo_Event.DiscardUnknown(m) +} + +var xxx_messageInfo_Event proto.InternalMessageInfo + +func (m *EventList) Reset() { *m = EventList{} } +func (*EventList) ProtoMessage() {} +func (*EventList) Descriptor() ([]byte, []int) { + return fileDescriptor_4f97f691c32a5ac8, []int{1} +} +func (m *EventList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventList.Merge(m, src) +} +func (m *EventList) XXX_Size() int { + return m.Size() +} +func (m *EventList) XXX_DiscardUnknown() { + xxx_messageInfo_EventList.DiscardUnknown(m) +} + +var xxx_messageInfo_EventList proto.InternalMessageInfo -func (m *EventList) Reset() { *m = EventList{} } -func (*EventList) ProtoMessage() {} -func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *EventSeries) Reset() { *m = EventSeries{} } +func (*EventSeries) ProtoMessage() {} +func (*EventSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_4f97f691c32a5ac8, []int{2} +} +func (m *EventSeries) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventSeries.Merge(m, src) +} +func (m *EventSeries) XXX_Size() int { + return m.Size() +} +func (m *EventSeries) XXX_DiscardUnknown() { + xxx_messageInfo_EventSeries.DiscardUnknown(m) +} -func (m *EventSeries) Reset() { *m = EventSeries{} } -func (*EventSeries) ProtoMessage() {} -func (*EventSeries) Descriptor() 
([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_EventSeries proto.InternalMessageInfo func init() { proto.RegisterType((*Event)(nil), "k8s.io.api.events.v1beta1.Event") proto.RegisterType((*EventList)(nil), "k8s.io.api.events.v1beta1.EventList") proto.RegisterType((*EventSeries)(nil), "k8s.io.api.events.v1beta1.EventSeries") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto", fileDescriptor_4f97f691c32a5ac8) +} + +var fileDescriptor_4f97f691c32a5ac8 = []byte{ + // 801 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0x16, 0x13, 0x4b, 0xb2, 0x56, 0x49, 0x2c, 0x6f, 0x0e, 0xde, 0xb8, 0x00, 0xa5, 0x2a, 0x40, + 0x20, 0x14, 0x08, 0x59, 0x07, 0x45, 0xdb, 0x6b, 0x18, 0xb9, 0x45, 0x02, 0xbb, 0x01, 0xd6, 0x3e, + 0x15, 0x3d, 0x64, 0x45, 0x4d, 0x68, 0x56, 0xe2, 0x2e, 0xb1, 0xbb, 0x12, 0xe0, 0x5b, 0x2f, 0x05, + 0x7a, 0xec, 0x33, 0xf4, 0x09, 0xfa, 0x18, 0x3e, 0xe6, 0x98, 0x93, 0x50, 0xb3, 0x6f, 0xd1, 0x53, + 0xc1, 0xe5, 0x4a, 0x94, 0xf5, 0x83, 0xa8, 0xe8, 0x4d, 0x9c, 0xf9, 0x7e, 0x66, 0x66, 0x47, 0x83, + 0x82, 0xd1, 0xb7, 0xca, 0x8b, 0x85, 0x3f, 0x9a, 0x0c, 0x40, 0x72, 0xd0, 0xa0, 0xfc, 0x29, 0xf0, + 0xa1, 0x90, 0xbe, 0x4d, 0xb0, 0x34, 0xf6, 0x61, 0x0a, 0x5c, 0x2b, 0x7f, 0x7a, 0x32, 0x00, 0xcd, + 0x4e, 0xfc, 0x08, 0x38, 0x48, 0xa6, 0x61, 0xe8, 0xa5, 0x52, 0x68, 0x81, 0x9f, 0x14, 0x50, 0x8f, + 0xa5, 0xb1, 0x57, 0x40, 0x3d, 0x0b, 0x3d, 0x7e, 0x1e, 0xc5, 0xfa, 0x6a, 0x32, 0xf0, 0x42, 0x91, + 0xf8, 0x91, 0x88, 0x84, 0x6f, 0x18, 0x83, 0xc9, 0x7b, 0xf3, 0x65, 0x3e, 0xcc, 0xaf, 0x42, 0xe9, + 0xb8, 0xbb, 0x64, 0x1a, 0x0a, 0x09, 0xfe, 0x74, 0xcd, 0xed, 0xf8, 0xab, 0x12, 0x93, 0xb0, 0xf0, + 0x2a, 0xe6, 0x20, 0xaf, 0xfd, 0x74, 0x14, 0xe5, 0x01, 0xe5, 0x27, 0xa0, 0xd9, 0x26, 0x96, 0xbf, + 0x8d, 0x25, 0x27, 0x5c, 0xc7, 0x09, 0xac, 0x11, 0xbe, 0xfe, 0x14, 0x41, 0x85, 0x57, 0x90, 0xb0, + 0x55, 0x5e, 0xf7, 0x8f, 0x06, 0xaa, 0x9e, 0xe6, 0x43, 0xc0, 0xef, 0xd0, 0x7e, 0x5e, 0xcd, 0x90, + 0x69, 0x46, 0x9c, 0x8e, 0xd3, 0x6b, 0xbe, 0xf8, 0xd2, 0x2b, 0x27, 0xb5, 0x10, 0xf5, 0xd2, 0x51, + 0x94, 0x07, 0x94, 0x97, 0xa3, 0xbd, 0xe9, 0x89, 0xf7, 0x76, 0xf0, 0x33, 0x84, 0xfa, 0x1c, 0x34, + 0x0b, 0xf0, 0xcd, 0xac, 0x5d, 0xc9, 0x66, 0x6d, 0x54, 0xc6, 0xe8, 0x42, 0x15, 0xbf, 0x43, 0x0d, + 0x33, 0xef, 0xcb, 0x38, 0x01, 0x72, 0xcf, 0x58, 0xf8, 0xbb, 0x59, 0x9c, 0xc7, 0xa1, 0x14, 0x39, + 0x2d, 0x38, 0xb4, 0x0e, 0x8d, 0xd3, 0xb9, 0x12, 0x2d, 0x45, 0xf1, 0x1b, 0x54, 0x53, 0x20, 0x63, + 0x50, 0xe4, 0xbe, 0x91, 0x7f, 0xe6, 0x6d, 0x7d, 0x6b, 0xcf, 0x08, 0x5c, 0x18, 0x74, 0x80, 0xb2, + 0x59, 0xbb, 0x56, 0xfc, 0xa6, 0x56, 0x01, 0x9f, 0xa3, 0xc7, 0x12, 0x52, 0x21, 0x75, 0xcc, 0xa3, + 0x57, 0x82, 0x6b, 0x29, 0xc6, 0x63, 0x90, 0x64, 0xaf, 0xe3, 0xf4, 0x1a, 0xc1, 0x67, 0xb6, 0x8c, + 0xc7, 0x74, 0x1d, 0x42, 0x37, 0xf1, 0xf0, 0xf7, 0xe8, 0x70, 0x11, 0x7e, 0xcd, 0x95, 0x66, 0x3c, + 0x04, 0x52, 0x35, 0x62, 0x4f, 0xac, 0xd8, 0x21, 0x5d, 0x05, 0xd0, 0x75, 0x0e, 0x7e, 0x86, 0x6a, + 0x2c, 0xd4, 0xb1, 0xe0, 0xa4, 0x66, 0xd8, 0x8f, 0x2c, 0xbb, 0xf6, 0xd2, 0x44, 0xa9, 0xcd, 0xe6, + 0x38, 0x09, 0x4c, 0x09, 0x4e, 0xea, 0x77, 0x71, 0xd4, 0x44, 0xa9, 0xcd, 0xe2, 0x4b, 0xd4, 0x90, + 0x10, 0x31, 0x39, 0x8c, 0x79, 0x44, 0xf6, 0xcd, 0xd8, 0x9e, 0x2e, 0x8f, 0x2d, 0x5f, 0xec, 0xf2, + 0x99, 0x29, 0xbc, 0x07, 0x09, 0x3c, 0x5c, 0x7a, 0x09, 0x3a, 0x67, 0xd3, 0x52, 0x08, 0xbf, 0x41, + 0x75, 0x09, 0xe3, 0x7c, 0xd1, 0x48, 0x63, 0x77, 0xcd, 0x66, 0x36, 0x6b, 0xd7, 0x69, 0xc1, 0xa3, + 0x73, 
0x01, 0xdc, 0x41, 0x7b, 0x5c, 0x68, 0x20, 0xc8, 0xf4, 0xf1, 0xc0, 0xfa, 0xee, 0xfd, 0x20, + 0x34, 0x50, 0x93, 0xc9, 0x11, 0xfa, 0x3a, 0x05, 0xd2, 0xbc, 0x8b, 0xb8, 0xbc, 0x4e, 0x81, 0x9a, + 0x0c, 0x06, 0xd4, 0x1a, 0x42, 0x2a, 0x21, 0xcc, 0x15, 0x2f, 0xc4, 0x44, 0x86, 0x40, 0x1e, 0x98, + 0xc2, 0xda, 0x9b, 0x0a, 0x2b, 0x96, 0xc3, 0xc0, 0x02, 0x62, 0xe5, 0x5a, 0xfd, 0x15, 0x01, 0xba, + 0x26, 0x89, 0x7f, 0x73, 0x10, 0x29, 0x83, 0xdf, 0xc5, 0x52, 0x99, 0xc5, 0x54, 0x9a, 0x25, 0x29, + 0x79, 0x68, 0xfc, 0xbe, 0xd8, 0x6d, 0xe5, 0xcd, 0xb6, 0x77, 0xac, 0x35, 0xe9, 0x6f, 0xd1, 0xa4, + 0x5b, 0xdd, 0xf0, 0xaf, 0x0e, 0x3a, 0x2a, 0x93, 0x67, 0x6c, 0xb9, 0x92, 0x47, 0xff, 0xb9, 0x92, + 0xb6, 0xad, 0xe4, 0xa8, 0xbf, 0x59, 0x92, 0x6e, 0xf3, 0xc2, 0x2f, 0xd1, 0x41, 0x99, 0x7a, 0x25, + 0x26, 0x5c, 0x93, 0x83, 0x8e, 0xd3, 0xab, 0x06, 0x47, 0x56, 0xf2, 0xa0, 0x7f, 0x37, 0x4d, 0x57, + 0xf1, 0xdd, 0x3f, 0x1d, 0x54, 0xfc, 0xdf, 0xcf, 0x62, 0xa5, 0xf1, 0x4f, 0x6b, 0x87, 0xca, 0xdb, + 0xad, 0x91, 0x9c, 0x6d, 0xce, 0x54, 0xcb, 0x3a, 0xef, 0xcf, 0x23, 0x4b, 0x47, 0xea, 0x14, 0x55, + 0x63, 0x0d, 0x89, 0x22, 0xf7, 0x3a, 0xf7, 0x7b, 0xcd, 0x17, 0x9d, 0x4f, 0x5d, 0x90, 0xe0, 0xa1, + 0x15, 0xab, 0xbe, 0xce, 0x69, 0xb4, 0x60, 0x77, 0x33, 0x07, 0x35, 0x97, 0x2e, 0x0c, 0x7e, 0x8a, + 0xaa, 0xa1, 0xe9, 0xdd, 0x31, 0xbd, 0x2f, 0x48, 0x45, 0xc7, 0x45, 0x0e, 0x4f, 0x50, 0x6b, 0xcc, + 0x94, 0x7e, 0x3b, 0x50, 0x20, 0xa7, 0x30, 0xfc, 0x3f, 0x77, 0x72, 0xb1, 0xb4, 0x67, 0x2b, 0x82, + 0x74, 0xcd, 0x02, 0x7f, 0x83, 0xaa, 0x4a, 0x33, 0x0d, 0xe6, 0x68, 0x36, 0x82, 0xcf, 0xe7, 0xb5, + 0x5d, 0xe4, 0xc1, 0x7f, 0x66, 0xed, 0xd6, 0x52, 0x23, 0x26, 0x46, 0x0b, 0x7c, 0xf0, 0xfc, 0xe6, + 0xd6, 0xad, 0x7c, 0xb8, 0x75, 0x2b, 0x1f, 0x6f, 0xdd, 0xca, 0x2f, 0x99, 0xeb, 0xdc, 0x64, 0xae, + 0xf3, 0x21, 0x73, 0x9d, 0x8f, 0x99, 0xeb, 0xfc, 0x95, 0xb9, 0xce, 0xef, 0x7f, 0xbb, 0x95, 0x1f, + 0xeb, 0x76, 0x5e, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x25, 0x9b, 0x14, 0x4d, 0xbd, 0x07, 0x00, + 0x00, +} + func (m *Event) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -80,112 +204,139 @@ func (m *Event) Marshal() (dAtA []byte, err error) { } func (m *Event) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.EventTime.Size())) - n2, err := m.EventTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedCount)) + i-- + dAtA[i] = 0x78 + { + size, err := m.DeprecatedLastTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - if m.Series != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Series.Size())) - n3, err := m.Series.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x72 + { + size, err := m.DeprecatedFirstTimestamp.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n3 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.ReportingController))) - i += copy(dAtA[i:], m.ReportingController) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) - i += copy(dAtA[i:], m.ReportingInstance) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) - i += copy(dAtA[i:], m.Action) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Regarding.Size())) - n4, err := m.Regarding.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x6a + { + size, err := m.DeprecatedSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- + dAtA[i] = 0x62 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x5a + i -= len(m.Note) + copy(dAtA[i:], m.Note) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Note))) + i-- + dAtA[i] = 0x52 if m.Related != nil { + { + size, err := m.Related.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Related.Size())) - n5, err := m.Related.MarshalTo(dAtA[i:]) + } + { + size, err := m.Regarding.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n5 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Note))) - i += copy(dAtA[i:], m.Note) - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedSource.Size())) - n6, err := m.DeprecatedSource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x42 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x3a + i -= len(m.Action) + copy(dAtA[i:], m.Action) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) + i-- + dAtA[i] = 0x32 + i -= len(m.ReportingInstance) + copy(dAtA[i:], m.ReportingInstance) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) + i-- + dAtA[i] = 0x2a + i -= len(m.ReportingController) + copy(dAtA[i:], m.ReportingController) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) + i-- + dAtA[i] = 0x22 + if m.Series != nil { + { + size, err := m.Series.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i += n6 - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedFirstTimestamp.Size())) - n7, err := m.DeprecatedFirstTimestamp.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.EventTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedLastTimestamp.Size())) - n8, err := m.DeprecatedLastTimestamp.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 - dAtA[i] = 0x78 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedCount)) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *EventList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -193,37 +344,46 @@ func (m *EventList) Marshal() (dAtA []byte, err error) { } func (m *EventList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *EventSeries) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -231,38 +391,51 @@ func (m *EventSeries) Marshal() (dAtA []byte, err error) { } func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastObservedTime.Size())) - n10, err := m.LastObservedTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - dAtA[i] = 0x1a - i++ + i -= len(m.State) + copy(dAtA[i:], m.State) i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) - i += copy(dAtA[i:], m.State) - return i, nil + i-- + dAtA[i] = 0x1a + { + size, err := m.LastObservedTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *Event) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -302,6 +475,9 @@ func (m *Event) Size() (n int) { } func (m *EventList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -316,6 +492,9 @@ func (m *EventList) Size() (n int) { } func (m *EventSeries) Size() (n int) 
{ + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Count)) @@ -327,14 +506,7 @@ func (m *EventSeries) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -344,20 +516,20 @@ func (this *Event) String() string { return "nil" } s := strings.Join([]string{`&Event{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `EventTime:` + strings.Replace(strings.Replace(this.EventTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, - `Series:` + strings.Replace(fmt.Sprintf("%v", this.Series), "EventSeries", "EventSeries", 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `EventTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EventTime), "MicroTime", "v1.MicroTime", 1), `&`, ``, 1) + `,`, + `Series:` + strings.Replace(this.Series.String(), "EventSeries", "EventSeries", 1) + `,`, `ReportingController:` + fmt.Sprintf("%v", this.ReportingController) + `,`, `ReportingInstance:` + fmt.Sprintf("%v", this.ReportingInstance) + `,`, `Action:` + fmt.Sprintf("%v", this.Action) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Regarding:` + strings.Replace(strings.Replace(this.Regarding.String(), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, - `Related:` + strings.Replace(fmt.Sprintf("%v", this.Related), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1) + `,`, + `Regarding:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Regarding), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `Related:` + strings.Replace(fmt.Sprintf("%v", this.Related), "ObjectReference", "v11.ObjectReference", 1) + `,`, `Note:` + fmt.Sprintf("%v", this.Note) + `,`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `DeprecatedSource:` + strings.Replace(strings.Replace(this.DeprecatedSource.String(), "EventSource", "k8s_io_api_core_v1.EventSource", 1), `&`, ``, 1) + `,`, - `DeprecatedFirstTimestamp:` + strings.Replace(strings.Replace(this.DeprecatedFirstTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `DeprecatedLastTimestamp:` + strings.Replace(strings.Replace(this.DeprecatedLastTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `DeprecatedSource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DeprecatedSource), "EventSource", "v11.EventSource", 1), `&`, ``, 1) + `,`, + `DeprecatedFirstTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DeprecatedFirstTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `DeprecatedLastTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DeprecatedLastTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `DeprecatedCount:` + fmt.Sprintf("%v", this.DeprecatedCount) + `,`, `}`, }, "") @@ -367,9 +539,14 @@ func (this *EventList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Event{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Event", "Event", 1), `&`, ``, 1) + "," + } + 
repeatedStringForItems += "}" s := strings.Join([]string{`&EventList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Event", "Event", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -380,7 +557,7 @@ func (this *EventSeries) String() string { } s := strings.Join([]string{`&EventSeries{`, `Count:` + fmt.Sprintf("%v", this.Count) + `,`, - `LastObservedTime:` + strings.Replace(strings.Replace(this.LastObservedTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, + `LastObservedTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastObservedTime), "MicroTime", "v1.MicroTime", 1), `&`, ``, 1) + `,`, `State:` + fmt.Sprintf("%v", this.State) + `,`, `}`, }, "") @@ -409,7 +586,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -437,7 +614,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -446,6 +623,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -467,7 +647,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -476,6 +656,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -497,7 +680,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -506,6 +689,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -530,7 +716,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -540,6 +726,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -559,7 +748,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -569,6 +758,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -588,7 +780,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) 
<< shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -598,6 +790,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -617,7 +812,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -627,6 +822,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -646,7 +844,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -655,6 +853,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -676,7 +877,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -685,11 +886,14 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Related == nil { - m.Related = &k8s_io_api_core_v1.ObjectReference{} + m.Related = &v11.ObjectReference{} } if err := m.Related.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -709,7 +913,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -719,6 +923,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -738,7 +945,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -748,6 +955,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -767,7 +977,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -776,6 +986,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -797,7 +1010,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -806,6 +1019,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -827,7 +1043,7 @@ 
func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -836,6 +1052,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -857,7 +1076,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DeprecatedCount |= (int32(b) & 0x7F) << shift + m.DeprecatedCount |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -871,6 +1090,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -898,7 +1120,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -926,7 +1148,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -935,6 +1157,9 @@ func (m *EventList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -956,7 +1181,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -965,6 +1190,9 @@ func (m *EventList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -982,6 +1210,9 @@ func (m *EventList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1009,7 +1240,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1037,7 +1268,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Count |= (int32(b) & 0x7F) << shift + m.Count |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1056,7 +1287,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1065,6 +1296,9 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1086,7 +1320,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1096,6 +1330,9 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1110,6 +1347,9 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { if skippy < 0 { 
return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1176,10 +1416,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1208,6 +1451,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1226,62 +1472,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 801 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcd, 0x6e, 0xdb, 0x46, - 0x10, 0x16, 0x13, 0x4b, 0xb2, 0x56, 0x49, 0x2c, 0x6f, 0x0e, 0xde, 0xb8, 0x00, 0xa5, 0x2a, 0x40, - 0x20, 0x14, 0x08, 0x59, 0x07, 0x45, 0xdb, 0x6b, 0x18, 0xb9, 0x45, 0x02, 0xbb, 0x01, 0xd6, 0x3e, - 0x15, 0x3d, 0x64, 0x45, 0x4d, 0x68, 0x56, 0xe2, 0x2e, 0xb1, 0xbb, 0x12, 0xe0, 0x5b, 0x2f, 0x05, - 0x7a, 0xec, 0x33, 0xf4, 0x09, 0xfa, 0x18, 0x3e, 0xe6, 0x98, 0x93, 0x50, 0xb3, 0x6f, 0xd1, 0x53, - 0xc1, 0xe5, 0x4a, 0x94, 0xf5, 0x83, 0xa8, 0xe8, 0x4d, 0x9c, 0xf9, 0x7e, 0x66, 0x66, 0x47, 0x83, - 0x82, 0xd1, 0xb7, 0xca, 0x8b, 0x85, 0x3f, 0x9a, 0x0c, 0x40, 0x72, 0xd0, 0xa0, 0xfc, 0x29, 0xf0, - 0xa1, 0x90, 0xbe, 0x4d, 0xb0, 0x34, 0xf6, 0x61, 0x0a, 0x5c, 0x2b, 0x7f, 0x7a, 0x32, 0x00, 0xcd, - 0x4e, 0xfc, 0x08, 0x38, 0x48, 0xa6, 0x61, 0xe8, 0xa5, 0x52, 0x68, 0x81, 0x9f, 0x14, 0x50, 0x8f, - 0xa5, 0xb1, 0x57, 0x40, 0x3d, 0x0b, 0x3d, 0x7e, 0x1e, 0xc5, 0xfa, 0x6a, 0x32, 0xf0, 0x42, 0x91, - 0xf8, 0x91, 0x88, 0x84, 0x6f, 0x18, 0x83, 0xc9, 0x7b, 0xf3, 0x65, 0x3e, 0xcc, 0xaf, 0x42, 0xe9, - 0xb8, 0xbb, 0x64, 0x1a, 0x0a, 0x09, 0xfe, 0x74, 0xcd, 0xed, 0xf8, 0xab, 0x12, 0x93, 0xb0, 0xf0, - 0x2a, 0xe6, 0x20, 0xaf, 0xfd, 0x74, 0x14, 0xe5, 0x01, 0xe5, 0x27, 0xa0, 0xd9, 0x26, 0x96, 0xbf, - 0x8d, 0x25, 0x27, 0x5c, 0xc7, 0x09, 0xac, 0x11, 0xbe, 0xfe, 0x14, 0x41, 0x85, 0x57, 0x90, 0xb0, - 0x55, 0x5e, 0xf7, 0x8f, 0x06, 0xaa, 0x9e, 0xe6, 0x43, 0xc0, 0xef, 0xd0, 0x7e, 0x5e, 0xcd, 0x90, - 0x69, 0x46, 0x9c, 0x8e, 0xd3, 0x6b, 0xbe, 0xf8, 0xd2, 0x2b, 0x27, 0xb5, 0x10, 0xf5, 0xd2, 0x51, - 0x94, 0x07, 0x94, 0x97, 0xa3, 0xbd, 0xe9, 0x89, 0xf7, 0x76, 0xf0, 0x33, 0x84, 0xfa, 0x1c, 0x34, - 0x0b, 0xf0, 0xcd, 0xac, 0x5d, 0xc9, 0x66, 0x6d, 0x54, 0xc6, 0xe8, 0x42, 0x15, 0xbf, 0x43, 0x0d, - 0x33, 0xef, 0xcb, 0x38, 0x01, 0x72, 0xcf, 0x58, 0xf8, 0xbb, 0x59, 0x9c, 0xc7, 0xa1, 0x14, 0x39, - 0x2d, 0x38, 0xb4, 0x0e, 0x8d, 0xd3, 0xb9, 0x12, 0x2d, 0x45, 0xf1, 0x1b, 0x54, 0x53, 0x20, 0x63, - 0x50, 0xe4, 0xbe, 0x91, 0x7f, 0xe6, 0x6d, 0x7d, 0x6b, 0xcf, 0x08, 0x5c, 0x18, 0x74, 0x80, 0xb2, - 0x59, 0xbb, 0x56, 0xfc, 0xa6, 0x56, 0x01, 0x9f, 0xa3, 0xc7, 0x12, 0x52, 0x21, 0x75, 0xcc, 0xa3, - 0x57, 0x82, 0x6b, 0x29, 0xc6, 0x63, 0x90, 0x64, 0xaf, 0xe3, 0xf4, 0x1a, 0xc1, 0x67, 0xb6, 0x8c, - 0xc7, 0x74, 0x1d, 0x42, 0x37, 0xf1, 0xf0, 0xf7, 0xe8, 0x70, 0x11, 0x7e, 0xcd, 0x95, 0x66, 0x3c, - 0x04, 0x52, 0x35, 0x62, 0x4f, 0xac, 0xd8, 0x21, 0x5d, 0x05, 0xd0, 0x75, 0x0e, 0x7e, 0x86, 0x6a, - 0x2c, 0xd4, 0xb1, 0xe0, 0xa4, 0x66, 0xd8, 0x8f, 0x2c, 0xbb, 0xf6, 
0xd2, 0x44, 0xa9, 0xcd, 0xe6, - 0x38, 0x09, 0x4c, 0x09, 0x4e, 0xea, 0x77, 0x71, 0xd4, 0x44, 0xa9, 0xcd, 0xe2, 0x4b, 0xd4, 0x90, - 0x10, 0x31, 0x39, 0x8c, 0x79, 0x44, 0xf6, 0xcd, 0xd8, 0x9e, 0x2e, 0x8f, 0x2d, 0x5f, 0xec, 0xf2, - 0x99, 0x29, 0xbc, 0x07, 0x09, 0x3c, 0x5c, 0x7a, 0x09, 0x3a, 0x67, 0xd3, 0x52, 0x08, 0xbf, 0x41, - 0x75, 0x09, 0xe3, 0x7c, 0xd1, 0x48, 0x63, 0x77, 0xcd, 0x66, 0x36, 0x6b, 0xd7, 0x69, 0xc1, 0xa3, - 0x73, 0x01, 0xdc, 0x41, 0x7b, 0x5c, 0x68, 0x20, 0xc8, 0xf4, 0xf1, 0xc0, 0xfa, 0xee, 0xfd, 0x20, - 0x34, 0x50, 0x93, 0xc9, 0x11, 0xfa, 0x3a, 0x05, 0xd2, 0xbc, 0x8b, 0xb8, 0xbc, 0x4e, 0x81, 0x9a, - 0x0c, 0x06, 0xd4, 0x1a, 0x42, 0x2a, 0x21, 0xcc, 0x15, 0x2f, 0xc4, 0x44, 0x86, 0x40, 0x1e, 0x98, - 0xc2, 0xda, 0x9b, 0x0a, 0x2b, 0x96, 0xc3, 0xc0, 0x02, 0x62, 0xe5, 0x5a, 0xfd, 0x15, 0x01, 0xba, - 0x26, 0x89, 0x7f, 0x73, 0x10, 0x29, 0x83, 0xdf, 0xc5, 0x52, 0x99, 0xc5, 0x54, 0x9a, 0x25, 0x29, - 0x79, 0x68, 0xfc, 0xbe, 0xd8, 0x6d, 0xe5, 0xcd, 0xb6, 0x77, 0xac, 0x35, 0xe9, 0x6f, 0xd1, 0xa4, - 0x5b, 0xdd, 0xf0, 0xaf, 0x0e, 0x3a, 0x2a, 0x93, 0x67, 0x6c, 0xb9, 0x92, 0x47, 0xff, 0xb9, 0x92, - 0xb6, 0xad, 0xe4, 0xa8, 0xbf, 0x59, 0x92, 0x6e, 0xf3, 0xc2, 0x2f, 0xd1, 0x41, 0x99, 0x7a, 0x25, - 0x26, 0x5c, 0x93, 0x83, 0x8e, 0xd3, 0xab, 0x06, 0x47, 0x56, 0xf2, 0xa0, 0x7f, 0x37, 0x4d, 0x57, - 0xf1, 0xdd, 0x3f, 0x1d, 0x54, 0xfc, 0xdf, 0xcf, 0x62, 0xa5, 0xf1, 0x4f, 0x6b, 0x87, 0xca, 0xdb, - 0xad, 0x91, 0x9c, 0x6d, 0xce, 0x54, 0xcb, 0x3a, 0xef, 0xcf, 0x23, 0x4b, 0x47, 0xea, 0x14, 0x55, - 0x63, 0x0d, 0x89, 0x22, 0xf7, 0x3a, 0xf7, 0x7b, 0xcd, 0x17, 0x9d, 0x4f, 0x5d, 0x90, 0xe0, 0xa1, - 0x15, 0xab, 0xbe, 0xce, 0x69, 0xb4, 0x60, 0x77, 0x33, 0x07, 0x35, 0x97, 0x2e, 0x0c, 0x7e, 0x8a, - 0xaa, 0xa1, 0xe9, 0xdd, 0x31, 0xbd, 0x2f, 0x48, 0x45, 0xc7, 0x45, 0x0e, 0x4f, 0x50, 0x6b, 0xcc, - 0x94, 0x7e, 0x3b, 0x50, 0x20, 0xa7, 0x30, 0xfc, 0x3f, 0x77, 0x72, 0xb1, 0xb4, 0x67, 0x2b, 0x82, - 0x74, 0xcd, 0x02, 0x7f, 0x83, 0xaa, 0x4a, 0x33, 0x0d, 0xe6, 0x68, 0x36, 0x82, 0xcf, 0xe7, 0xb5, - 0x5d, 0xe4, 0xc1, 0x7f, 0x66, 0xed, 0xd6, 0x52, 0x23, 0x26, 0x46, 0x0b, 0x7c, 0xf0, 0xfc, 0xe6, - 0xd6, 0xad, 0x7c, 0xb8, 0x75, 0x2b, 0x1f, 0x6f, 0xdd, 0xca, 0x2f, 0x99, 0xeb, 0xdc, 0x64, 0xae, - 0xf3, 0x21, 0x73, 0x9d, 0x8f, 0x99, 0xeb, 0xfc, 0x95, 0xb9, 0xce, 0xef, 0x7f, 0xbb, 0x95, 0x1f, - 0xeb, 0x76, 0x5e, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x25, 0x9b, 0x14, 0x4d, 0xbd, 0x07, 0x00, - 0x00, -} diff --git a/vendor/k8s.io/api/events/v1beta1/generated.proto b/vendor/k8s.io/api/events/v1beta1/generated.proto index b3e565e67..58f5aa422 100644 --- a/vendor/k8s.io/api/events/v1beta1/generated.proto +++ b/vendor/k8s.io/api/events/v1beta1/generated.proto @@ -98,7 +98,7 @@ message Event { // EventList is a list of Event objects. message EventList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -116,6 +116,7 @@ message EventSeries { optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; // Information whether this series is ongoing or finished. + // Deprecated. 
Planned removal for 1.18 optional string state = 3; } diff --git a/vendor/k8s.io/api/events/v1beta1/types.go b/vendor/k8s.io/api/events/v1beta1/types.go index dc48ddb06..0571fbb2e 100644 --- a/vendor/k8s.io/api/events/v1beta1/types.go +++ b/vendor/k8s.io/api/events/v1beta1/types.go @@ -43,11 +43,11 @@ type Event struct { // ID of the controller instance, e.g. `kubelet-xyzf`. // +optional - ReportingInstance string `json:"reportingInstance,omitemtpy" protobuf:"bytes,5,opt,name=reportingInstance"` + ReportingInstance string `json:"reportingInstance,omitempty" protobuf:"bytes,5,opt,name=reportingInstance"` // What action was taken/failed regarding to the regarding object. // +optional - Action string `json:"action,omitemtpy" protobuf:"bytes,6,name=action"` + Action string `json:"action,omitempty" protobuf:"bytes,6,name=action"` // Why the action was taken. Reason string `json:"reason,omitempty" protobuf:"bytes,7,name=reason"` @@ -96,6 +96,7 @@ type EventSeries struct { // Time when last Event from the series was seen before last heartbeat. LastObservedTime metav1.MicroTime `json:"lastObservedTime" protobuf:"bytes,2,opt,name=lastObservedTime"` // Information whether this series is ongoing or finished. + // Deprecated. Planned removal for 1.18 State EventSeriesState `json:"state" protobuf:"bytes,3,opt,name=state"` } @@ -113,7 +114,7 @@ const ( type EventList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go index a15672c19..639daca6d 100644 --- a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go @@ -51,7 +51,7 @@ func (Event) SwaggerDoc() map[string]string { var map_EventList = map[string]string{ "": "EventList is a list of Event objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } @@ -63,7 +63,7 @@ var map_EventSeries = map[string]string{ "": "EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time.", "count": "Number of occurrences in this series up to the last heartbeat time", "lastObservedTime": "Time when last Event from the series was seen before last heartbeat.", - "state": "Information whether this series is ongoing or finished.", + "state": "Information whether this series is ongoing or finished. Deprecated. 
Planned removal for 1.18", } func (EventSeries) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go index e52e142c6..779ebaf6e 100644 --- a/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go @@ -70,7 +70,7 @@ func (in *Event) DeepCopyObject() runtime.Object { func (in *EventList) DeepCopyInto(out *EventList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Event, len(*in)) diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go index 6802be28b..65b47eabd 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go @@ -17,87 +17,26 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto - - It has these top-level messages: - AllowedCSIDriver - AllowedFlexVolume - AllowedHostPath - DaemonSet - DaemonSetCondition - DaemonSetList - DaemonSetSpec - DaemonSetStatus - DaemonSetUpdateStrategy - Deployment - DeploymentCondition - DeploymentList - DeploymentRollback - DeploymentSpec - DeploymentStatus - DeploymentStrategy - FSGroupStrategyOptions - HTTPIngressPath - HTTPIngressRuleValue - HostPortRange - IDRange - IPBlock - Ingress - IngressBackend - IngressList - IngressRule - IngressRuleValue - IngressSpec - IngressStatus - IngressTLS - NetworkPolicy - NetworkPolicyEgressRule - NetworkPolicyIngressRule - NetworkPolicyList - NetworkPolicyPeer - NetworkPolicyPort - NetworkPolicySpec - PodSecurityPolicy - PodSecurityPolicyList - PodSecurityPolicySpec - ReplicaSet - ReplicaSetCondition - ReplicaSetList - ReplicaSetSpec - ReplicaSetStatus - ReplicationControllerDummy - RollbackConfig - RollingUpdateDaemonSet - RollingUpdateDeployment - RunAsGroupStrategyOptions - RunAsUserStrategyOptions - SELinuxStrategyOptions - Scale - ScaleSpec - ScaleStatus - SupplementalGroupsStrategyOptions -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import strings "strings" -import reflect "reflect" - -import io "io" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -110,244 +49,1602 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } -func (*AllowedCSIDriver) ProtoMessage() {} -func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } +func (*AllowedCSIDriver) ProtoMessage() {} +func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{0} +} +func (m *AllowedCSIDriver) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedCSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedCSIDriver) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedCSIDriver.Merge(m, src) +} +func (m *AllowedCSIDriver) XXX_Size() int { + return m.Size() +} +func (m *AllowedCSIDriver) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedCSIDriver.DiscardUnknown(m) +} -func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } -func (*AllowedFlexVolume) ProtoMessage() {} -func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_AllowedCSIDriver proto.InternalMessageInfo -func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } -func (*AllowedHostPath) ProtoMessage() {} -func (*AllowedHostPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } +func (*AllowedFlexVolume) ProtoMessage() {} +func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{1} +} +func (m *AllowedFlexVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedFlexVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedFlexVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedFlexVolume.Merge(m, src) +} +func (m *AllowedFlexVolume) XXX_Size() int { + return m.Size() +} +func (m *AllowedFlexVolume) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedFlexVolume.DiscardUnknown(m) +} -func (m *DaemonSet) Reset() { *m = DaemonSet{} } -func (*DaemonSet) ProtoMessage() {} -func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_AllowedFlexVolume proto.InternalMessageInfo -func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } -func (*DaemonSetCondition) ProtoMessage() {} -func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } +func (*AllowedHostPath) ProtoMessage() {} +func (*AllowedHostPath) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{2} +} +func (m *AllowedHostPath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedHostPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedHostPath) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedHostPath.Merge(m, src) +} +func (m *AllowedHostPath) XXX_Size() int { + return m.Size() +} +func (m *AllowedHostPath) 
XXX_DiscardUnknown() { + xxx_messageInfo_AllowedHostPath.DiscardUnknown(m) +} -func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } -func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_AllowedHostPath proto.InternalMessageInfo -func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } -func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *DaemonSet) Reset() { *m = DaemonSet{} } +func (*DaemonSet) ProtoMessage() {} +func (*DaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{3} +} +func (m *DaemonSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSet.Merge(m, src) +} +func (m *DaemonSet) XXX_Size() int { + return m.Size() +} +func (m *DaemonSet) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSet.DiscardUnknown(m) +} -func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } -func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_DaemonSet proto.InternalMessageInfo -func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } -func (*DaemonSetUpdateStrategy) ProtoMessage() {} -func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{4} +} +func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetCondition.Merge(m, src) +} +func (m *DaemonSetCondition) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetCondition.DiscardUnknown(m) +} -func (m *Deployment) Reset() { *m = Deployment{} } -func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo -func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } -func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } +func (*DaemonSetList) ProtoMessage() {} +func (*DaemonSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{5} +} +func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m 
*DaemonSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetList.Merge(m, src) +} +func (m *DaemonSetList) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetList) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetList.DiscardUnknown(m) +} -func (m *DeploymentList) Reset() { *m = DeploymentList{} } -func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo -func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } -func (*DeploymentRollback) ProtoMessage() {} -func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } +func (*DaemonSetSpec) ProtoMessage() {} +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{6} +} +func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetSpec.Merge(m, src) +} +func (m *DaemonSetSpec) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetSpec.DiscardUnknown(m) +} -func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } -func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo -func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } -func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } +func (*DaemonSetStatus) ProtoMessage() {} +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{7} +} +func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetStatus.Merge(m, src) +} +func (m *DaemonSetStatus) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetStatus.DiscardUnknown(m) +} -func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } -func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo -func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } -func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } +func (*DaemonSetUpdateStrategy) ProtoMessage() {} +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{8} +} +func (m *DaemonSetUpdateStrategy) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetUpdateStrategy.Merge(m, src) +} +func (m *DaemonSetUpdateStrategy) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetUpdateStrategy.DiscardUnknown(m) +} -func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } -func (*HTTPIngressPath) ProtoMessage() {} -func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo -func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } -func (*HTTPIngressRuleValue) ProtoMessage() {} -func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{9} +} +func (m *Deployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Deployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_Deployment.Merge(m, src) +} +func (m *Deployment) XXX_Size() int { + return m.Size() +} +func (m *Deployment) XXX_DiscardUnknown() { + xxx_messageInfo_Deployment.DiscardUnknown(m) +} -func (m *HostPortRange) Reset() { *m = HostPortRange{} } -func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +var xxx_messageInfo_Deployment proto.InternalMessageInfo -func (m *IDRange) Reset() { *m = IDRange{} } -func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{10} +} +func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentCondition.Merge(m, src) +} +func (m *DeploymentCondition) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) +} -func (m *IPBlock) Reset() { *m = IPBlock{} } -func (*IPBlock) ProtoMessage() {} -func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo -func (m *Ingress) Reset() { *m = Ingress{} } -func (*Ingress) ProtoMessage() {} -func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (m *DeploymentList) Reset() { *m = DeploymentList{} } 
+func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{11} +} +func (m *DeploymentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentList.Merge(m, src) +} +func (m *DeploymentList) XXX_Size() int { + return m.Size() +} +func (m *DeploymentList) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentList.DiscardUnknown(m) +} -func (m *IngressBackend) Reset() { *m = IngressBackend{} } -func (*IngressBackend) ProtoMessage() {} -func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +var xxx_messageInfo_DeploymentList proto.InternalMessageInfo -func (m *IngressList) Reset() { *m = IngressList{} } -func (*IngressList) ProtoMessage() {} -func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } +func (*DeploymentRollback) ProtoMessage() {} +func (*DeploymentRollback) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{12} +} +func (m *DeploymentRollback) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentRollback) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentRollback) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentRollback.Merge(m, src) +} +func (m *DeploymentRollback) XXX_Size() int { + return m.Size() +} +func (m *DeploymentRollback) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentRollback.DiscardUnknown(m) +} -func (m *IngressRule) Reset() { *m = IngressRule{} } -func (*IngressRule) ProtoMessage() {} -func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +var xxx_messageInfo_DeploymentRollback proto.InternalMessageInfo -func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } -func (*IngressRuleValue) ProtoMessage() {} -func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{13} +} +func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentSpec.Merge(m, src) +} +func (m *DeploymentSpec) XXX_Size() int { + return m.Size() +} +func (m *DeploymentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo + +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{14} +} +func (m *DeploymentStatus) XXX_Unmarshal(b 
[]byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStatus.Merge(m, src) +} +func (m *DeploymentStatus) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo + +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{15} +} +func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStrategy.Merge(m, src) +} +func (m *DeploymentStrategy) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo + +func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } +func (*FSGroupStrategyOptions) ProtoMessage() {} +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{16} +} +func (m *FSGroupStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FSGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FSGroupStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FSGroupStrategyOptions.Merge(m, src) +} +func (m *FSGroupStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *FSGroupStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FSGroupStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FSGroupStrategyOptions proto.InternalMessageInfo + +func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } +func (*HTTPIngressPath) ProtoMessage() {} +func (*HTTPIngressPath) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{17} +} +func (m *HTTPIngressPath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPIngressPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPIngressPath) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPIngressPath.Merge(m, src) +} +func (m *HTTPIngressPath) XXX_Size() int { + return m.Size() +} +func (m *HTTPIngressPath) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPIngressPath.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPIngressPath proto.InternalMessageInfo + +func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } +func (*HTTPIngressRuleValue) ProtoMessage() {} +func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{18} +} +func (m *HTTPIngressRuleValue) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPIngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPIngressRuleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPIngressRuleValue.Merge(m, src) +} +func (m *HTTPIngressRuleValue) XXX_Size() int { + return m.Size() +} +func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPIngressRuleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo + +func (m *HostPortRange) Reset() { *m = HostPortRange{} } +func (*HostPortRange) ProtoMessage() {} +func (*HostPortRange) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{19} +} +func (m *HostPortRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostPortRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostPortRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostPortRange.Merge(m, src) +} +func (m *HostPortRange) XXX_Size() int { + return m.Size() +} +func (m *HostPortRange) XXX_DiscardUnknown() { + xxx_messageInfo_HostPortRange.DiscardUnknown(m) +} + +var xxx_messageInfo_HostPortRange proto.InternalMessageInfo + +func (m *IDRange) Reset() { *m = IDRange{} } +func (*IDRange) ProtoMessage() {} +func (*IDRange) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{20} +} +func (m *IDRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IDRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IDRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_IDRange.Merge(m, src) +} +func (m *IDRange) XXX_Size() int { + return m.Size() +} +func (m *IDRange) XXX_DiscardUnknown() { + xxx_messageInfo_IDRange.DiscardUnknown(m) +} + +var xxx_messageInfo_IDRange proto.InternalMessageInfo + +func (m *IPBlock) Reset() { *m = IPBlock{} } +func (*IPBlock) ProtoMessage() {} +func (*IPBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{21} +} +func (m *IPBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPBlock.Merge(m, src) +} +func (m *IPBlock) XXX_Size() int { + return m.Size() +} +func (m *IPBlock) XXX_DiscardUnknown() { + xxx_messageInfo_IPBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_IPBlock proto.InternalMessageInfo -func (m *IngressSpec) Reset() { *m = IngressSpec{} } -func (*IngressSpec) ProtoMessage() {} -func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (m *Ingress) Reset() { *m = Ingress{} } +func (*Ingress) ProtoMessage() {} +func (*Ingress) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{22} +} +func (m *Ingress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Ingress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Ingress) XXX_Merge(src proto.Message) { + xxx_messageInfo_Ingress.Merge(m, src) +} +func (m *Ingress) XXX_Size() int { + return m.Size() +} +func (m *Ingress) XXX_DiscardUnknown() { + xxx_messageInfo_Ingress.DiscardUnknown(m) +} -func (m *IngressStatus) Reset() { *m = IngressStatus{} } -func (*IngressStatus) ProtoMessage() {} -func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +var xxx_messageInfo_Ingress proto.InternalMessageInfo -func (m *IngressTLS) Reset() { *m = IngressTLS{} } -func (*IngressTLS) ProtoMessage() {} -func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (m *IngressBackend) Reset() { *m = IngressBackend{} } +func (*IngressBackend) ProtoMessage() {} +func (*IngressBackend) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{23} +} +func (m *IngressBackend) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressBackend) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressBackend) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressBackend.Merge(m, src) +} +func (m *IngressBackend) XXX_Size() int { + return m.Size() +} +func (m *IngressBackend) XXX_DiscardUnknown() { + xxx_messageInfo_IngressBackend.DiscardUnknown(m) +} -func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } -func (*NetworkPolicy) ProtoMessage() {} -func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +var xxx_messageInfo_IngressBackend proto.InternalMessageInfo + +func (m *IngressList) Reset() { *m = IngressList{} } +func (*IngressList) ProtoMessage() {} +func (*IngressList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{24} +} +func (m *IngressList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressList) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressList.Merge(m, src) +} +func (m *IngressList) XXX_Size() int { + return m.Size() +} +func (m *IngressList) XXX_DiscardUnknown() { + xxx_messageInfo_IngressList.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressList proto.InternalMessageInfo + +func (m *IngressRule) Reset() { *m = IngressRule{} } +func (*IngressRule) ProtoMessage() {} +func (*IngressRule) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{25} +} +func (m *IngressRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressRule.Merge(m, src) +} +func (m *IngressRule) XXX_Size() int { + return m.Size() +} +func (m *IngressRule) XXX_DiscardUnknown() { + xxx_messageInfo_IngressRule.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressRule proto.InternalMessageInfo + +func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } +func (*IngressRuleValue) ProtoMessage() {} +func (*IngressRuleValue) 
Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{26} +} +func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressRuleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressRuleValue.Merge(m, src) +} +func (m *IngressRuleValue) XXX_Size() int { + return m.Size() +} +func (m *IngressRuleValue) XXX_DiscardUnknown() { + xxx_messageInfo_IngressRuleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo + +func (m *IngressSpec) Reset() { *m = IngressSpec{} } +func (*IngressSpec) ProtoMessage() {} +func (*IngressSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{27} +} +func (m *IngressSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressSpec.Merge(m, src) +} +func (m *IngressSpec) XXX_Size() int { + return m.Size() +} +func (m *IngressSpec) XXX_DiscardUnknown() { + xxx_messageInfo_IngressSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressSpec proto.InternalMessageInfo + +func (m *IngressStatus) Reset() { *m = IngressStatus{} } +func (*IngressStatus) ProtoMessage() {} +func (*IngressStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{28} +} +func (m *IngressStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressStatus.Merge(m, src) +} +func (m *IngressStatus) XXX_Size() int { + return m.Size() +} +func (m *IngressStatus) XXX_DiscardUnknown() { + xxx_messageInfo_IngressStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressStatus proto.InternalMessageInfo + +func (m *IngressTLS) Reset() { *m = IngressTLS{} } +func (*IngressTLS) ProtoMessage() {} +func (*IngressTLS) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{29} +} +func (m *IngressTLS) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressTLS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressTLS) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressTLS.Merge(m, src) +} +func (m *IngressTLS) XXX_Size() int { + return m.Size() +} +func (m *IngressTLS) XXX_DiscardUnknown() { + xxx_messageInfo_IngressTLS.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressTLS proto.InternalMessageInfo + +func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } +func (*NetworkPolicy) ProtoMessage() {} +func (*NetworkPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{30} +} +func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + 
n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicy.Merge(m, src) +} +func (m *NetworkPolicy) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } func (*NetworkPolicyEgressRule) ProtoMessage() {} func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{31} + return fileDescriptor_cdc93917efc28165, []int{31} +} +func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyEgressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyEgressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyEgressRule.Merge(m, src) +} +func (m *NetworkPolicyEgressRule) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyEgressRule) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyEgressRule.DiscardUnknown(m) } +var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo + func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } func (*NetworkPolicyIngressRule) ProtoMessage() {} func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{32} + return fileDescriptor_cdc93917efc28165, []int{32} +} +func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyIngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyIngressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyIngressRule.Merge(m, src) +} +func (m *NetworkPolicyIngressRule) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyIngressRule) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyIngressRule.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyIngressRule proto.InternalMessageInfo + +func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } +func (*NetworkPolicyList) ProtoMessage() {} +func (*NetworkPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{33} +} +func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyList.Merge(m, src) +} +func (m *NetworkPolicyList) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyList.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo + +func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } +func (*NetworkPolicyPeer) ProtoMessage() {} +func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{34} +} +func (m *NetworkPolicyPeer) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyPeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyPeer) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyPeer.Merge(m, src) +} +func (m *NetworkPolicyPeer) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyPeer) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyPeer.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo + +func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } +func (*NetworkPolicyPort) ProtoMessage() {} +func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{35} +} +func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyPort.Merge(m, src) +} +func (m *NetworkPolicyPort) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyPort) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyPort.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo + +func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } +func (*NetworkPolicySpec) ProtoMessage() {} +func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{36} +} +func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicySpec.Merge(m, src) +} +func (m *NetworkPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicySpec.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo + +func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } +func (*PodSecurityPolicy) ProtoMessage() {} +func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{37} +} +func (m *PodSecurityPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicy.Merge(m, src) +} +func (m *PodSecurityPolicy) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicy proto.InternalMessageInfo + +func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } +func (*PodSecurityPolicyList) ProtoMessage() {} +func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{38} +} +func (m *PodSecurityPolicyList) XXX_Unmarshal(b []byte) 
error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicyList.Merge(m, src) +} +func (m *PodSecurityPolicyList) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicyList.DiscardUnknown(m) } -func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } -func (*NetworkPolicyList) ProtoMessage() {} -func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +var xxx_messageInfo_PodSecurityPolicyList proto.InternalMessageInfo + +func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } +func (*PodSecurityPolicySpec) ProtoMessage() {} +func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{39} +} +func (m *PodSecurityPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicySpec.Merge(m, src) +} +func (m *PodSecurityPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicySpec.DiscardUnknown(m) +} -func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } -func (*NetworkPolicyPeer) ProtoMessage() {} -func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } +var xxx_messageInfo_PodSecurityPolicySpec proto.InternalMessageInfo -func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } -func (*NetworkPolicyPort) ProtoMessage() {} -func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } +func (*ReplicaSet) ProtoMessage() {} +func (*ReplicaSet) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{40} +} +func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSet.Merge(m, src) +} +func (m *ReplicaSet) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSet) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSet.DiscardUnknown(m) +} -func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } -func (*NetworkPolicySpec) ProtoMessage() {} -func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo -func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } -func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } +func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } +func (*ReplicaSetCondition) ProtoMessage() {} +func (*ReplicaSetCondition) 
Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{41} +} +func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetCondition.Merge(m, src) +} +func (m *ReplicaSetCondition) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetCondition.DiscardUnknown(m) +} -func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } -func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo -func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } -func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } +func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } +func (*ReplicaSetList) ProtoMessage() {} +func (*ReplicaSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{42} +} +func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetList.Merge(m, src) +} +func (m *ReplicaSetList) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetList) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetList.DiscardUnknown(m) +} -func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } -func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo -func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } -func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } +func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } +func (*ReplicaSetSpec) ProtoMessage() {} +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{43} +} +func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetSpec.Merge(m, src) +} +func (m *ReplicaSetSpec) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetSpec.DiscardUnknown(m) +} -func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } -func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } +var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo -func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } 
-func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } +func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } +func (*ReplicaSetStatus) ProtoMessage() {} +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{44} +} +func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetStatus.Merge(m, src) +} +func (m *ReplicaSetStatus) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetStatus.DiscardUnknown(m) +} -func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } -func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } +var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo func (m *ReplicationControllerDummy) Reset() { *m = ReplicationControllerDummy{} } func (*ReplicationControllerDummy) ProtoMessage() {} func (*ReplicationControllerDummy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{45} + return fileDescriptor_cdc93917efc28165, []int{45} +} +func (m *ReplicationControllerDummy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationControllerDummy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicationControllerDummy) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationControllerDummy.Merge(m, src) +} +func (m *ReplicationControllerDummy) XXX_Size() int { + return m.Size() +} +func (m *ReplicationControllerDummy) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationControllerDummy.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicationControllerDummy proto.InternalMessageInfo + +func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } +func (*RollbackConfig) ProtoMessage() {} +func (*RollbackConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{46} +} +func (m *RollbackConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollbackConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollbackConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollbackConfig.Merge(m, src) +} +func (m *RollbackConfig) XXX_Size() int { + return m.Size() } +func (m *RollbackConfig) XXX_DiscardUnknown() { + xxx_messageInfo_RollbackConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_RollbackConfig proto.InternalMessageInfo -func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } -func (*RollbackConfig) ProtoMessage() {} -func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } +func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } +func (*RollingUpdateDaemonSet) ProtoMessage() {} +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{47} +} +func (m *RollingUpdateDaemonSet) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDaemonSet.Merge(m, src) +} +func (m *RollingUpdateDaemonSet) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDaemonSet) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDaemonSet.DiscardUnknown(m) +} -func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } -func (*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } +var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{48} + return fileDescriptor_cdc93917efc28165, []int{48} +} +func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDeployment.Merge(m, src) +} +func (m *RollingUpdateDeployment) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m) } +var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo + func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } func (*RunAsGroupStrategyOptions) ProtoMessage() {} func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{49} + return fileDescriptor_cdc93917efc28165, []int{49} +} +func (m *RunAsGroupStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RunAsGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RunAsGroupStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunAsGroupStrategyOptions.Merge(m, src) +} +func (m *RunAsGroupStrategyOptions) XXX_Size() int { + return m.Size() } +func (m *RunAsGroupStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RunAsGroupStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RunAsGroupStrategyOptions proto.InternalMessageInfo func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } func (*RunAsUserStrategyOptions) ProtoMessage() {} func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{50} + return fileDescriptor_cdc93917efc28165, []int{50} +} +func (m *RunAsUserStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RunAsUserStrategyOptions) XXX_Merge(src proto.Message) { 
+ xxx_messageInfo_RunAsUserStrategyOptions.Merge(m, src) +} +func (m *RunAsUserStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RunAsUserStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RunAsUserStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RunAsUserStrategyOptions proto.InternalMessageInfo + +func (m *RuntimeClassStrategyOptions) Reset() { *m = RuntimeClassStrategyOptions{} } +func (*RuntimeClassStrategyOptions) ProtoMessage() {} +func (*RuntimeClassStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{51} +} +func (m *RuntimeClassStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClassStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassStrategyOptions.Merge(m, src) +} +func (m *RuntimeClassStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClassStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClassStrategyOptions proto.InternalMessageInfo + +func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } +func (*SELinuxStrategyOptions) ProtoMessage() {} +func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{52} +} +func (m *SELinuxStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SELinuxStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SELinuxStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SELinuxStrategyOptions.Merge(m, src) +} +func (m *SELinuxStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *SELinuxStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SELinuxStrategyOptions.DiscardUnknown(m) } -func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } -func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } +var xxx_messageInfo_SELinuxStrategyOptions proto.InternalMessageInfo + +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{53} +} +func (m *Scale) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scale) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scale.Merge(m, src) +} +func (m *Scale) XXX_Size() int { + return m.Size() +} +func (m *Scale) XXX_DiscardUnknown() { + xxx_messageInfo_Scale.DiscardUnknown(m) +} + +var xxx_messageInfo_Scale proto.InternalMessageInfo + +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{54} +} +func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleSpec) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleSpec.Merge(m, src) +} +func (m *ScaleSpec) XXX_Size() int { + return m.Size() +} +func (m *ScaleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleSpec.DiscardUnknown(m) +} -func (m *Scale) Reset() { *m = Scale{} } -func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } +var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{55} +} +func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleStatus.Merge(m, src) +} +func (m *ScaleStatus) XXX_Size() int { + return m.Size() +} +func (m *ScaleStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleStatus.DiscardUnknown(m) +} -func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } -func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } +var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{55} + return fileDescriptor_cdc93917efc28165, []int{56} +} +func (m *SupplementalGroupsStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SupplementalGroupsStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SupplementalGroupsStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SupplementalGroupsStrategyOptions.Merge(m, src) +} +func (m *SupplementalGroupsStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *SupplementalGroupsStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SupplementalGroupsStrategyOptions.DiscardUnknown(m) } +var xxx_messageInfo_SupplementalGroupsStrategyOptions proto.InternalMessageInfo + func init() { proto.RegisterType((*AllowedCSIDriver)(nil), "k8s.io.api.extensions.v1beta1.AllowedCSIDriver") proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.extensions.v1beta1.AllowedFlexVolume") @@ -362,6 +1659,7 @@ func init() { proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.extensions.v1beta1.DeploymentCondition") proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.extensions.v1beta1.DeploymentList") proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.api.extensions.v1beta1.DeploymentRollback") + proto.RegisterMapType((map[string]string)(nil), 
"k8s.io.api.extensions.v1beta1.DeploymentRollback.UpdatedAnnotationsEntry") proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.extensions.v1beta1.DeploymentSpec") proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStatus") proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStrategy") @@ -400,16 +1698,258 @@ func init() { proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDeployment") proto.RegisterType((*RunAsGroupStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsGroupStrategyOptions") proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsUserStrategyOptions") + proto.RegisterType((*RuntimeClassStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RuntimeClassStrategyOptions") proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SELinuxStrategyOptions") proto.RegisterType((*Scale)(nil), "k8s.io.api.extensions.v1beta1.Scale") proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.extensions.v1beta1.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus.SelectorEntry") proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SupplementalGroupsStrategyOptions") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto", fileDescriptor_cdc93917efc28165) +} + +var fileDescriptor_cdc93917efc28165 = []byte{ + // 3684 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0x4f, 0x6c, 0x1b, 0x47, + 0x77, 0xf7, 0x92, 0x94, 0x48, 0x3d, 0xfd, 0x1f, 0xc9, 0x12, 0x3f, 0x3b, 0x16, 0xfd, 0x6d, 0x00, + 0xd7, 0x49, 0x6d, 0x32, 0x76, 0x6c, 0x7f, 0xae, 0x8d, 0x7e, 0x89, 0x28, 0x59, 0xb6, 0x52, 0xfd, + 0x61, 0x86, 0x92, 0x1b, 0x04, 0x4d, 0x9a, 0x15, 0x39, 0xa2, 0xd6, 0x5a, 0xee, 0x6e, 0x76, 0x87, + 0x8a, 0x08, 0xf4, 0xd0, 0x43, 0x51, 0xa0, 0x40, 0x8b, 0xf6, 0x92, 0xb6, 0xc7, 0x06, 0x05, 0x7a, + 0x6a, 0xd1, 0xde, 0xda, 0x43, 0x10, 0xa0, 0x40, 0x0a, 0x18, 0x45, 0x5a, 0xe4, 0xd6, 0x9c, 0x84, + 0x46, 0x39, 0x15, 0x3d, 0xf5, 0x56, 0xf8, 0x50, 0x14, 0x33, 0x3b, 0xfb, 0x7f, 0x57, 0x5c, 0x29, + 0xb6, 0xd0, 0x00, 0xbd, 0x89, 0xf3, 0xde, 0xfb, 0xbd, 0x37, 0x33, 0x6f, 0xde, 0x7b, 0x33, 0xfb, + 0x04, 0x2b, 0xfb, 0xf7, 0xed, 0xaa, 0x6a, 0xd4, 0xf6, 0x7b, 0x3b, 0xc4, 0xd2, 0x09, 0x25, 0x76, + 0xed, 0x80, 0xe8, 0x6d, 0xc3, 0xaa, 0x09, 0x82, 0x62, 0xaa, 0x35, 0x72, 0x48, 0x89, 0x6e, 0xab, + 0x86, 0x6e, 0xd7, 0x0e, 0x6e, 0xed, 0x10, 0xaa, 0xdc, 0xaa, 0x75, 0x88, 0x4e, 0x2c, 0x85, 0x92, + 0x76, 0xd5, 0xb4, 0x0c, 0x6a, 0xa0, 0x2b, 0x0e, 0x7b, 0x55, 0x31, 0xd5, 0xaa, 0xcf, 0x5e, 0x15, + 0xec, 0x97, 0x6e, 0x76, 0x54, 0xba, 0xd7, 0xdb, 0xa9, 0xb6, 0x8c, 0x6e, 0xad, 0x63, 0x74, 0x8c, + 0x1a, 0x97, 0xda, 0xe9, 0xed, 0xf2, 0x5f, 0xfc, 0x07, 0xff, 0xcb, 0x41, 0xbb, 0x24, 0x07, 0x94, + 0xb7, 0x0c, 0x8b, 0xd4, 0x0e, 0x62, 0x1a, 0x2f, 0xdd, 0xf1, 0x79, 0xba, 0x4a, 0x6b, 0x4f, 0xd5, + 0x89, 0xd5, 0xaf, 0x99, 0xfb, 0x1d, 0x36, 0x60, 0xd7, 0xba, 0x84, 0x2a, 0x49, 0x52, 0xb5, 0x34, + 0x29, 0xab, 0xa7, 0x53, 0xb5, 0x4b, 0x62, 0x02, 0xf7, 0x06, 0x09, 0xd8, 0xad, 0x3d, 0xd2, 0x55, + 0x62, 0x72, 0x6f, 0xa7, 0xc9, 0xf5, 0xa8, 0xaa, 0xd5, 0x54, 0x9d, 0xda, 0xd4, 0x8a, 0x0a, 0xc9, + 0x77, 0x60, 0x6a, 0x51, 0xd3, 0x8c, 0xcf, 0x48, 0x7b, 0xa9, 0xb9, 0xba, 0x6c, 0xa9, 0x07, 0xc4, + 0x42, 
0x57, 0xa1, 0xa0, 0x2b, 0x5d, 0x52, 0x96, 0xae, 0x4a, 0xd7, 0x47, 0xea, 0x63, 0xcf, 0x8f, + 0x2a, 0x17, 0x8e, 0x8f, 0x2a, 0x85, 0x0d, 0xa5, 0x4b, 0x30, 0xa7, 0xc8, 0x0f, 0x61, 0x5a, 0x48, + 0xad, 0x68, 0xe4, 0xf0, 0xa9, 0xa1, 0xf5, 0xba, 0x04, 0x5d, 0x83, 0xe1, 0x36, 0x07, 0x10, 0x82, + 0x13, 0x42, 0x70, 0xd8, 0x81, 0xc5, 0x82, 0x2a, 0xdb, 0x30, 0x29, 0x84, 0x9f, 0x18, 0x36, 0x6d, + 0x28, 0x74, 0x0f, 0xdd, 0x06, 0x30, 0x15, 0xba, 0xd7, 0xb0, 0xc8, 0xae, 0x7a, 0x28, 0xc4, 0x91, + 0x10, 0x87, 0x86, 0x47, 0xc1, 0x01, 0x2e, 0x74, 0x03, 0x4a, 0x16, 0x51, 0xda, 0x9b, 0xba, 0xd6, + 0x2f, 0xe7, 0xae, 0x4a, 0xd7, 0x4b, 0xf5, 0x29, 0x21, 0x51, 0xc2, 0x62, 0x1c, 0x7b, 0x1c, 0xf2, + 0xe7, 0x39, 0x18, 0x59, 0x56, 0x48, 0xd7, 0xd0, 0x9b, 0x84, 0xa2, 0x4f, 0xa0, 0xc4, 0xb6, 0xab, + 0xad, 0x50, 0x85, 0x6b, 0x1b, 0xbd, 0xfd, 0x56, 0xd5, 0x77, 0x27, 0x6f, 0xf5, 0xaa, 0xe6, 0x7e, + 0x87, 0x0d, 0xd8, 0x55, 0xc6, 0x5d, 0x3d, 0xb8, 0x55, 0xdd, 0xdc, 0x79, 0x46, 0x5a, 0x74, 0x9d, + 0x50, 0xc5, 0xb7, 0xcf, 0x1f, 0xc3, 0x1e, 0x2a, 0xda, 0x80, 0x82, 0x6d, 0x92, 0x16, 0xb7, 0x6c, + 0xf4, 0xf6, 0x8d, 0xea, 0x89, 0xce, 0x5a, 0xf5, 0x2c, 0x6b, 0x9a, 0xa4, 0xe5, 0xaf, 0x38, 0xfb, + 0x85, 0x39, 0x0e, 0x7a, 0x0a, 0xc3, 0x36, 0x55, 0x68, 0xcf, 0x2e, 0xe7, 0x39, 0x62, 0x35, 0x33, + 0x22, 0x97, 0xf2, 0x37, 0xc3, 0xf9, 0x8d, 0x05, 0x9a, 0xfc, 0x1f, 0x39, 0x40, 0x1e, 0xef, 0x92, + 0xa1, 0xb7, 0x55, 0xaa, 0x1a, 0x3a, 0x7a, 0x00, 0x05, 0xda, 0x37, 0x5d, 0x17, 0xb8, 0xe6, 0x1a, + 0xb4, 0xd5, 0x37, 0xc9, 0x8b, 0xa3, 0xca, 0x5c, 0x5c, 0x82, 0x51, 0x30, 0x97, 0x41, 0x6b, 0x9e, + 0xa9, 0x39, 0x2e, 0x7d, 0x27, 0xac, 0xfa, 0xc5, 0x51, 0x25, 0xe1, 0xb0, 0x55, 0x3d, 0xa4, 0xb0, + 0x81, 0xe8, 0x00, 0x90, 0xa6, 0xd8, 0x74, 0xcb, 0x52, 0x74, 0xdb, 0xd1, 0xa4, 0x76, 0x89, 0x58, + 0x84, 0x37, 0xb3, 0x6d, 0x1a, 0x93, 0xa8, 0x5f, 0x12, 0x56, 0xa0, 0xb5, 0x18, 0x1a, 0x4e, 0xd0, + 0xc0, 0xbc, 0xd9, 0x22, 0x8a, 0x6d, 0xe8, 0xe5, 0x42, 0xd8, 0x9b, 0x31, 0x1f, 0xc5, 0x82, 0x8a, + 0xde, 0x80, 0x62, 0x97, 0xd8, 0xb6, 0xd2, 0x21, 0xe5, 0x21, 0xce, 0x38, 0x29, 0x18, 0x8b, 0xeb, + 0xce, 0x30, 0x76, 0xe9, 0xf2, 0x97, 0x12, 0x8c, 0x7b, 0x2b, 0xb7, 0xa6, 0xda, 0x14, 0xfd, 0x56, + 0xcc, 0x0f, 0xab, 0xd9, 0xa6, 0xc4, 0xa4, 0xb9, 0x17, 0x7a, 0x3e, 0xef, 0x8e, 0x04, 0x7c, 0x70, + 0x1d, 0x86, 0x54, 0x4a, 0xba, 0x6c, 0x1f, 0xf2, 0xd7, 0x47, 0x6f, 0x5f, 0xcf, 0xea, 0x32, 0xf5, + 0x71, 0x01, 0x3a, 0xb4, 0xca, 0xc4, 0xb1, 0x83, 0x22, 0xff, 0x69, 0x21, 0x60, 0x3e, 0x73, 0x4d, + 0xf4, 0x11, 0x94, 0x6c, 0xa2, 0x91, 0x16, 0x35, 0x2c, 0x61, 0xfe, 0xdb, 0x19, 0xcd, 0x57, 0x76, + 0x88, 0xd6, 0x14, 0xa2, 0xf5, 0x31, 0x66, 0xbf, 0xfb, 0x0b, 0x7b, 0x90, 0xe8, 0x7d, 0x28, 0x51, + 0xd2, 0x35, 0x35, 0x85, 0x12, 0x71, 0x8e, 0x5e, 0x0f, 0x4e, 0x81, 0x79, 0x0e, 0x03, 0x6b, 0x18, + 0xed, 0x2d, 0xc1, 0xc6, 0x8f, 0x8f, 0xb7, 0x24, 0xee, 0x28, 0xf6, 0x60, 0xd0, 0x01, 0x4c, 0xf4, + 0xcc, 0x36, 0xe3, 0xa4, 0x2c, 0x0a, 0x76, 0xfa, 0xc2, 0x93, 0xee, 0x65, 0x5d, 0x9b, 0xed, 0x90, + 0x74, 0x7d, 0x4e, 0xe8, 0x9a, 0x08, 0x8f, 0xe3, 0x88, 0x16, 0xb4, 0x08, 0x93, 0x5d, 0x55, 0x67, + 0x71, 0xa9, 0xdf, 0x24, 0x2d, 0x43, 0x6f, 0xdb, 0xdc, 0xad, 0x86, 0xea, 0xf3, 0x02, 0x60, 0x72, + 0x3d, 0x4c, 0xc6, 0x51, 0x7e, 0xf4, 0x1e, 0x20, 0x77, 0x1a, 0x8f, 0x9d, 0x20, 0xae, 0x1a, 0x3a, + 0xf7, 0xb9, 0xbc, 0xef, 0xdc, 0x5b, 0x31, 0x0e, 0x9c, 0x20, 0x85, 0xd6, 0x60, 0xd6, 0x22, 0x07, + 0x2a, 0x9b, 0xe3, 0x13, 0xd5, 0xa6, 0x86, 0xd5, 0x5f, 0x53, 0xbb, 0x2a, 0x2d, 0x0f, 0x73, 0x9b, + 0xca, 0xc7, 0x47, 0x95, 0x59, 0x9c, 0x40, 0xc7, 0x89, 0x52, 0xf2, 0x9f, 0x0d, 0xc3, 0x64, 0x24, + 0xde, 0xa0, 0xa7, 0x30, 0xd7, 
0xea, 0x59, 0x16, 0xd1, 0xe9, 0x46, 0xaf, 0xbb, 0x43, 0xac, 0x66, + 0x6b, 0x8f, 0xb4, 0x7b, 0x1a, 0x69, 0x73, 0x47, 0x19, 0xaa, 0x2f, 0x08, 0x8b, 0xe7, 0x96, 0x12, + 0xb9, 0x70, 0x8a, 0x34, 0x5b, 0x05, 0x9d, 0x0f, 0xad, 0xab, 0xb6, 0xed, 0x61, 0xe6, 0x38, 0xa6, + 0xb7, 0x0a, 0x1b, 0x31, 0x0e, 0x9c, 0x20, 0xc5, 0x6c, 0x6c, 0x13, 0x5b, 0xb5, 0x48, 0x3b, 0x6a, + 0x63, 0x3e, 0x6c, 0xe3, 0x72, 0x22, 0x17, 0x4e, 0x91, 0x46, 0x77, 0x61, 0xd4, 0xd1, 0xc6, 0xf7, + 0x4f, 0x6c, 0xf4, 0x8c, 0x00, 0x1b, 0xdd, 0xf0, 0x49, 0x38, 0xc8, 0xc7, 0xa6, 0x66, 0xec, 0xd8, + 0xc4, 0x3a, 0x20, 0xed, 0xf4, 0x0d, 0xde, 0x8c, 0x71, 0xe0, 0x04, 0x29, 0x36, 0x35, 0xc7, 0x03, + 0x63, 0x53, 0x1b, 0x0e, 0x4f, 0x6d, 0x3b, 0x91, 0x0b, 0xa7, 0x48, 0x33, 0x3f, 0x76, 0x4c, 0x5e, + 0x3c, 0x50, 0x54, 0x4d, 0xd9, 0xd1, 0x48, 0xb9, 0x18, 0xf6, 0xe3, 0x8d, 0x30, 0x19, 0x47, 0xf9, + 0xd1, 0x63, 0x98, 0x76, 0x86, 0xb6, 0x75, 0xc5, 0x03, 0x29, 0x71, 0x90, 0x9f, 0x09, 0x90, 0xe9, + 0x8d, 0x28, 0x03, 0x8e, 0xcb, 0xa0, 0x07, 0x30, 0xd1, 0x32, 0x34, 0x8d, 0xfb, 0xe3, 0x92, 0xd1, + 0xd3, 0x69, 0x79, 0x84, 0xa3, 0x20, 0x76, 0x1e, 0x97, 0x42, 0x14, 0x1c, 0xe1, 0x44, 0x04, 0xa0, + 0xe5, 0x26, 0x1c, 0xbb, 0x0c, 0x3c, 0x3e, 0xde, 0xca, 0x1a, 0x03, 0xbc, 0x54, 0xe5, 0xd7, 0x00, + 0xde, 0x90, 0x8d, 0x03, 0xc0, 0xf2, 0x3f, 0x4b, 0x30, 0x9f, 0x12, 0x3a, 0xd0, 0x3b, 0xa1, 0x14, + 0xfb, 0xab, 0x91, 0x14, 0x7b, 0x39, 0x45, 0x2c, 0x90, 0x67, 0x75, 0x18, 0xb7, 0xd8, 0xac, 0xf4, + 0x8e, 0xc3, 0x22, 0x62, 0xe4, 0xdd, 0x01, 0xd3, 0xc0, 0x41, 0x19, 0x3f, 0xe6, 0x4f, 0x1f, 0x1f, + 0x55, 0xc6, 0x43, 0x34, 0x1c, 0x86, 0x97, 0xff, 0x3c, 0x07, 0xb0, 0x4c, 0x4c, 0xcd, 0xe8, 0x77, + 0x89, 0x7e, 0x1e, 0x35, 0xd4, 0x66, 0xa8, 0x86, 0xba, 0x39, 0x68, 0x7b, 0x3c, 0xd3, 0x52, 0x8b, + 0xa8, 0xdf, 0x8c, 0x14, 0x51, 0xb5, 0xec, 0x90, 0x27, 0x57, 0x51, 0xff, 0x96, 0x87, 0x19, 0x9f, + 0xd9, 0x2f, 0xa3, 0x1e, 0x86, 0xf6, 0xf8, 0x57, 0x22, 0x7b, 0x3c, 0x9f, 0x20, 0xf2, 0xca, 0xea, + 0xa8, 0x67, 0x30, 0xc1, 0xaa, 0x1c, 0x67, 0x2f, 0x79, 0x0d, 0x35, 0x7c, 0xea, 0x1a, 0xca, 0xcb, + 0x76, 0x6b, 0x21, 0x24, 0x1c, 0x41, 0x4e, 0xa9, 0xd9, 0x8a, 0x3f, 0xc5, 0x9a, 0xed, 0x2b, 0x09, + 0x26, 0xfc, 0x6d, 0x3a, 0x87, 0xa2, 0x6d, 0x23, 0x5c, 0xb4, 0xbd, 0x91, 0xd9, 0x45, 0x53, 0xaa, + 0xb6, 0xff, 0x66, 0x05, 0xbe, 0xc7, 0xc4, 0x0e, 0xf8, 0x8e, 0xd2, 0xda, 0x1f, 0x7c, 0xc7, 0x43, + 0x9f, 0x4b, 0x80, 0x44, 0x16, 0x58, 0xd4, 0x75, 0x83, 0x2a, 0x4e, 0xac, 0x74, 0xcc, 0x5a, 0xcd, + 0x6c, 0x96, 0xab, 0xb1, 0xba, 0x1d, 0xc3, 0x7a, 0xa4, 0x53, 0xab, 0xef, 0x6f, 0x72, 0x9c, 0x01, + 0x27, 0x18, 0x80, 0x14, 0x00, 0x4b, 0x60, 0x6e, 0x19, 0xe2, 0x20, 0xdf, 0xcc, 0x10, 0xf3, 0x98, + 0xc0, 0x92, 0xa1, 0xef, 0xaa, 0x1d, 0x3f, 0xec, 0x60, 0x0f, 0x08, 0x07, 0x40, 0x2f, 0x3d, 0x82, + 0xf9, 0x14, 0x6b, 0xd1, 0x14, 0xe4, 0xf7, 0x49, 0xdf, 0x59, 0x36, 0xcc, 0xfe, 0x44, 0xb3, 0x30, + 0x74, 0xa0, 0x68, 0x3d, 0x27, 0xfc, 0x8e, 0x60, 0xe7, 0xc7, 0x83, 0xdc, 0x7d, 0x49, 0xfe, 0x72, + 0x28, 0xe8, 0x3b, 0xbc, 0x62, 0xbe, 0xce, 0x2e, 0xad, 0xa6, 0xa6, 0xb6, 0x14, 0x5b, 0x14, 0x42, + 0x63, 0xce, 0x85, 0xd5, 0x19, 0xc3, 0x1e, 0x35, 0x54, 0x5b, 0xe7, 0x5e, 0x6d, 0x6d, 0x9d, 0x7f, + 0x39, 0xb5, 0xf5, 0x6f, 0x43, 0xc9, 0x76, 0xab, 0xea, 0x02, 0x87, 0xbc, 0x75, 0x8a, 0xf8, 0x2a, + 0x0a, 0x6a, 0x4f, 0x81, 0x57, 0x4a, 0x7b, 0xa0, 0x49, 0x45, 0xf4, 0xd0, 0x29, 0x8b, 0xe8, 0x97, + 0x5a, 0xf8, 0xb2, 0x78, 0x63, 0x2a, 0x3d, 0x9b, 0xb4, 0x79, 0x6c, 0x2b, 0xf9, 0xf1, 0xa6, 0xc1, + 0x47, 0xb1, 0xa0, 0xa2, 0x8f, 0x42, 0x2e, 0x5b, 0x3a, 0x8b, 0xcb, 0x4e, 0xa4, 0xbb, 0x2b, 0xda, + 0x86, 0x79, 0xd3, 0x32, 0x3a, 0x16, 0xb1, 0xed, 0x65, 
0xa2, 0xb4, 0x35, 0x55, 0x27, 0xee, 0xfa, + 0x38, 0x15, 0xd1, 0xe5, 0xe3, 0xa3, 0xca, 0x7c, 0x23, 0x99, 0x05, 0xa7, 0xc9, 0xca, 0xcf, 0x0b, + 0x30, 0x15, 0xcd, 0x80, 0x29, 0x45, 0xaa, 0x74, 0xa6, 0x22, 0xf5, 0x46, 0xe0, 0x30, 0x38, 0x15, + 0x7c, 0xe0, 0x05, 0x27, 0x76, 0x20, 0x16, 0x61, 0x52, 0x44, 0x03, 0x97, 0x28, 0xca, 0x74, 0x6f, + 0xf7, 0xb7, 0xc3, 0x64, 0x1c, 0xe5, 0x47, 0x0f, 0x61, 0xdc, 0xe2, 0x75, 0xb7, 0x0b, 0xe0, 0xd4, + 0xae, 0x17, 0x05, 0xc0, 0x38, 0x0e, 0x12, 0x71, 0x98, 0x97, 0xd5, 0xad, 0x7e, 0x39, 0xea, 0x02, + 0x14, 0xc2, 0x75, 0xeb, 0x62, 0x94, 0x01, 0xc7, 0x65, 0xd0, 0x3a, 0xcc, 0xf4, 0xf4, 0x38, 0x94, + 0xe3, 0xca, 0x97, 0x05, 0xd4, 0xcc, 0x76, 0x9c, 0x05, 0x27, 0xc9, 0xa1, 0xdd, 0x50, 0x29, 0x3b, + 0xcc, 0xc3, 0xf3, 0xed, 0xcc, 0x07, 0x2f, 0x73, 0x2d, 0x9b, 0x50, 0x6e, 0x97, 0xb2, 0x96, 0xdb, + 0xf2, 0x3f, 0x4a, 0xc1, 0x24, 0xe4, 0x95, 0xc0, 0x83, 0x5e, 0x99, 0x62, 0x12, 0x81, 0xea, 0xc8, + 0x48, 0xae, 0x7e, 0xef, 0x9d, 0xaa, 0xfa, 0xf5, 0x93, 0xe7, 0xe0, 0xf2, 0xf7, 0x0b, 0x09, 0xe6, + 0x56, 0x9a, 0x8f, 0x2d, 0xa3, 0x67, 0xba, 0xe6, 0x6c, 0x9a, 0xce, 0xd2, 0xfc, 0x02, 0x0a, 0x56, + 0x4f, 0x73, 0xe7, 0xf1, 0xba, 0x3b, 0x0f, 0xdc, 0xd3, 0xd8, 0x3c, 0x66, 0x22, 0x52, 0xce, 0x24, + 0x98, 0x00, 0xda, 0x80, 0x61, 0x4b, 0xd1, 0x3b, 0xc4, 0x4d, 0xab, 0xd7, 0x06, 0x58, 0xbf, 0xba, + 0x8c, 0x19, 0x7b, 0xa0, 0xb0, 0xe1, 0xd2, 0x58, 0xa0, 0xc8, 0x7f, 0x24, 0xc1, 0xe4, 0x93, 0xad, + 0xad, 0xc6, 0xaa, 0xce, 0x4f, 0x34, 0x7f, 0x5b, 0xbd, 0x0a, 0x05, 0x53, 0xa1, 0x7b, 0xd1, 0x4c, + 0xcf, 0x68, 0x98, 0x53, 0xd0, 0x07, 0x50, 0x64, 0x91, 0x84, 0xe8, 0xed, 0x8c, 0xa5, 0xb6, 0x80, + 0xaf, 0x3b, 0x42, 0x7e, 0xf5, 0x24, 0x06, 0xb0, 0x0b, 0x27, 0xef, 0xc3, 0x6c, 0xc0, 0x1c, 0xb6, + 0x1e, 0x4f, 0x59, 0x76, 0x44, 0x4d, 0x18, 0x62, 0x9a, 0x59, 0x0e, 0xcc, 0x67, 0x78, 0xcc, 0x8c, + 0x4c, 0xc9, 0xaf, 0x74, 0xd8, 0x2f, 0x1b, 0x3b, 0x58, 0xf2, 0x3a, 0x8c, 0xf3, 0x07, 0x65, 0xc3, + 0xa2, 0x7c, 0x59, 0xd0, 0x15, 0xc8, 0x77, 0x55, 0x5d, 0xe4, 0xd9, 0x51, 0x21, 0x93, 0x67, 0x39, + 0x82, 0x8d, 0x73, 0xb2, 0x72, 0x28, 0x22, 0x8f, 0x4f, 0x56, 0x0e, 0x31, 0x1b, 0x97, 0x1f, 0x43, + 0x51, 0x2c, 0x77, 0x10, 0x28, 0x7f, 0x32, 0x50, 0x3e, 0x01, 0x68, 0x13, 0x8a, 0xab, 0x8d, 0xba, + 0x66, 0x38, 0x55, 0x57, 0x4b, 0x6d, 0x5b, 0xd1, 0xbd, 0x58, 0x5a, 0x5d, 0xc6, 0x98, 0x53, 0x90, + 0x0c, 0xc3, 0xe4, 0xb0, 0x45, 0x4c, 0xca, 0x3d, 0x62, 0xa4, 0x0e, 0x6c, 0x97, 0x1f, 0xf1, 0x11, + 0x2c, 0x28, 0xf2, 0x1f, 0xe7, 0xa0, 0x28, 0x96, 0xe3, 0x1c, 0x6e, 0x61, 0x6b, 0xa1, 0x5b, 0xd8, + 0x9b, 0xd9, 0x5c, 0x23, 0xf5, 0x0a, 0xb6, 0x15, 0xb9, 0x82, 0xdd, 0xc8, 0x88, 0x77, 0xf2, 0xfd, + 0xeb, 0xef, 0x24, 0x98, 0x08, 0x3b, 0x25, 0xba, 0x0b, 0xa3, 0x2c, 0xe1, 0xa8, 0x2d, 0xb2, 0xe1, + 0xd7, 0xb9, 0xde, 0x23, 0x4c, 0xd3, 0x27, 0xe1, 0x20, 0x1f, 0xea, 0x78, 0x62, 0xcc, 0x8f, 0xc4, + 0xa4, 0xd3, 0x97, 0xb4, 0x47, 0x55, 0xad, 0xea, 0x7c, 0x5a, 0xa9, 0xae, 0xea, 0x74, 0xd3, 0x6a, + 0x52, 0x4b, 0xd5, 0x3b, 0x31, 0x45, 0xdc, 0x29, 0x83, 0xc8, 0xf2, 0x3f, 0x48, 0x30, 0x2a, 0x4c, + 0x3e, 0x87, 0x5b, 0xc5, 0x6f, 0x84, 0x6f, 0x15, 0xd7, 0x32, 0x1e, 0xf0, 0xe4, 0x2b, 0xc5, 0x5f, + 0xf9, 0xa6, 0xb3, 0x23, 0xcd, 0xbc, 0x7a, 0xcf, 0xb0, 0x69, 0xd4, 0xab, 0xd9, 0x61, 0xc4, 0x9c, + 0x82, 0x7a, 0x30, 0xa5, 0x46, 0x62, 0x80, 0x58, 0xda, 0x5a, 0x36, 0x4b, 0x3c, 0xb1, 0x7a, 0x59, + 0xc0, 0x4f, 0x45, 0x29, 0x38, 0xa6, 0x42, 0x26, 0x10, 0xe3, 0x42, 0xef, 0x43, 0x61, 0x8f, 0x52, + 0x33, 0xe1, 0xbd, 0x7a, 0x40, 0xe4, 0xf1, 0x4d, 0x28, 0xf1, 0xd9, 0x6d, 0x6d, 0x35, 0x30, 0x87, + 0x92, 0xff, 0xc7, 0x5f, 0x8f, 0xa6, 0xe3, 0xe3, 0x5e, 0x3c, 0x95, 0xce, 0x12, 
0x4f, 0x47, 0x93, + 0x62, 0x29, 0x7a, 0x02, 0x79, 0xaa, 0x65, 0xbd, 0x16, 0x0a, 0xc4, 0xad, 0xb5, 0xa6, 0x1f, 0x90, + 0xb6, 0xd6, 0x9a, 0x98, 0x41, 0xa0, 0x4d, 0x18, 0x62, 0xd9, 0x87, 0x1d, 0xc1, 0x7c, 0xf6, 0x23, + 0xcd, 0xe6, 0xef, 0x3b, 0x04, 0xfb, 0x65, 0x63, 0x07, 0x47, 0xfe, 0x14, 0xc6, 0x43, 0xe7, 0x14, + 0x7d, 0x02, 0x63, 0x9a, 0xa1, 0xb4, 0xeb, 0x8a, 0xa6, 0xe8, 0x2d, 0xe2, 0x7e, 0x1c, 0xb8, 0x96, + 0x74, 0xc3, 0x58, 0x0b, 0xf0, 0x89, 0x53, 0x3e, 0x2b, 0x94, 0x8c, 0x05, 0x69, 0x38, 0x84, 0x28, + 0x2b, 0x00, 0xfe, 0x1c, 0x51, 0x05, 0x86, 0x98, 0x9f, 0x39, 0xf9, 0x64, 0xa4, 0x3e, 0xc2, 0x2c, + 0x64, 0xee, 0x67, 0x63, 0x67, 0x1c, 0xdd, 0x06, 0xb0, 0x49, 0xcb, 0x22, 0x94, 0x07, 0x83, 0x5c, + 0xf8, 0x03, 0x63, 0xd3, 0xa3, 0xe0, 0x00, 0x97, 0xfc, 0x4f, 0x12, 0x8c, 0x6f, 0x10, 0xfa, 0x99, + 0x61, 0xed, 0x37, 0x0c, 0x4d, 0x6d, 0xf5, 0xcf, 0x21, 0xd8, 0xe2, 0x50, 0xb0, 0x7d, 0x6b, 0xc0, + 0xce, 0x84, 0xac, 0x4b, 0x0b, 0xb9, 0xf2, 0x57, 0x12, 0xcc, 0x87, 0x38, 0x1f, 0xf9, 0x47, 0x77, + 0x1b, 0x86, 0x4c, 0xc3, 0xa2, 0x6e, 0x22, 0x3e, 0x95, 0x42, 0x16, 0xc6, 0x02, 0xa9, 0x98, 0xc1, + 0x60, 0x07, 0x0d, 0xad, 0x41, 0x8e, 0x1a, 0xc2, 0x55, 0x4f, 0x87, 0x49, 0x88, 0x55, 0x07, 0x81, + 0x99, 0xdb, 0x32, 0x70, 0x8e, 0x1a, 0x6c, 0x23, 0xca, 0x21, 0xae, 0x60, 0xf0, 0x79, 0x45, 0x33, + 0xc0, 0x50, 0xd8, 0xb5, 0x8c, 0xee, 0x99, 0xe7, 0xe0, 0x6d, 0xc4, 0x8a, 0x65, 0x74, 0x31, 0xc7, + 0x92, 0xbf, 0x96, 0x60, 0x3a, 0xc4, 0x79, 0x0e, 0x81, 0xff, 0xfd, 0x70, 0xe0, 0xbf, 0x71, 0x9a, + 0x89, 0xa4, 0x84, 0xff, 0xaf, 0x73, 0x91, 0x69, 0xb0, 0x09, 0xa3, 0x5d, 0x18, 0x35, 0x8d, 0x76, + 0xf3, 0x25, 0x7c, 0x0e, 0x9c, 0x64, 0x79, 0xb3, 0xe1, 0x63, 0xe1, 0x20, 0x30, 0x3a, 0x84, 0x69, + 0x5d, 0xe9, 0x12, 0xdb, 0x54, 0x5a, 0xa4, 0xf9, 0x12, 0x1e, 0x48, 0x2e, 0xf2, 0xef, 0x0d, 0x51, + 0x44, 0x1c, 0x57, 0x82, 0xd6, 0xa1, 0xa8, 0x9a, 0xbc, 0x8e, 0x13, 0xb5, 0xcb, 0xc0, 0x2c, 0xea, + 0x54, 0x7d, 0x4e, 0x3c, 0x17, 0x3f, 0xb0, 0x8b, 0x21, 0xff, 0x75, 0xd4, 0x1b, 0x98, 0xff, 0xa1, + 0xc7, 0x50, 0xe2, 0x8d, 0x19, 0x2d, 0x43, 0x73, 0xbf, 0x0c, 0xb0, 0x9d, 0x6d, 0x88, 0xb1, 0x17, + 0x47, 0x95, 0xcb, 0x09, 0x8f, 0xbe, 0x2e, 0x19, 0x7b, 0xc2, 0x68, 0x03, 0x0a, 0xe6, 0x8f, 0xa9, + 0x60, 0x78, 0x92, 0xe3, 0x65, 0x0b, 0xc7, 0x91, 0x7f, 0x2f, 0x1f, 0x31, 0x97, 0xa7, 0xba, 0x67, + 0x2f, 0x6d, 0xd7, 0xbd, 0x8a, 0x29, 0x75, 0xe7, 0x77, 0xa0, 0x28, 0x32, 0xbc, 0x70, 0xe6, 0x5f, + 0x9c, 0xc6, 0x99, 0x83, 0x59, 0xcc, 0xbb, 0xb0, 0xb8, 0x83, 0x2e, 0x30, 0xfa, 0x18, 0x86, 0x89, + 0xa3, 0xc2, 0xc9, 0x8d, 0xf7, 0x4e, 0xa3, 0xc2, 0x8f, 0xab, 0x7e, 0xa1, 0x2a, 0xc6, 0x04, 0x2a, + 0x7a, 0x87, 0xad, 0x17, 0xe3, 0x65, 0x97, 0x40, 0xbb, 0x5c, 0xe0, 0xe9, 0xea, 0x8a, 0x33, 0x6d, + 0x6f, 0xf8, 0xc5, 0x51, 0x05, 0xfc, 0x9f, 0x38, 0x28, 0x21, 0xff, 0x8b, 0x04, 0xd3, 0x7c, 0x85, + 0x5a, 0x3d, 0x4b, 0xa5, 0xfd, 0x73, 0x4b, 0x4c, 0x4f, 0x43, 0x89, 0xe9, 0xce, 0x80, 0x65, 0x89, + 0x59, 0x98, 0x9a, 0x9c, 0xbe, 0x91, 0xe0, 0x62, 0x8c, 0xfb, 0x1c, 0xe2, 0xe2, 0x76, 0x38, 0x2e, + 0xbe, 0x75, 0xda, 0x09, 0xa5, 0xc4, 0xc6, 0xff, 0x9a, 0x4e, 0x98, 0x0e, 0x3f, 0x29, 0xb7, 0x01, + 0x4c, 0x4b, 0x3d, 0x50, 0x35, 0xd2, 0x11, 0x1f, 0xc1, 0x4b, 0x81, 0x16, 0x27, 0x8f, 0x82, 0x03, + 0x5c, 0xc8, 0x86, 0xb9, 0x36, 0xd9, 0x55, 0x7a, 0x1a, 0x5d, 0x6c, 0xb7, 0x97, 0x14, 0x53, 0xd9, + 0x51, 0x35, 0x95, 0xaa, 0xe2, 0xb9, 0x60, 0xa4, 0xfe, 0xd0, 0xf9, 0x38, 0x9d, 0xc4, 0xf1, 0xe2, + 0xa8, 0x72, 0x25, 0xe9, 0xeb, 0x90, 0xcb, 0xd2, 0xc7, 0x29, 0xd0, 0xa8, 0x0f, 0x65, 0x8b, 0x7c, + 0xda, 0x53, 0x2d, 0xd2, 0x5e, 0xb6, 0x0c, 0x33, 0xa4, 0x36, 0xcf, 0xd5, 0xfe, 0xfa, 0xf1, 0x51, + 0xa5, 
0x8c, 0x53, 0x78, 0x06, 0x2b, 0x4e, 0x85, 0x47, 0xcf, 0x60, 0x46, 0x11, 0xcd, 0x68, 0x41, + 0xad, 0xce, 0x29, 0xb9, 0x7f, 0x7c, 0x54, 0x99, 0x59, 0x8c, 0x93, 0x07, 0x2b, 0x4c, 0x02, 0x45, + 0x35, 0x28, 0x1e, 0xf0, 0xbe, 0x35, 0xbb, 0x3c, 0xc4, 0xf1, 0x59, 0x22, 0x28, 0x3a, 0xad, 0x6c, + 0x0c, 0x73, 0x78, 0xa5, 0xc9, 0x4f, 0x9f, 0xcb, 0xc5, 0x2e, 0x94, 0xac, 0x96, 0x14, 0x27, 0x9e, + 0xbf, 0x18, 0x97, 0xfc, 0xa8, 0xf5, 0xc4, 0x27, 0xe1, 0x20, 0x1f, 0xfa, 0x08, 0x46, 0xf6, 0xc4, + 0xab, 0x84, 0x5d, 0x2e, 0x66, 0x4a, 0xc2, 0xa1, 0x57, 0x8c, 0xfa, 0xb4, 0x50, 0x31, 0xe2, 0x0e, + 0xdb, 0xd8, 0x47, 0x44, 0x6f, 0x40, 0x91, 0xff, 0x58, 0x5d, 0xe6, 0xcf, 0x71, 0x25, 0x3f, 0xb6, + 0x3d, 0x71, 0x86, 0xb1, 0x4b, 0x77, 0x59, 0x57, 0x1b, 0x4b, 0xfc, 0x59, 0x38, 0xc2, 0xba, 0xda, + 0x58, 0xc2, 0x2e, 0x1d, 0x7d, 0x02, 0x45, 0x9b, 0xac, 0xa9, 0x7a, 0xef, 0xb0, 0x0c, 0x99, 0x3e, + 0x2a, 0x37, 0x1f, 0x71, 0xee, 0xc8, 0xc3, 0x98, 0xaf, 0x41, 0xd0, 0xb1, 0x0b, 0x8b, 0xf6, 0x60, + 0xc4, 0xea, 0xe9, 0x8b, 0xf6, 0xb6, 0x4d, 0xac, 0xf2, 0x28, 0xd7, 0x31, 0x28, 0x9c, 0x63, 0x97, + 0x3f, 0xaa, 0xc5, 0x5b, 0x21, 0x8f, 0x03, 0xfb, 0xe0, 0x68, 0x0f, 0x80, 0xff, 0xe0, 0x6f, 0x70, + 0xe5, 0x39, 0xae, 0xea, 0x7e, 0x16, 0x55, 0x49, 0x4f, 0x7d, 0xe2, 0x1d, 0xde, 0x23, 0xe3, 0x00, + 0x36, 0xfa, 0x43, 0x09, 0x90, 0xdd, 0x33, 0x4d, 0x8d, 0x74, 0x89, 0x4e, 0x15, 0x8d, 0x8f, 0xda, + 0xe5, 0x31, 0xae, 0xf2, 0xdd, 0x41, 0x2b, 0x18, 0x13, 0x8c, 0xaa, 0xf6, 0x9e, 0xd7, 0xe3, 0xac, + 0x38, 0x41, 0x2f, 0xdb, 0xc4, 0x5d, 0x31, 0xeb, 0xf1, 0x4c, 0x9b, 0x98, 0xfc, 0xba, 0xe9, 0x6f, + 0xa2, 0xa0, 0x63, 0x17, 0x16, 0x3d, 0x85, 0x39, 0xb7, 0xc1, 0x12, 0x1b, 0x06, 0x5d, 0x51, 0x35, + 0x62, 0xf7, 0x6d, 0x4a, 0xba, 0xe5, 0x09, 0xee, 0x60, 0x5e, 0x97, 0x09, 0x4e, 0xe4, 0xc2, 0x29, + 0xd2, 0xa8, 0x0b, 0x15, 0x37, 0x38, 0xb1, 0x93, 0xeb, 0x45, 0xc7, 0x47, 0x76, 0x4b, 0xd1, 0x9c, + 0x2f, 0x0e, 0x93, 0x5c, 0xc1, 0xeb, 0xc7, 0x47, 0x95, 0xca, 0xf2, 0xc9, 0xac, 0x78, 0x10, 0x16, + 0xfa, 0x00, 0xca, 0x4a, 0x9a, 0x9e, 0x29, 0xae, 0xe7, 0x35, 0x16, 0xf1, 0x52, 0x15, 0xa4, 0x4a, + 0x23, 0x0a, 0x53, 0x4a, 0xb8, 0xd5, 0xd5, 0x2e, 0x4f, 0x67, 0x7a, 0xf2, 0x8c, 0x74, 0xc8, 0xfa, + 0xcf, 0x1e, 0x11, 0x82, 0x8d, 0x63, 0x1a, 0xd0, 0xef, 0x00, 0x52, 0xa2, 0xdd, 0xb9, 0x76, 0x19, + 0x65, 0x4a, 0x74, 0xb1, 0xb6, 0x5e, 0xdf, 0xed, 0x62, 0x24, 0x1b, 0x27, 0xe8, 0x61, 0x05, 0xba, + 0x12, 0xe9, 0x28, 0xb6, 0xcb, 0xf3, 0x5c, 0x79, 0x2d, 0x9b, 0x72, 0x4f, 0x2e, 0xf0, 0x61, 0x25, + 0x8a, 0x88, 0xe3, 0x4a, 0xd0, 0x1a, 0xcc, 0x8a, 0xc1, 0x6d, 0xdd, 0x56, 0x76, 0x49, 0xb3, 0x6f, + 0xb7, 0xa8, 0x66, 0x97, 0x67, 0x78, 0x7c, 0xe7, 0x1f, 0xf7, 0x16, 0x13, 0xe8, 0x38, 0x51, 0x0a, + 0xbd, 0x0b, 0x53, 0xbb, 0x86, 0xb5, 0xa3, 0xb6, 0xdb, 0x44, 0x77, 0x91, 0x66, 0x39, 0xd2, 0x2c, + 0xdb, 0x87, 0x95, 0x08, 0x0d, 0xc7, 0xb8, 0x91, 0x0d, 0x17, 0x05, 0x72, 0xc3, 0x32, 0x5a, 0xeb, + 0x46, 0x4f, 0xa7, 0x4e, 0xd9, 0x77, 0xd1, 0x4b, 0xa3, 0x17, 0x17, 0x93, 0x18, 0x5e, 0x1c, 0x55, + 0xae, 0x26, 0x57, 0xf9, 0x3e, 0x13, 0x4e, 0xc6, 0x46, 0x26, 0x8c, 0x89, 0x3e, 0xf1, 0x25, 0x4d, + 0xb1, 0xed, 0x72, 0x99, 0x1f, 0xfd, 0x07, 0x83, 0x03, 0x9e, 0x27, 0x12, 0x3d, 0xff, 0x53, 0xc7, + 0x47, 0x95, 0xb1, 0x20, 0x03, 0x0e, 0x69, 0xe0, 0x7d, 0x41, 0xe2, 0x6b, 0xd4, 0xf9, 0xf4, 0x56, + 0x9f, 0xae, 0x2f, 0xc8, 0x37, 0xed, 0xa5, 0xf5, 0x05, 0x05, 0x20, 0x4f, 0x7e, 0x97, 0xfe, 0xcf, + 0x1c, 0xcc, 0xf8, 0xcc, 0x99, 0xfb, 0x82, 0x12, 0x44, 0xfe, 0xbf, 0xbf, 0x3a, 0x5b, 0xaf, 0x8e, + 0xbf, 0x74, 0xff, 0xf7, 0x7a, 0x75, 0x7c, 0xdb, 0x52, 0x6e, 0x0f, 0x7f, 0x9b, 0x0b, 0x4e, 0xe0, + 0x94, 0x0d, 0x23, 0x2f, 0xa1, 
0xc5, 0xf8, 0x27, 0xd7, 0x73, 0x22, 0x7f, 0x93, 0x87, 0xa9, 0xe8, + 0x69, 0x0c, 0xf5, 0x15, 0x48, 0x03, 0xfb, 0x0a, 0x1a, 0x30, 0xbb, 0xdb, 0xd3, 0xb4, 0x3e, 0x9f, + 0x43, 0xa0, 0xb9, 0xc0, 0xf9, 0x2e, 0xf8, 0x9a, 0x90, 0x9c, 0x5d, 0x49, 0xe0, 0xc1, 0x89, 0x92, + 0xf1, 0x36, 0x83, 0xc2, 0x8f, 0x6d, 0x33, 0x18, 0x3a, 0x43, 0x9b, 0x41, 0x72, 0xa7, 0x46, 0xfe, + 0x4c, 0x9d, 0x1a, 0x67, 0xe9, 0x31, 0x48, 0x08, 0x62, 0x03, 0xfb, 0x65, 0x5f, 0x83, 0x4b, 0x42, + 0x8c, 0xf2, 0xde, 0x01, 0x9d, 0x5a, 0x86, 0xa6, 0x11, 0x6b, 0xb9, 0xd7, 0xed, 0xf6, 0xe5, 0x5f, + 0xc2, 0x44, 0xb8, 0x2b, 0xc6, 0xd9, 0x69, 0xa7, 0x31, 0x47, 0x7c, 0x9d, 0x0d, 0xec, 0xb4, 0x33, + 0x8e, 0x3d, 0x0e, 0xf9, 0xf7, 0x25, 0x98, 0x4b, 0xee, 0x7e, 0x45, 0x1a, 0x4c, 0x74, 0x95, 0xc3, + 0x60, 0x47, 0xb2, 0x74, 0xc6, 0x77, 0x33, 0xde, 0x0e, 0xb1, 0x1e, 0xc2, 0xc2, 0x11, 0x6c, 0xf9, + 0x07, 0x09, 0xe6, 0x53, 0x1a, 0x11, 0xce, 0xd7, 0x12, 0xf4, 0x21, 0x94, 0xba, 0xca, 0x61, 0xb3, + 0x67, 0x75, 0xc8, 0x99, 0x5f, 0x0a, 0xf9, 0x71, 0x5f, 0x17, 0x28, 0xd8, 0xc3, 0x93, 0xff, 0x52, + 0x82, 0x9f, 0xa5, 0x5e, 0xa4, 0xd0, 0xbd, 0x50, 0xcf, 0x84, 0x1c, 0xe9, 0x99, 0x40, 0x71, 0xc1, + 0x57, 0xd4, 0x32, 0xf1, 0x85, 0x04, 0xe5, 0xb4, 0x9b, 0x25, 0xba, 0x1b, 0x32, 0xf2, 0xe7, 0x11, + 0x23, 0xa7, 0x63, 0x72, 0xaf, 0xc8, 0xc6, 0x7f, 0x95, 0xe0, 0xf2, 0x09, 0x15, 0x9a, 0x77, 0x81, + 0x21, 0xed, 0x20, 0x17, 0x7f, 0xd4, 0x16, 0x5f, 0xc4, 0xfc, 0x0b, 0x4c, 0x02, 0x0f, 0x4e, 0x95, + 0x46, 0xdb, 0x30, 0x2f, 0x6e, 0x4f, 0x51, 0x9a, 0x28, 0x3e, 0x78, 0x6b, 0xd9, 0x72, 0x32, 0x0b, + 0x4e, 0x93, 0x95, 0xff, 0x46, 0x82, 0xb9, 0xe4, 0x27, 0x03, 0xf4, 0x76, 0x68, 0xc9, 0x2b, 0x91, + 0x25, 0x9f, 0x8c, 0x48, 0x89, 0x05, 0xff, 0x18, 0x26, 0xc4, 0xc3, 0x82, 0x80, 0x11, 0xce, 0x2c, + 0x27, 0xe5, 0x17, 0x01, 0xe1, 0x96, 0xb7, 0xfc, 0x98, 0x84, 0xc7, 0x70, 0x04, 0x4d, 0xfe, 0x83, + 0x1c, 0x0c, 0x35, 0x5b, 0x8a, 0x46, 0xce, 0xa1, 0xba, 0x7d, 0x2f, 0x54, 0xdd, 0x0e, 0xfa, 0xa7, + 0x2d, 0x6e, 0x55, 0x6a, 0x61, 0x8b, 0x23, 0x85, 0xed, 0x9b, 0x99, 0xd0, 0x4e, 0xae, 0x69, 0x7f, + 0x0d, 0x46, 0x3c, 0xa5, 0xa7, 0x4b, 0xb5, 0xf2, 0x5f, 0xe4, 0x60, 0x34, 0xa0, 0xe2, 0x94, 0x89, + 0x7a, 0x37, 0x54, 0x9d, 0xe4, 0x33, 0x3c, 0xe3, 0x04, 0x74, 0x55, 0xdd, 0x7a, 0xc4, 0x69, 0x3a, + 0xf6, 0xdb, 0x4c, 0xe3, 0x65, 0xca, 0x2f, 0x61, 0x82, 0x2a, 0x56, 0x87, 0x50, 0xef, 0xb3, 0x46, + 0x9e, 0xfb, 0xa2, 0xd7, 0xfd, 0xbe, 0x15, 0xa2, 0xe2, 0x08, 0xf7, 0xa5, 0x87, 0x30, 0x1e, 0x52, + 0x76, 0xaa, 0x9e, 0xe1, 0xbf, 0x97, 0xe0, 0xe7, 0x03, 0x9f, 0x82, 0x50, 0x3d, 0x74, 0x48, 0xaa, + 0x91, 0x43, 0xb2, 0x90, 0x0e, 0xf0, 0xea, 0x7a, 0xcf, 0xea, 0x37, 0x9f, 0x7f, 0xbf, 0x70, 0xe1, + 0xdb, 0xef, 0x17, 0x2e, 0x7c, 0xf7, 0xfd, 0xc2, 0x85, 0xdf, 0x3d, 0x5e, 0x90, 0x9e, 0x1f, 0x2f, + 0x48, 0xdf, 0x1e, 0x2f, 0x48, 0xdf, 0x1d, 0x2f, 0x48, 0xff, 0x7e, 0xbc, 0x20, 0xfd, 0xc9, 0x0f, + 0x0b, 0x17, 0x3e, 0x2c, 0x0a, 0xb8, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xa0, 0x62, 0xda, 0xf9, + 0x07, 0x3e, 0x00, 0x00, +} + func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -417,21 +1957,27 @@ func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { } func (m *AllowedCSIDriver) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedCSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + i -= len(m.Name) + 
copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -439,21 +1985,27 @@ func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { } func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedFlexVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i += copy(dAtA[i:], m.Driver) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -461,29 +2013,35 @@ func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { } func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedHostPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) - i += copy(dAtA[i:], m.PathPrefix) - dAtA[i] = 0x10 - i++ + i-- if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x10 + i -= len(m.PathPrefix) + copy(dAtA[i:], m.PathPrefix) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -491,41 +2049,52 @@ func (m *DaemonSet) Marshal() (dAtA []byte, err error) { } func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, 
nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -533,41 +2102,52 @@ func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n4, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -575,37 +2155,46 @@ func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := 
m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -613,54 +2202,65 @@ func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Selector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n6, err := m.Selector.MarshalTo(dAtA[i:]) + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.TemplateGeneration)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n6 - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n7, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n8, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TemplateGeneration)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x12 + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -668,58 +2268,65 @@ func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x52 } } - return i, nil + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) + i-- + dAtA[i] = 0x40 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) + i-- + dAtA[i] = 0x38 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -727,31 +2334,39 @@ func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n9, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Deployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -759,41 +2374,52 @@ func (m *Deployment) Marshal() (dAtA []byte, err error) { } func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n10, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n11, err := 
m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n12, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n12 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -801,49 +2427,62 @@ func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { } func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n13, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n13 + i-- dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n14, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n14 - return i, nil + i-- + dAtA[i] = 0x32 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -851,37 +2490,46 @@ func (m *DeploymentList) Marshal() (dAtA []byte, err error) { } func (m *DeploymentList) MarshalTo(dAtA []byte) (int, 
error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n15, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n15 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentRollback) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -889,51 +2537,61 @@ func (m *DeploymentRollback) Marshal() (dAtA []byte, err error) { } func (m *DeploymentRollback) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentRollback) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + { + size, err := m.RollbackTo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a if len(m.UpdatedAnnotations) > 0 { keysForUpdatedAnnotations := make([]string, 0, len(m.UpdatedAnnotations)) for k := range m.UpdatedAnnotations { keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) - for _, k := range keysForUpdatedAnnotations { + for iNdEx := len(keysForUpdatedAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.UpdatedAnnotations[string(keysForUpdatedAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.UpdatedAnnotations[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForUpdatedAnnotations[iNdEx]) + copy(dAtA[i:], keysForUpdatedAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUpdatedAnnotations[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n16, err := m.RollbackTo.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n16 - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - 
i, nil } func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -941,79 +2599,92 @@ func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { } func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.ProgressDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + i-- + dAtA[i] = 0x48 } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n17, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.RollbackTo != nil { + { + size, err := m.RollbackTo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n17 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n18, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n19, err := m.Strategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n19 - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x42 } - dAtA[i] = 0x38 - i++ + i-- if m.Paused { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if m.RollbackTo != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n20, err := m.RollbackTo.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x38 + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x28 + { + size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n20 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.ProgressDeadlineSeconds != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + i-- + dAtA[i] = 0x22 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1021,52 +2692,59 @@ func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStatus) MarshalTo(dAtA []byte) 
(int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x40 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x38 if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - if m.CollisionCount != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1074,31 +2752,39 @@ func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n21, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n21 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1106,33 +2792,41 @@ func (m 
*FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *FSGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FSGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1140,29 +2834,37 @@ func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { } func (m *HTTPIngressPath) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPIngressPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n22, err := m.Backend.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n22 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HTTPIngressRuleValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1170,53 +2872,65 @@ func (m *HTTPIngressRuleValue) Marshal() (dAtA []byte, err error) { } func (m *HTTPIngressRuleValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPIngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Paths) > 0 { - for _, msg := range m.Paths { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Paths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *HostPortRange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *HostPortRange) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *HostPortRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostPortRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - return i, nil + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *IDRange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1224,23 +2938,28 @@ func (m *IDRange) Marshal() (dAtA []byte, err error) { } func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IDRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - return i, nil + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *IPBlock) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1248,36 +2967,36 @@ func (m *IPBlock) Marshal() (dAtA []byte, err error) { } func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) - i += copy(dAtA[i:], m.CIDR) if len(m.Except) > 0 { - for _, s := range m.Except { + for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Except[iNdEx]) + copy(dAtA[i:], m.Except[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Except[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + i -= len(m.CIDR) + copy(dAtA[i:], m.CIDR) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Ingress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1285,41 +3004,52 @@ func (m *Ingress) Marshal() (dAtA []byte, err error) { } func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n23, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.Spec.Size())) - n24, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n24 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n25, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n25 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *IngressBackend) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1327,29 +3057,37 @@ func (m *IngressBackend) Marshal() (dAtA []byte, err error) { } func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i += copy(dAtA[i:], m.ServiceName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ServicePort.Size())) - n26, err := m.ServicePort.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ServicePort.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n26 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *IngressList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1357,37 +3095,46 @@ func (m *IngressList) Marshal() (dAtA []byte, err error) { } func (m *IngressList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n27, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n27 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m 
*IngressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1395,29 +3142,37 @@ func (m *IngressRule) Marshal() (dAtA []byte, err error) { } func (m *IngressRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) - i += copy(dAtA[i:], m.Host) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.IngressRuleValue.Size())) - n28, err := m.IngressRuleValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.IngressRuleValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n28 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1425,27 +3180,34 @@ func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { } func (m *IngressRuleValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.HTTP != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HTTP.Size())) - n29, err := m.HTTP.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.HTTP.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n29 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *IngressSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1453,51 +3215,62 @@ func (m *IngressSpec) Marshal() (dAtA []byte, err error) { } func (m *IngressSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Backend != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n30, err := m.Backend.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i += n30 } if len(m.TLS) > 0 { - for _, msg := range m.TLS { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.TLS) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TLS[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.Backend != nil { + { + size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *IngressStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1505,25 +3278,32 @@ func (m *IngressStatus) Marshal() (dAtA []byte, err error) { } func (m *IngressStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) - n31, err := m.LoadBalancer.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LoadBalancer.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n31 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *IngressTLS) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1531,36 +3311,36 @@ func (m *IngressTLS) Marshal() (dAtA []byte, err error) { } func (m *IngressTLS) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressTLS) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.SecretName) + copy(dAtA[i:], m.SecretName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i-- + dAtA[i] = 0x12 if len(m.Hosts) > 0 { - for _, s := range m.Hosts { + for iNdEx := len(m.Hosts) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Hosts[iNdEx]) + copy(dAtA[i:], m.Hosts[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hosts[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) - i += copy(dAtA[i:], m.SecretName) - return i, nil + return len(dAtA) - i, nil } func (m *NetworkPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1568,33 +3348,42 @@ func (m *NetworkPolicy) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n32, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := 
m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n32 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n33, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n33 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1602,41 +3391,50 @@ func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyEgressRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyEgressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.To) > 0 { + for iNdEx := len(m.To) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.To[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.To) > 0 { - for _, msg := range m.To { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *NetworkPolicyIngressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1644,41 +3442,50 @@ func (m *NetworkPolicyIngressRule) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyIngressRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyIngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.From) > 0 { + for iNdEx := len(m.From) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.From[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.From) > 0 { - for _, msg := range m.From { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *NetworkPolicyList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1686,37 +3493,46 @@ func (m *NetworkPolicyList) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n34, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n34 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NetworkPolicyPeer) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1724,47 +3540,58 @@ func (m *NetworkPolicyPeer) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyPeer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.PodSelector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n35, err := m.PodSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.IPBlock != nil { + { + size, err := m.IPBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n35 + i-- + dAtA[i] = 0x1a } if m.NamespaceSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) - n36, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n36 + i-- + dAtA[i] = 0x12 } - if m.IPBlock != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.IPBlock.Size())) - n37, err := m.IPBlock.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.PodSelector != nil { + { + size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n37 + i-- + dAtA[i] = 0xa } - return i, nil + return 
len(dAtA) - i, nil } func (m *NetworkPolicyPort) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1772,33 +3599,41 @@ func (m *NetworkPolicyPort) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyPort) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Protocol != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) - i += copy(dAtA[i:], *m.Protocol) - } + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l if m.Port != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n38, err := m.Port.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n38 + i-- + dAtA[i] = 0x12 + } + if m.Protocol != nil { + i -= len(*m.Protocol) + copy(dAtA[i:], *m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *NetworkPolicySpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1806,64 +3641,69 @@ func (m *NetworkPolicySpec) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n39, err := m.PodSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n39 - if len(m.Ingress) > 0 { - for _, msg := range m.Ingress { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.PolicyTypes) > 0 { + for iNdEx := len(m.PolicyTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PolicyTypes[iNdEx]) + copy(dAtA[i:], m.PolicyTypes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyTypes[iNdEx]))) + i-- + dAtA[i] = 0x22 } } if len(m.Egress) > 0 { - for _, msg := range m.Egress { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Egress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Egress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x1a } } - if len(m.PolicyTypes) > 0 { - for _, s := range m.PolicyTypes { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if len(m.Ingress) > 0 { + for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += 
copy(dAtA[i:], s) + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1871,33 +3711,42 @@ func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { } func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n40, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n40 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n41, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n41 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1905,37 +3754,46 @@ func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { } func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n42, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n42 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1943,288 +3801,283 @@ func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { } func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Privileged { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.DefaultAddCapabilities) > 0 { - for _, s := range m.DefaultAddCapabilities { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.RequiredDropCapabilities) > 0 { - for _, s := range m.RequiredDropCapabilities { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.AllowedCapabilities) > 0 { - for _, s := range m.AllowedCapabilities { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.RuntimeClass != nil { + { + size, err := m.RuntimeClass.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 } - if len(m.Volumes) > 0 { - for _, s := range m.Volumes { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if len(m.AllowedCSIDrivers) > 0 { + for iNdEx := len(m.AllowedCSIDrivers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedCSIDrivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba } } - dAtA[i] = 0x30 - i++ - if m.HostNetwork { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.HostPorts) > 0 { - for _, msg := range m.HostPorts { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.RunAsGroup != nil { + { + size, err := m.RunAsGroup.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 } - dAtA[i] = 0x40 - i++ - if m.HostPID { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x48 - i++ - if m.HostIPC { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SELinux.Size())) - n43, err := m.SELinux.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.AllowedProcMountTypes) > 0 { + for iNdEx := len(m.AllowedProcMountTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedProcMountTypes[iNdEx]) + copy(dAtA[i:], m.AllowedProcMountTypes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedProcMountTypes[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } } - i += n43 - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsUser.Size())) - n44, err := m.RunAsUser.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.ForbiddenSysctls) > 0 { + for iNdEx := len(m.ForbiddenSysctls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ForbiddenSysctls[iNdEx]) + copy(dAtA[i:], m.ForbiddenSysctls[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ForbiddenSysctls[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } } - i += n44 - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.SupplementalGroups.Size())) - n45, err := m.SupplementalGroups.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.AllowedUnsafeSysctls) > 0 { + for iNdEx := len(m.AllowedUnsafeSysctls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedUnsafeSysctls[iNdEx]) + copy(dAtA[i:], m.AllowedUnsafeSysctls[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedUnsafeSysctls[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } } - i += n45 - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FSGroup.Size())) - n46, err := m.FSGroup.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.AllowedFlexVolumes) > 0 { + for iNdEx := len(m.AllowedFlexVolumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedFlexVolumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } } - i += n46 - dAtA[i] = 0x70 - i++ - if m.ReadOnlyRootFilesystem { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.AllowedHostPaths) > 0 { + for iNdEx := len(m.AllowedHostPaths) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedHostPaths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } } - i++ - if m.DefaultAllowPrivilegeEscalation != nil { - dAtA[i] = 0x78 - i++ - if *m.DefaultAllowPrivilegeEscalation { + if m.AllowPrivilegeEscalation != nil { + i-- + if *m.AllowPrivilegeEscalation { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - } - if m.AllowPrivilegeEscalation != nil { - dAtA[i] = 0x80 - i++ + i-- dAtA[i] = 0x1 - i++ - if *m.AllowPrivilegeEscalation { + i-- + dAtA[i] = 0x80 + } + if m.DefaultAllowPrivilegeEscalation != nil { + i-- + if *m.DefaultAllowPrivilegeEscalation { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x78 } - if len(m.AllowedHostPaths) > 0 { - for _, msg := range m.AllowedHostPaths { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i-- + if m.ReadOnlyRootFilesystem { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + { + size, err := m.FSGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.AllowedFlexVolumes) > 0 { - for _, msg := range m.AllowedFlexVolumes { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i-- + dAtA[i] = 0x6a + { + size, err := m.SupplementalGroups.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.AllowedUnsafeSysctls) > 0 { - for _, s := range m.AllowedUnsafeSysctls { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x62 + { + size, err := m.RunAsUser.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.ForbiddenSysctls) > 0 { - for _, s := range m.ForbiddenSysctls { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - 
for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + i-- + dAtA[i] = 0x5a + { + size, err := m.SELinux.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + i-- + if m.HostIPC { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + i-- + if m.HostPID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + if len(m.HostPorts) > 0 { + for iNdEx := len(m.HostPorts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.HostPorts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x3a } } - if len(m.AllowedProcMountTypes) > 0 { - for _, s := range m.AllowedProcMountTypes { - dAtA[i] = 0xaa - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + if m.HostNetwork { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Volumes[iNdEx]) + copy(dAtA[i:], m.Volumes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx]))) + i-- + dAtA[i] = 0x2a } } - if m.RunAsGroup != nil { - dAtA[i] = 0xb2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsGroup.Size())) - n47, err := m.RunAsGroup.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.AllowedCapabilities) > 0 { + for iNdEx := len(m.AllowedCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedCapabilities[iNdEx]) + copy(dAtA[i:], m.AllowedCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x22 } - i += n47 } - if len(m.AllowedCSIDrivers) > 0 { - for _, msg := range m.AllowedCSIDrivers { - dAtA[i] = 0xba - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.RequiredDropCapabilities) > 0 { + for iNdEx := len(m.RequiredDropCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RequiredDropCapabilities[iNdEx]) + copy(dAtA[i:], m.RequiredDropCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequiredDropCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.DefaultAddCapabilities) > 0 { + for iNdEx := len(m.DefaultAddCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DefaultAddCapabilities[iNdEx]) + copy(dAtA[i:], m.DefaultAddCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DefaultAddCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - return i, nil + i-- + if m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2232,41 +4085,52 @@ func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa 
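The regenerated gogo/protobuf marshalers in this vendored file switch from the old forward-writing MarshalTo (tag, then length varint, then value, advancing i upward) to MarshalToSizedBuffer, which fills a pre-sized buffer from the end: the value is copied first, its length varint and wire tag are prepended, and fields are visited in descending field-number order so the finished bytes still come out in ascending order. encodeVarintGenerated changes accordingly: it now receives the start offset of the already-written suffix, reserves room for the varint directly in front of it, and returns the new, smaller offset. Below is a minimal standalone sketch of that pattern, not part of the generated file; the names sov, encodeVarint and marshalRule are placeholders chosen for illustration and mirror sovGenerated, encodeVarintGenerated and the single-string Rule-field hunks (e.g. FSGroupStrategyOptions) seen in this diff.

package main

import (
	"fmt"
	"math/bits"
)

// sov mirrors sovGenerated: the number of bytes the varint encoding of v occupies.
func sov(v uint64) int { return (bits.Len64(v|1) + 6) / 7 }

// encodeVarint mirrors the new encodeVarintGenerated: offset is the start of the
// already-written suffix; the varint is placed immediately before it and the new
// start offset is returned.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

// marshalRule shows the back-to-front MarshalToSizedBuffer pattern for one string
// field with field number 1 (wire tag 0xa): copy the value, prepend its length,
// then prepend the tag, and return the written suffix.
func marshalRule(rule string) []byte {
	size := 1 + sov(uint64(len(rule))) + len(rule)
	dAtA := make([]byte, size)
	i := len(dAtA)
	i -= len(rule)
	copy(dAtA[i:], rule)
	i = encodeVarint(dAtA, i, uint64(len(rule)))
	i--
	dAtA[i] = 0xa
	return dAtA[i:]
}

func main() {
	// Prints: 0a 08 52 75 6e 41 73 41 6e 79  (field 1, length 8, "RunAsAny")
	fmt.Printf("% x\n", marshalRule("RunAsAny"))
}

The same regeneration also adds a defensive nil-receiver guard to every Size() method further down in this file (if m == nil { return 0 }), so sizing an unset optional message contributes 0 instead of dereferencing a nil pointer.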
- i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n48, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n48 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n49, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n49 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n50, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n50 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2274,41 +4138,52 @@ func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n51, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n51 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2316,37 +4191,46 @@ func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n52, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n52 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2354,43 +4238,52 @@ func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n53, err := m.Selector.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n53 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n54, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i += n54 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - return i, nil + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2398,44 +4291,51 @@ func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.ReadyReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *ReplicationControllerDummy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2443,17 +4343,22 @@ func (m *ReplicationControllerDummy) Marshal() (dAtA []byte, err error) { } func (m *ReplicationControllerDummy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicationControllerDummy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - return i, nil + return len(dAtA) - i, nil } func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2461,20 +4366,25 @@ func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { } func (m *RollbackConfig) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollbackConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2482,27 +4392,34 @@ func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n55, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n55 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, 
err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2510,37 +4427,46 @@ func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n56, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n56 - } if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n57, err := m.MaxSurge.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n57 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2548,33 +4474,41 @@ func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *RunAsGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunAsGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2582,33 +4516,80 @@ func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunAsUserStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - 
i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RuntimeClassStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClassStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DefaultRuntimeClassName != nil { + i -= len(*m.DefaultRuntimeClassName) + copy(dAtA[i:], *m.DefaultRuntimeClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DefaultRuntimeClassName))) + i-- + dAtA[i] = 0x12 + } + if len(m.AllowedRuntimeClassNames) > 0 { + for iNdEx := len(m.AllowedRuntimeClassNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedRuntimeClassNames[iNdEx]) + copy(dAtA[i:], m.AllowedRuntimeClassNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedRuntimeClassNames[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2616,31 +4597,39 @@ func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SELinuxStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if m.SELinuxOptions != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n58, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n58 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Scale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2648,41 +4637,52 @@ func (m *Scale) Marshal() (dAtA []byte, err error) { } func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.ObjectMeta.Size())) - n59, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n59 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n60, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n60 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n61, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n61 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2690,20 +4690,25 @@ func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { } func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2711,46 +4716,54 @@ func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { } func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i -= len(m.TargetSelector) + copy(dAtA[i:], m.TargetSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) + i-- + dAtA[i] = 0x1a if len(m.Selector) > 0 { keysForSelector := make([]string, 0, len(m.Selector)) for k := range m.Selector { keysForSelector = append(keysForSelector, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for _, k := range keysForSelector { + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.Selector[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) - i += copy(dAtA[i:], m.TargetSelector) - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2758,39 +4771,52 @@ func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SupplementalGroupsStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *AllowedCSIDriver) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -2799,6 +4825,9 @@ func (m *AllowedCSIDriver) Size() (n int) { } func (m *AllowedFlexVolume) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Driver) @@ -2807,6 +4836,9 @@ func (m *AllowedFlexVolume) Size() (n int) { } func (m *AllowedHostPath) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.PathPrefix) @@ -2816,6 +4848,9 @@ func (m *AllowedHostPath) Size() (n int) { } func (m *DaemonSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -2828,6 +4863,9 @@ func (m *DaemonSet) Size() (n int) { } func (m *DaemonSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -2844,6 +4882,9 @@ func (m *DaemonSetCondition) Size() (n int) { } func (m *DaemonSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -2858,6 +4899,9 @@ func (m *DaemonSetList) Size() (n int) { } func (m *DaemonSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Selector != nil { @@ -2877,6 +4921,9 @@ func (m *DaemonSetSpec) Size() (n int) { } func (m *DaemonSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) @@ -2900,6 +4947,9 @@ func (m *DaemonSetStatus) Size() (n int) { } func (m *DaemonSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -2912,6 +4962,9 @@ func (m 
*DaemonSetUpdateStrategy) Size() (n int) { } func (m *Deployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -2924,6 +4977,9 @@ func (m *Deployment) Size() (n int) { } func (m *DeploymentCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -2942,6 +4998,9 @@ func (m *DeploymentCondition) Size() (n int) { } func (m *DeploymentList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -2956,6 +5015,9 @@ func (m *DeploymentList) Size() (n int) { } func (m *DeploymentRollback) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -2974,6 +5036,9 @@ func (m *DeploymentRollback) Size() (n int) { } func (m *DeploymentSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -3003,6 +5068,9 @@ func (m *DeploymentSpec) Size() (n int) { } func (m *DeploymentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -3024,6 +5092,9 @@ func (m *DeploymentStatus) Size() (n int) { } func (m *DeploymentStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -3036,6 +5107,9 @@ func (m *DeploymentStrategy) Size() (n int) { } func (m *FSGroupStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -3050,6 +5124,9 @@ func (m *FSGroupStrategyOptions) Size() (n int) { } func (m *HTTPIngressPath) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Path) @@ -3060,6 +5137,9 @@ func (m *HTTPIngressPath) Size() (n int) { } func (m *HTTPIngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Paths) > 0 { @@ -3072,6 +5152,9 @@ func (m *HTTPIngressRuleValue) Size() (n int) { } func (m *HostPortRange) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Min)) @@ -3080,6 +5163,9 @@ func (m *HostPortRange) Size() (n int) { } func (m *IDRange) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Min)) @@ -3088,6 +5174,9 @@ func (m *IDRange) Size() (n int) { } func (m *IPBlock) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.CIDR) @@ -3102,6 +5191,9 @@ func (m *IPBlock) Size() (n int) { } func (m *Ingress) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -3114,6 +5206,9 @@ func (m *Ingress) Size() (n int) { } func (m *IngressBackend) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ServiceName) @@ -3124,6 +5219,9 @@ func (m *IngressBackend) Size() (n int) { } func (m *IngressList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -3138,6 +5236,9 @@ func (m *IngressList) Size() (n int) { } func (m *IngressRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Host) @@ -3148,6 +5249,9 @@ func (m *IngressRule) Size() (n int) { } func (m *IngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.HTTP != nil { @@ -3158,6 +5262,9 @@ func (m *IngressRuleValue) Size() (n int) { } func (m *IngressSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Backend != nil { @@ -3180,6 +5287,9 @@ func (m *IngressSpec) Size() (n int) { } func (m *IngressStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.LoadBalancer.Size() @@ -3188,6 
+5298,9 @@ func (m *IngressStatus) Size() (n int) { } func (m *IngressTLS) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Hosts) > 0 { @@ -3202,6 +5315,9 @@ func (m *IngressTLS) Size() (n int) { } func (m *NetworkPolicy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -3212,6 +5328,9 @@ func (m *NetworkPolicy) Size() (n int) { } func (m *NetworkPolicyEgressRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Ports) > 0 { @@ -3230,6 +5349,9 @@ func (m *NetworkPolicyEgressRule) Size() (n int) { } func (m *NetworkPolicyIngressRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Ports) > 0 { @@ -3248,6 +5370,9 @@ func (m *NetworkPolicyIngressRule) Size() (n int) { } func (m *NetworkPolicyList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -3262,6 +5387,9 @@ func (m *NetworkPolicyList) Size() (n int) { } func (m *NetworkPolicyPeer) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.PodSelector != nil { @@ -3280,6 +5408,9 @@ func (m *NetworkPolicyPeer) Size() (n int) { } func (m *NetworkPolicyPort) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Protocol != nil { @@ -3294,6 +5425,9 @@ func (m *NetworkPolicyPort) Size() (n int) { } func (m *NetworkPolicySpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.PodSelector.Size() @@ -3320,6 +5454,9 @@ func (m *NetworkPolicySpec) Size() (n int) { } func (m *PodSecurityPolicy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -3330,6 +5467,9 @@ func (m *PodSecurityPolicy) Size() (n int) { } func (m *PodSecurityPolicyList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -3344,6 +5484,9 @@ func (m *PodSecurityPolicyList) Size() (n int) { } func (m *PodSecurityPolicySpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -3435,10 +5578,17 @@ func (m *PodSecurityPolicySpec) Size() (n int) { n += 2 + l + sovGenerated(uint64(l)) } } + if m.RuntimeClass != nil { + l = m.RuntimeClass.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } func (m *ReplicaSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -3451,6 +5601,9 @@ func (m *ReplicaSet) Size() (n int) { } func (m *ReplicaSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -3467,6 +5620,9 @@ func (m *ReplicaSetCondition) Size() (n int) { } func (m *ReplicaSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -3481,6 +5637,9 @@ func (m *ReplicaSetList) Size() (n int) { } func (m *ReplicaSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -3497,6 +5656,9 @@ func (m *ReplicaSetSpec) Size() (n int) { } func (m *ReplicaSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -3514,12 +5676,18 @@ func (m *ReplicaSetStatus) Size() (n int) { } func (m *ReplicationControllerDummy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l return n } func (m *RollbackConfig) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Revision)) @@ -3527,6 +5695,9 @@ func (m *RollbackConfig) Size() (n int) { } func (m *RollingUpdateDaemonSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if 
m.MaxUnavailable != nil { @@ -3537,6 +5708,9 @@ func (m *RollingUpdateDaemonSet) Size() (n int) { } func (m *RollingUpdateDeployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MaxUnavailable != nil { @@ -3551,6 +5725,9 @@ func (m *RollingUpdateDeployment) Size() (n int) { } func (m *RunAsGroupStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -3565,6 +5742,9 @@ func (m *RunAsGroupStrategyOptions) Size() (n int) { } func (m *RunAsUserStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -3578,7 +5758,29 @@ func (m *RunAsUserStrategyOptions) Size() (n int) { return n } +func (m *RuntimeClassStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.AllowedRuntimeClassNames) > 0 { + for _, s := range m.AllowedRuntimeClassNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultRuntimeClassName != nil { + l = len(*m.DefaultRuntimeClassName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *SELinuxStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -3591,6 +5793,9 @@ func (m *SELinuxStrategyOptions) Size() (n int) { } func (m *Scale) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -3603,6 +5808,9 @@ func (m *Scale) Size() (n int) { } func (m *ScaleSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -3610,6 +5818,9 @@ func (m *ScaleSpec) Size() (n int) { } func (m *ScaleStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -3627,6 +5838,9 @@ func (m *ScaleStatus) Size() (n int) { } func (m *SupplementalGroupsStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -3641,14 +5855,7 @@ func (m *SupplementalGroupsStrategyOptions) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -3689,7 +5896,7 @@ func (this *DaemonSet) String() string { return "nil" } s := strings.Join([]string{`&DaemonSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -3703,7 +5910,7 @@ func (this *DaemonSetCondition) String() string { s := strings.Join([]string{`&DaemonSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + 
fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -3714,9 +5921,14 @@ func (this *DaemonSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]DaemonSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&DaemonSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -3726,8 +5938,8 @@ func (this *DaemonSetSpec) String() string { return "nil" } s := strings.Join([]string{`&DaemonSetSpec{`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `TemplateGeneration:` + fmt.Sprintf("%v", this.TemplateGeneration) + `,`, @@ -3740,6 +5952,11 @@ func (this *DaemonSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]DaemonSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DaemonSetStatus{`, `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, @@ -3750,7 +5967,7 @@ func (this *DaemonSetStatus) String() string { `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -3761,7 +5978,7 @@ func (this *DaemonSetUpdateStrategy) String() string { } s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, `}`, }, "") return s @@ -3771,7 +5988,7 @@ func (this *Deployment) String() string { return "nil" } s := 
strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -3787,8 +6004,8 @@ func (this *DeploymentCondition) String() string { `Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3797,9 +6014,14 @@ func (this *DeploymentList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Deployment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -3832,13 +6054,13 @@ func (this *DeploymentSpec) String() string { } s := strings.Join([]string{`&DeploymentSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, - `RollbackTo:` + strings.Replace(fmt.Sprintf("%v", this.RollbackTo), "RollbackConfig", "RollbackConfig", 1) + `,`, + `RollbackTo:` + 
strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1) + `,`, `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, `}`, }, "") @@ -3848,13 +6070,18 @@ func (this *DeploymentStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DeploymentStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, `}`, @@ -3867,7 +6094,7 @@ func (this *DeploymentStrategy) String() string { } s := strings.Join([]string{`&DeploymentStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, `}`, }, "") return s @@ -3876,9 +6103,14 @@ func (this *FSGroupStrategyOptions) String() string { if this == nil { return "nil" } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" s := strings.Join([]string{`&FSGroupStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, `}`, }, "") return s @@ -3898,8 +6130,13 @@ func (this *HTTPIngressRuleValue) String() string { if this == nil { return "nil" } + repeatedStringForPaths := "[]HTTPIngressPath{" + for _, f := range this.Paths { + repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," + } + repeatedStringForPaths += "}" s := strings.Join([]string{`&HTTPIngressRuleValue{`, - `Paths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Paths), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + `,`, + `Paths:` + repeatedStringForPaths + `,`, `}`, }, "") return s @@ -3942,7 +6179,7 @@ func (this *Ingress) String() string { return "nil" } s := strings.Join([]string{`&Ingress{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, 
`Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -3955,7 +6192,7 @@ func (this *IngressBackend) String() string { } s := strings.Join([]string{`&IngressBackend{`, `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, - `ServicePort:` + strings.Replace(strings.Replace(this.ServicePort.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `ServicePort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServicePort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3964,9 +6201,14 @@ func (this *IngressList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Ingress{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Ingress", "Ingress", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&IngressList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Ingress", "Ingress", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -3987,7 +6229,7 @@ func (this *IngressRuleValue) String() string { return "nil" } s := strings.Join([]string{`&IngressRuleValue{`, - `HTTP:` + strings.Replace(fmt.Sprintf("%v", this.HTTP), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, + `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, `}`, }, "") return s @@ -3996,10 +6238,20 @@ func (this *IngressSpec) String() string { if this == nil { return "nil" } + repeatedStringForTLS := "[]IngressTLS{" + for _, f := range this.TLS { + repeatedStringForTLS += strings.Replace(strings.Replace(f.String(), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + "," + } + repeatedStringForTLS += "}" + repeatedStringForRules := "[]IngressRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "IngressRule", "IngressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&IngressSpec{`, - `Backend:` + strings.Replace(fmt.Sprintf("%v", this.Backend), "IngressBackend", "IngressBackend", 1) + `,`, - `TLS:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TLS), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "IngressRule", "IngressRule", 1), `&`, ``, 1) + `,`, + `Backend:` + strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1) + `,`, + `TLS:` + repeatedStringForTLS + `,`, + `Rules:` + repeatedStringForRules + `,`, `}`, }, "") return s @@ -4009,7 +6261,7 @@ func (this *IngressStatus) String() string { return "nil" } s := strings.Join([]string{`&IngressStatus{`, - `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "k8s_io_api_core_v1.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `LoadBalancer:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LoadBalancer), "LoadBalancerStatus", "v11.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -4030,7 +6282,7 @@ func (this 
*NetworkPolicy) String() string { return "nil" } s := strings.Join([]string{`&NetworkPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -4040,9 +6292,19 @@ func (this *NetworkPolicyEgressRule) String() string { if this == nil { return "nil" } + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForTo := "[]NetworkPolicyPeer{" + for _, f := range this.To { + repeatedStringForTo += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," + } + repeatedStringForTo += "}" s := strings.Join([]string{`&NetworkPolicyEgressRule{`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, - `To:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.To), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `To:` + repeatedStringForTo + `,`, `}`, }, "") return s @@ -4051,9 +6313,19 @@ func (this *NetworkPolicyIngressRule) String() string { if this == nil { return "nil" } + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForFrom := "[]NetworkPolicyPeer{" + for _, f := range this.From { + repeatedStringForFrom += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," + } + repeatedStringForFrom += "}" s := strings.Join([]string{`&NetworkPolicyIngressRule{`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, - `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `From:` + repeatedStringForFrom + `,`, `}`, }, "") return s @@ -4062,9 +6334,14 @@ func (this *NetworkPolicyList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]NetworkPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&NetworkPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -4074,9 +6351,9 @@ func 
(this *NetworkPolicyPeer) String() string { return "nil" } s := strings.Join([]string{`&NetworkPolicyPeer{`, - `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `IPBlock:` + strings.Replace(fmt.Sprintf("%v", this.IPBlock), "IPBlock", "IPBlock", 1) + `,`, + `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `IPBlock:` + strings.Replace(this.IPBlock.String(), "IPBlock", "IPBlock", 1) + `,`, `}`, }, "") return s @@ -4087,7 +6364,7 @@ func (this *NetworkPolicyPort) String() string { } s := strings.Join([]string{`&NetworkPolicyPort{`, `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, - `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -4096,10 +6373,20 @@ func (this *NetworkPolicySpec) String() string { if this == nil { return "nil" } + repeatedStringForIngress := "[]NetworkPolicyIngressRule{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForIngress += "}" + repeatedStringForEgress := "[]NetworkPolicyEgressRule{" + for _, f := range this.Egress { + repeatedStringForEgress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForEgress += "}" s := strings.Join([]string{`&NetworkPolicySpec{`, - `PodSelector:` + strings.Replace(strings.Replace(this.PodSelector.String(), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `Ingress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ingress), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + `,`, - `Egress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Egress), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + `,`, + `PodSelector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Ingress:` + repeatedStringForIngress + `,`, + `Egress:` + repeatedStringForEgress + `,`, `PolicyTypes:` + fmt.Sprintf("%v", this.PolicyTypes) + `,`, `}`, }, "") @@ -4110,7 +6397,7 @@ func (this *PodSecurityPolicy) String() string { return "nil" } s := strings.Join([]string{`&PodSecurityPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySpec", "PodSecurityPolicySpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -4120,9 +6407,14 @@ func (this *PodSecurityPolicyList) String() string { if this == nil { return "nil" } + 
repeatedStringForItems := "[]PodSecurityPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PodSecurityPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -4131,6 +6423,26 @@ func (this *PodSecurityPolicySpec) String() string { if this == nil { return "nil" } + repeatedStringForHostPorts := "[]HostPortRange{" + for _, f := range this.HostPorts { + repeatedStringForHostPorts += strings.Replace(strings.Replace(f.String(), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + "," + } + repeatedStringForHostPorts += "}" + repeatedStringForAllowedHostPaths := "[]AllowedHostPath{" + for _, f := range this.AllowedHostPaths { + repeatedStringForAllowedHostPaths += strings.Replace(strings.Replace(f.String(), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedHostPaths += "}" + repeatedStringForAllowedFlexVolumes := "[]AllowedFlexVolume{" + for _, f := range this.AllowedFlexVolumes { + repeatedStringForAllowedFlexVolumes += strings.Replace(strings.Replace(f.String(), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedFlexVolumes += "}" + repeatedStringForAllowedCSIDrivers := "[]AllowedCSIDriver{" + for _, f := range this.AllowedCSIDrivers { + repeatedStringForAllowedCSIDrivers += strings.Replace(strings.Replace(f.String(), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedCSIDrivers += "}" s := strings.Join([]string{`&PodSecurityPolicySpec{`, `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, @@ -4138,7 +6450,7 @@ func (this *PodSecurityPolicySpec) String() string { `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, - `HostPorts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HostPorts), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + `,`, + `HostPorts:` + repeatedStringForHostPorts + `,`, `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, `SELinux:` + strings.Replace(strings.Replace(this.SELinux.String(), "SELinuxStrategyOptions", "SELinuxStrategyOptions", 1), `&`, ``, 1) + `,`, @@ -4148,13 +6460,14 @@ func (this *PodSecurityPolicySpec) String() string { `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, `DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, - `AllowedHostPaths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedHostPaths), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + `,`, - `AllowedFlexVolumes:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedFlexVolumes), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + `,`, + `AllowedHostPaths:` + repeatedStringForAllowedHostPaths + `,`, + `AllowedFlexVolumes:` + repeatedStringForAllowedFlexVolumes + `,`, `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, - `RunAsGroup:` + strings.Replace(fmt.Sprintf("%v", this.RunAsGroup), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, - `AllowedCSIDrivers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedCSIDrivers), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + `,`, + `RunAsGroup:` + strings.Replace(this.RunAsGroup.String(), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, + `AllowedCSIDrivers:` + repeatedStringForAllowedCSIDrivers + `,`, + `RuntimeClass:` + strings.Replace(this.RuntimeClass.String(), "RuntimeClassStrategyOptions", "RuntimeClassStrategyOptions", 1) + `,`, `}`, }, "") return s @@ -4164,7 +6477,7 @@ func (this *ReplicaSet) String() string { return "nil" } s := strings.Join([]string{`&ReplicaSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -4178,7 +6491,7 @@ func (this *ReplicaSetCondition) String() string { s := strings.Join([]string{`&ReplicaSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -4189,9 +6502,14 @@ func (this *ReplicaSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ReplicaSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ReplicaSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -4202,8 +6520,8 @@ func (this *ReplicaSetSpec) String() string { } s := strings.Join([]string{`&ReplicaSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + 
strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `}`, }, "") @@ -4213,13 +6531,18 @@ func (this *ReplicaSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]ReplicaSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&ReplicaSetStatus{`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -4248,7 +6571,7 @@ func (this *RollingUpdateDaemonSet) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDaemonSet{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -4258,8 +6581,8 @@ func (this *RollingUpdateDeployment) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -4268,9 +6591,14 @@ func (this *RunAsGroupStrategyOptions) String() string { if this == nil { return "nil" } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" s := strings.Join([]string{`&RunAsGroupStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, `}`, }, "") return s @@ -4279,9 +6607,25 @@ func (this *RunAsUserStrategyOptions) String() string { if this == nil { return "nil" } + repeatedStringForRanges := 
"[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" s := strings.Join([]string{`&RunAsUserStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClassStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeClassStrategyOptions{`, + `AllowedRuntimeClassNames:` + fmt.Sprintf("%v", this.AllowedRuntimeClassNames) + `,`, + `DefaultRuntimeClassName:` + valueToStringGenerated(this.DefaultRuntimeClassName) + `,`, `}`, }, "") return s @@ -4292,7 +6636,7 @@ func (this *SELinuxStrategyOptions) String() string { } s := strings.Join([]string{`&SELinuxStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "k8s_io_api_core_v1.SELinuxOptions", 1) + `,`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "v11.SELinuxOptions", 1) + `,`, `}`, }, "") return s @@ -4302,7 +6646,7 @@ func (this *Scale) String() string { return "nil" } s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -4345,9 +6689,14 @@ func (this *SupplementalGroupsStrategyOptions) String() string { if this == nil { return "nil" } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, `}`, }, "") return s @@ -4375,7 +6724,7 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4403,7 +6752,7 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4413,6 +6762,9 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4427,6 +6779,9 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF 
} @@ -4454,7 +6809,7 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4482,7 +6837,7 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4492,6 +6847,9 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4506,6 +6864,9 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4533,7 +6894,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4561,7 +6922,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4571,6 +6932,9 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4590,7 +6954,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4605,6 +6969,9 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4632,7 +6999,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4660,7 +7027,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4669,6 +7036,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4690,7 +7060,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4699,6 +7069,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4720,7 +7093,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4729,6 +7102,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4745,6 +7121,9 
@@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4772,7 +7151,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4800,7 +7179,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4810,6 +7189,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4829,7 +7211,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4839,6 +7221,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4858,7 +7243,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4867,6 +7252,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4888,7 +7276,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4898,6 +7286,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4917,7 +7308,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4927,6 +7318,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4941,6 +7335,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4968,7 +7365,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4996,7 +7393,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5005,6 +7402,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + 
msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5026,7 +7426,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5035,6 +7435,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5052,6 +7455,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5079,7 +7485,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5107,7 +7513,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5116,11 +7522,14 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5140,7 +7549,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5149,6 +7558,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5170,7 +7582,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5179,6 +7591,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5200,7 +7615,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5219,7 +7634,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TemplateGeneration |= (int64(b) & 0x7F) << shift + m.TemplateGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5238,7 +7653,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5253,6 +7668,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5280,7 +7698,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= 
(uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5308,7 +7726,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift + m.CurrentNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5327,7 +7745,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberMisscheduled |= (int32(b) & 0x7F) << shift + m.NumberMisscheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5346,7 +7764,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift + m.DesiredNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5365,7 +7783,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberReady |= (int32(b) & 0x7F) << shift + m.NumberReady |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5384,7 +7802,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5403,7 +7821,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedNumberScheduled |= (int32(b) & 0x7F) << shift + m.UpdatedNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5422,7 +7840,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberAvailable |= (int32(b) & 0x7F) << shift + m.NumberAvailable |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5441,7 +7859,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberUnavailable |= (int32(b) & 0x7F) << shift + m.NumberUnavailable |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5460,7 +7878,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5480,7 +7898,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5489,6 +7907,9 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5506,6 +7927,9 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5533,7 +7957,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5561,7 +7985,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5571,6 +7995,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5590,7 +8017,7 @@ func (m 
*DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5599,6 +8026,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5618,6 +8048,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5645,7 +8078,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5673,7 +8106,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5682,6 +8115,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5703,7 +8139,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5712,6 +8148,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5733,7 +8172,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5742,6 +8181,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5758,6 +8200,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5785,7 +8230,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5813,7 +8258,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5823,6 +8268,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5842,7 +8290,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5852,6 +8300,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } 
if postIndex > l { return io.ErrUnexpectedEOF } @@ -5871,7 +8322,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5881,6 +8332,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5900,7 +8354,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5910,6 +8364,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5929,7 +8386,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5938,6 +8395,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5959,7 +8419,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5968,6 +8428,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5984,6 +8447,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6011,7 +8477,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6039,7 +8505,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6047,7 +8513,10 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6069,7 +8538,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6078,6 +8547,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6095,6 +8567,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } 
@@ -6122,7 +8597,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6150,7 +8625,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6160,6 +8635,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6179,7 +8657,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6188,6 +8666,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6208,7 +8689,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6225,7 +8706,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6235,6 +8716,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -6251,7 +8735,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6261,6 +8745,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -6297,7 +8784,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6306,6 +8793,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6322,6 +8812,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6349,7 +8842,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6377,7 +8870,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6397,7 +8890,7 @@ func (m *DeploymentSpec) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6406,11 +8899,14 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -6430,7 +8926,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6439,6 +8935,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6460,7 +8959,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6469,6 +8968,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6490,7 +8992,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6509,7 +9011,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6529,7 +9031,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6549,7 +9051,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6558,6 +9060,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6582,7 +9087,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6597,6 +9102,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6624,7 +9132,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6652,7 +9160,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6671,7 +9179,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) 
& 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6690,7 +9198,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6709,7 +9217,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6728,7 +9236,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + m.UnavailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6747,7 +9255,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6756,6 +9264,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6778,7 +9289,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6797,7 +9308,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6812,6 +9323,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6839,7 +9353,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6867,7 +9381,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6877,6 +9391,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6896,7 +9413,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6905,6 +9422,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6924,6 +9444,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6951,7 +9474,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6979,7 +9502,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) 
error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6989,6 +9512,9 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7008,7 +9534,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7017,6 +9543,9 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7034,6 +9563,9 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7061,7 +9593,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7089,7 +9621,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7099,6 +9631,9 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7118,7 +9653,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7127,6 +9662,9 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7143,6 +9681,9 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7170,7 +9711,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7198,7 +9739,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7207,6 +9748,9 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7224,6 +9768,9 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7251,7 +9798,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + 
wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7279,7 +9826,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= (int32(b) & 0x7F) << shift + m.Min |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7298,7 +9845,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= (int32(b) & 0x7F) << shift + m.Max |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7312,6 +9859,9 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7339,7 +9889,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7367,7 +9917,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= (int64(b) & 0x7F) << shift + m.Min |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -7386,7 +9936,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= (int64(b) & 0x7F) << shift + m.Max |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -7400,6 +9950,9 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7427,7 +9980,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7455,7 +10008,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7465,6 +10018,9 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7484,7 +10040,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7494,6 +10050,9 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7508,6 +10067,9 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7535,7 +10097,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7563,7 +10125,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7572,6 +10134,9 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7593,7 +10158,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) 
error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7602,6 +10167,9 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7623,7 +10191,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7632,6 +10200,9 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7648,6 +10219,9 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7675,7 +10249,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7703,7 +10277,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7713,6 +10287,9 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7732,7 +10309,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7741,6 +10318,9 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7757,6 +10337,9 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7784,7 +10367,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7812,7 +10395,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7821,6 +10404,9 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7842,7 +10428,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7851,6 +10437,9 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7868,6 +10457,9 @@ func (m *IngressList) 
Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7895,7 +10487,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7923,7 +10515,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7933,6 +10525,9 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7952,7 +10547,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7961,6 +10556,9 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7977,6 +10575,9 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8004,7 +10605,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8032,7 +10633,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8041,6 +10642,9 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8060,6 +10664,9 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8087,7 +10694,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8115,7 +10722,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8124,6 +10731,9 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8148,7 +10758,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8157,6 +10767,9 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8179,7 
+10792,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8188,6 +10801,9 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8205,6 +10821,9 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8232,7 +10851,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8260,7 +10879,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8269,6 +10888,9 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8285,6 +10907,9 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8312,7 +10937,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8340,7 +10965,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8350,6 +10975,9 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8369,7 +10997,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8379,6 +11007,9 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8393,6 +11024,9 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8420,7 +11054,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8448,7 +11082,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8457,6 +11091,9 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { 
return io.ErrUnexpectedEOF } @@ -8478,7 +11115,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8487,6 +11124,9 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8503,6 +11143,9 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8530,7 +11173,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8558,7 +11201,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8567,6 +11210,9 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8589,7 +11235,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8598,6 +11244,9 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8615,6 +11264,9 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8642,7 +11294,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8670,7 +11322,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8679,6 +11331,9 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8701,7 +11356,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8710,6 +11365,9 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8727,6 +11385,9 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8754,7 
+11415,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8782,7 +11443,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8791,6 +11452,9 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8812,7 +11476,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8821,6 +11485,9 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8838,6 +11505,9 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8865,7 +11535,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8893,7 +11563,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8902,11 +11572,14 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.PodSelector == nil { - m.PodSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.PodSelector = &v1.LabelSelector{} } if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -8926,7 +11599,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8935,11 +11608,14 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.NamespaceSelector == nil { - m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.NamespaceSelector = &v1.LabelSelector{} } if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -8959,7 +11635,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8968,6 +11644,9 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8987,6 +11666,9 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + 
return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9014,7 +11696,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9042,7 +11724,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9052,6 +11734,9 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9072,7 +11757,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9081,11 +11766,14 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Port == nil { - m.Port = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.Port = &intstr.IntOrString{} } if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -9100,6 +11788,9 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9127,7 +11818,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9155,7 +11846,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9164,6 +11855,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9185,7 +11879,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9194,6 +11888,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9216,7 +11913,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9225,6 +11922,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9247,7 +11947,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9257,6 +11957,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9271,6 +11974,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9298,7 +12004,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9326,7 +12032,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9335,6 +12041,9 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9356,7 +12065,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9365,6 +12074,9 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9381,6 +12093,9 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9408,7 +12123,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9436,7 +12151,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9445,6 +12160,9 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9466,7 +12184,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9475,6 +12193,9 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9492,6 +12213,9 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9519,7 +12243,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9547,7 +12271,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << 
shift if b < 0x80 { break } @@ -9567,7 +12291,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9577,6 +12301,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9596,7 +12323,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9606,6 +12333,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9625,7 +12355,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9635,6 +12365,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9654,7 +12387,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9664,6 +12397,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9683,7 +12419,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9703,7 +12439,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9712,6 +12448,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9734,7 +12473,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9754,7 +12493,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9774,7 +12513,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9783,6 +12522,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9804,7 +12546,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9813,6 +12555,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9834,7 +12579,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9843,6 +12588,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9864,7 +12612,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9873,6 +12621,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9894,7 +12645,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9914,7 +12665,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9935,7 +12686,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9956,7 +12707,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9965,6 +12716,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9987,7 +12741,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9996,6 +12750,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10018,7 +12775,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10028,6 +12785,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10047,7 +12807,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10057,6 +12817,9 @@ func (m 
*PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10076,24 +12839,63 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) + if m.RunAsGroup == nil { + m.RunAsGroup = &RunAsGroupStrategyOptions{} + } + if err := m.RunAsGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 22: + case 23: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AllowedCSIDrivers", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10105,7 +12907,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10114,19 +12916,20 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.RunAsGroup == nil { - m.RunAsGroup = &RunAsGroupStrategyOptions{} - } - if err := m.RunAsGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.AllowedCSIDrivers = append(m.AllowedCSIDrivers, AllowedCSIDriver{}) + if err := m.AllowedCSIDrivers[len(m.AllowedCSIDrivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 23: + case 24: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedCSIDrivers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClass", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10138,7 +12941,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10147,11 +12950,16 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedCSIDrivers = append(m.AllowedCSIDrivers, AllowedCSIDriver{}) - if err := m.AllowedCSIDrivers[len(m.AllowedCSIDrivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.RuntimeClass == nil { + m.RuntimeClass = &RuntimeClassStrategyOptions{} + } + if err := m.RuntimeClass.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -10164,6 +12972,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10191,7 +13002,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10219,7 +13030,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10228,6 +13039,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10249,7 +13063,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10258,6 +13072,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10279,7 +13096,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10288,6 +13105,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10304,6 +13124,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10331,7 +13154,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10359,7 +13182,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10369,6 +13192,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10388,7 +13214,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10398,6 +13224,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10417,7 +13246,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10426,6 +13255,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10447,7 +13279,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10457,6 +13289,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10476,7 +13311,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10486,6 +13321,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10500,6 +13338,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10527,7 +13368,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10555,7 +13396,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10564,6 +13405,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10585,7 +13429,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10594,6 +13438,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10611,6 +13458,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10638,7 +13488,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10666,7 +13516,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -10686,7 +13536,7 @@ func (m 
*ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10695,11 +13545,14 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -10719,7 +13572,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10728,6 +13581,9 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10749,7 +13605,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -10763,6 +13619,9 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10790,7 +13649,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10818,7 +13677,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -10837,7 +13696,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift + m.FullyLabeledReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -10856,7 +13715,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -10875,7 +13734,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -10894,7 +13753,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -10913,7 +13772,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10922,6 +13781,9 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10939,6 +13801,9 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if 
(iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10966,7 +13831,7 @@ func (m *ReplicationControllerDummy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10989,6 +13854,9 @@ func (m *ReplicationControllerDummy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11016,7 +13884,7 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11044,7 +13912,7 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -11058,6 +13926,9 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11085,7 +13956,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11113,7 +13984,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11122,11 +13993,14 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -11141,6 +14015,9 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11168,7 +14045,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11196,7 +14073,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11205,11 +14082,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -11229,7 +14109,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11238,11 +14118,14 @@ func (m *RollingUpdateDeployment) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxSurge == nil { - m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxSurge = &intstr.IntOrString{} } if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -11257,6 +14140,9 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11284,7 +14170,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11312,7 +14198,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11322,6 +14208,9 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11341,7 +14230,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11350,6 +14239,9 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11367,6 +14259,9 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11394,7 +14289,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11422,7 +14317,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11432,6 +14327,9 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11451,7 +14349,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11460,6 +14358,9 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11477,6 +14378,127 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + 
iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClassStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClassStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedRuntimeClassNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedRuntimeClassNames = append(m.AllowedRuntimeClassNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultRuntimeClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.DefaultRuntimeClassName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11504,7 +14526,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11532,7 +14554,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11542,6 +14564,9 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11561,7 +14586,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11570,11 +14595,14 @@ func (m 
*SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.SELinuxOptions == nil { - m.SELinuxOptions = &k8s_io_api_core_v1.SELinuxOptions{} + m.SELinuxOptions = &v11.SELinuxOptions{} } if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -11589,6 +14617,9 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11616,7 +14647,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11644,7 +14675,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11653,6 +14684,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11674,7 +14708,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11683,6 +14717,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11704,7 +14741,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11713,6 +14750,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11729,6 +14769,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11756,7 +14799,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11784,7 +14827,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -11798,6 +14841,9 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11825,7 +14871,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11853,7 +14899,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -11872,7 +14918,7 @@ func (m *ScaleStatus) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11881,6 +14927,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11901,7 +14950,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11918,7 +14967,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11928,6 +14977,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -11944,7 +14996,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11954,6 +15006,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -11990,7 +15045,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -12000,6 +15055,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12014,6 +15072,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -12041,7 +15102,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -12069,7 +15130,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -12079,6 +15140,9 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12098,7 +15162,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -12107,6 +15171,9 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ 
-12124,6 +15191,9 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -12190,10 +15260,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -12222,6 +15295,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -12240,238 +15316,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 3622 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcd, 0x6f, 0x1c, 0x47, - 0x76, 0x57, 0xcf, 0x0c, 0x39, 0xc3, 0x47, 0xf1, 0xab, 0x48, 0x91, 0x63, 0xc9, 0xe2, 0xc8, 0x6d, - 0x40, 0x91, 0x1d, 0x69, 0xc6, 0x92, 0x25, 0x59, 0xb1, 0x10, 0xdb, 0x1c, 0x52, 0x94, 0xe8, 0xf0, - 0x63, 0x5c, 0x43, 0x2a, 0x86, 0x11, 0x3b, 0x6e, 0xce, 0x14, 0x87, 0x2d, 0xf6, 0x74, 0xb7, 0xbb, - 0x6b, 0x68, 0x0e, 0x90, 0x43, 0x0e, 0x41, 0x80, 0x00, 0x09, 0x92, 0x8b, 0x93, 0x1c, 0x63, 0x04, - 0xc8, 0x29, 0x41, 0x72, 0xcb, 0x1e, 0x0c, 0x03, 0x0b, 0x78, 0x01, 0x61, 0xe1, 0x05, 0x7c, 0x5b, - 0x9f, 0x88, 0x35, 0x7d, 0x5a, 0xec, 0x3f, 0xb0, 0xd0, 0x61, 0x77, 0x51, 0xd5, 0xd5, 0xdf, 0xdd, - 0x9a, 0x26, 0x2d, 0x11, 0x8b, 0xc5, 0xde, 0x38, 0xf5, 0xde, 0xfb, 0xbd, 0x57, 0x55, 0xaf, 0xde, - 0x7b, 0x5d, 0xf5, 0x08, 0xcb, 0x7b, 0x77, 0xec, 0xaa, 0x6a, 0xd4, 0xf6, 0x7a, 0xdb, 0xc4, 0xd2, - 0x09, 0x25, 0x76, 0x6d, 0x9f, 0xe8, 0x6d, 0xc3, 0xaa, 0x09, 0x82, 0x62, 0xaa, 0x35, 0x72, 0x40, - 0x89, 0x6e, 0xab, 0x86, 0x6e, 0xd7, 0xf6, 0xaf, 0x6f, 0x13, 0xaa, 0x5c, 0xaf, 0x75, 0x88, 0x4e, - 0x2c, 0x85, 0x92, 0x76, 0xd5, 0xb4, 0x0c, 0x6a, 0xa0, 0x8b, 0x0e, 0x7b, 0x55, 0x31, 0xd5, 0xaa, - 0xcf, 0x5e, 0x15, 0xec, 0xe7, 0xaf, 0x75, 0x54, 0xba, 0xdb, 0xdb, 0xae, 0xb6, 0x8c, 0x6e, 0xad, - 0x63, 0x74, 0x8c, 0x1a, 0x97, 0xda, 0xee, 0xed, 0xf0, 0x5f, 0xfc, 0x07, 0xff, 0xcb, 0x41, 0x3b, - 0x2f, 0x07, 0x94, 0xb7, 0x0c, 0x8b, 0xd4, 0xf6, 0x63, 0x1a, 0xcf, 0xdf, 0xf4, 0x79, 0xba, 0x4a, - 0x6b, 0x57, 0xd5, 0x89, 0xd5, 0xaf, 0x99, 0x7b, 0x1d, 0x36, 0x60, 0xd7, 0xba, 0x84, 0x2a, 0x49, - 0x52, 0xb5, 0x34, 0x29, 0xab, 0xa7, 0x53, 0xb5, 0x4b, 0x62, 0x02, 0xb7, 0x07, 0x09, 0xd8, 0xad, - 0x5d, 0xd2, 0x55, 0x62, 0x72, 0xaf, 0xa7, 0xc9, 0xf5, 0xa8, 0xaa, 0xd5, 0x54, 0x9d, 0xda, 0xd4, - 0x8a, 0x0a, 0xc9, 0x37, 0x61, 0x72, 0x41, 0xd3, 0x8c, 0x4f, 0x49, 0x7b, 0xb1, 0xb9, 0xb2, 0x64, - 0xa9, 0xfb, 0xc4, 0x42, 0x97, 0xa0, 0xa0, 0x2b, 0x5d, 0x52, 0x96, 0x2e, 0x49, 0x57, 0x46, 0xea, - 0x67, 0x1f, 0x1f, 0x56, 0xce, 0x1c, 0x1d, 0x56, 0x0a, 0xeb, 0x4a, 0x97, 0x60, 0x4e, 0x91, 0xef, - 0xc2, 0x94, 0x90, 0x5a, 0xd6, 0xc8, 0xc1, 0x43, 0x43, 0xeb, 0x75, 0x09, 0xba, 0x0c, 0xc3, 0x6d, - 0x0e, 0x20, 0x04, 0xc7, 0x85, 0xe0, 0xb0, 0x03, 0x8b, 0x05, 0x55, 0xb6, 0x61, 0x42, 0x08, 0x3f, - 0x30, 0x6c, 0xda, 0x50, 0xe8, 0x2e, 0xba, 0x01, 0x60, 0x2a, 0x74, 0xb7, 0x61, 0x91, 0x1d, 0xf5, - 0x40, 0x88, 0x23, 0x21, 0x0e, 0x0d, 0x8f, 
0x82, 0x03, 0x5c, 0xe8, 0x2a, 0x94, 0x2c, 0xa2, 0xb4, - 0x37, 0x74, 0xad, 0x5f, 0xce, 0x5d, 0x92, 0xae, 0x94, 0xea, 0x93, 0x42, 0xa2, 0x84, 0xc5, 0x38, - 0xf6, 0x38, 0xe4, 0xcf, 0x72, 0x30, 0xb2, 0xa4, 0x90, 0xae, 0xa1, 0x37, 0x09, 0x45, 0x1f, 0x43, - 0x89, 0x6d, 0x57, 0x5b, 0xa1, 0x0a, 0xd7, 0x36, 0x7a, 0xe3, 0xb5, 0xaa, 0xef, 0x4e, 0xde, 0xea, - 0x55, 0xcd, 0xbd, 0x0e, 0x1b, 0xb0, 0xab, 0x8c, 0xbb, 0xba, 0x7f, 0xbd, 0xba, 0xb1, 0xfd, 0x88, - 0xb4, 0xe8, 0x1a, 0xa1, 0x8a, 0x6f, 0x9f, 0x3f, 0x86, 0x3d, 0x54, 0xb4, 0x0e, 0x05, 0xdb, 0x24, - 0x2d, 0x6e, 0xd9, 0xe8, 0x8d, 0xab, 0xd5, 0xa7, 0x3a, 0x6b, 0xd5, 0xb3, 0xac, 0x69, 0x92, 0x96, - 0xbf, 0xe2, 0xec, 0x17, 0xe6, 0x38, 0xe8, 0x21, 0x0c, 0xdb, 0x54, 0xa1, 0x3d, 0xbb, 0x9c, 0xe7, - 0x88, 0xd5, 0xcc, 0x88, 0x5c, 0xca, 0xdf, 0x0c, 0xe7, 0x37, 0x16, 0x68, 0xf2, 0x2f, 0x73, 0x80, - 0x3c, 0xde, 0x45, 0x43, 0x6f, 0xab, 0x54, 0x35, 0x74, 0xf4, 0x26, 0x14, 0x68, 0xdf, 0x74, 0x5d, - 0xe0, 0xb2, 0x6b, 0xd0, 0x66, 0xdf, 0x24, 0x4f, 0x0e, 0x2b, 0xb3, 0x71, 0x09, 0x46, 0xc1, 0x5c, - 0x06, 0xad, 0x7a, 0xa6, 0xe6, 0xb8, 0xf4, 0xcd, 0xb0, 0xea, 0x27, 0x87, 0x95, 0x84, 0xc3, 0x56, - 0xf5, 0x90, 0xc2, 0x06, 0xa2, 0x7d, 0x40, 0x9a, 0x62, 0xd3, 0x4d, 0x4b, 0xd1, 0x6d, 0x47, 0x93, - 0xda, 0x25, 0x62, 0x11, 0x5e, 0xcd, 0xb6, 0x69, 0x4c, 0xa2, 0x7e, 0x5e, 0x58, 0x81, 0x56, 0x63, - 0x68, 0x38, 0x41, 0x03, 0xf3, 0x66, 0x8b, 0x28, 0xb6, 0xa1, 0x97, 0x0b, 0x61, 0x6f, 0xc6, 0x7c, - 0x14, 0x0b, 0x2a, 0x7a, 0x05, 0x8a, 0x5d, 0x62, 0xdb, 0x4a, 0x87, 0x94, 0x87, 0x38, 0xe3, 0x84, - 0x60, 0x2c, 0xae, 0x39, 0xc3, 0xd8, 0xa5, 0xcb, 0x5f, 0x48, 0x30, 0xe6, 0xad, 0xdc, 0xaa, 0x6a, - 0x53, 0xf4, 0x57, 0x31, 0x3f, 0xac, 0x66, 0x9b, 0x12, 0x93, 0xe6, 0x5e, 0xe8, 0xf9, 0xbc, 0x3b, - 0x12, 0xf0, 0xc1, 0x35, 0x18, 0x52, 0x29, 0xe9, 0xb2, 0x7d, 0xc8, 0x5f, 0x19, 0xbd, 0x71, 0x25, - 0xab, 0xcb, 0xd4, 0xc7, 0x04, 0xe8, 0xd0, 0x0a, 0x13, 0xc7, 0x0e, 0x8a, 0xfc, 0xaf, 0x85, 0x80, - 0xf9, 0xcc, 0x35, 0xd1, 0x87, 0x50, 0xb2, 0x89, 0x46, 0x5a, 0xd4, 0xb0, 0x84, 0xf9, 0xaf, 0x67, - 0x34, 0x5f, 0xd9, 0x26, 0x5a, 0x53, 0x88, 0xd6, 0xcf, 0x32, 0xfb, 0xdd, 0x5f, 0xd8, 0x83, 0x44, - 0xef, 0x41, 0x89, 0x92, 0xae, 0xa9, 0x29, 0x94, 0x88, 0x73, 0xf4, 0x72, 0x70, 0x0a, 0xcc, 0x73, - 0x18, 0x58, 0xc3, 0x68, 0x6f, 0x0a, 0x36, 0x7e, 0x7c, 0xbc, 0x25, 0x71, 0x47, 0xb1, 0x07, 0x83, - 0xf6, 0x61, 0xbc, 0x67, 0xb6, 0x19, 0x27, 0x65, 0x51, 0xb0, 0xd3, 0x17, 0x9e, 0x74, 0x3b, 0xeb, - 0xda, 0x6c, 0x85, 0xa4, 0xeb, 0xb3, 0x42, 0xd7, 0x78, 0x78, 0x1c, 0x47, 0xb4, 0xa0, 0x05, 0x98, - 0xe8, 0xaa, 0x3a, 0x8b, 0x4b, 0xfd, 0x26, 0x69, 0x19, 0x7a, 0xdb, 0xe6, 0x6e, 0x35, 0x54, 0x9f, - 0x13, 0x00, 0x13, 0x6b, 0x61, 0x32, 0x8e, 0xf2, 0xa3, 0x77, 0x01, 0xb9, 0xd3, 0xb8, 0xef, 0x04, - 0x71, 0xd5, 0xd0, 0xb9, 0xcf, 0xe5, 0x7d, 0xe7, 0xde, 0x8c, 0x71, 0xe0, 0x04, 0x29, 0xb4, 0x0a, - 0x33, 0x16, 0xd9, 0x57, 0xd9, 0x1c, 0x1f, 0xa8, 0x36, 0x35, 0xac, 0xfe, 0xaa, 0xda, 0x55, 0x69, - 0x79, 0x98, 0xdb, 0x54, 0x3e, 0x3a, 0xac, 0xcc, 0xe0, 0x04, 0x3a, 0x4e, 0x94, 0x92, 0xff, 0x6d, - 0x18, 0x26, 0x22, 0xf1, 0x06, 0x3d, 0x84, 0xd9, 0x56, 0xcf, 0xb2, 0x88, 0x4e, 0xd7, 0x7b, 0xdd, - 0x6d, 0x62, 0x35, 0x5b, 0xbb, 0xa4, 0xdd, 0xd3, 0x48, 0x9b, 0x3b, 0xca, 0x50, 0x7d, 0x5e, 0x58, - 0x3c, 0xbb, 0x98, 0xc8, 0x85, 0x53, 0xa4, 0xd9, 0x2a, 0xe8, 0x7c, 0x68, 0x4d, 0xb5, 0x6d, 0x0f, - 0x33, 0xc7, 0x31, 0xbd, 0x55, 0x58, 0x8f, 0x71, 0xe0, 0x04, 0x29, 0x66, 0x63, 0x9b, 0xd8, 0xaa, - 0x45, 0xda, 0x51, 0x1b, 0xf3, 0x61, 0x1b, 0x97, 0x12, 0xb9, 0x70, 0x8a, 0x34, 0xba, 0x05, 0xa3, - 0x8e, 0x36, 0xbe, 0x7f, 0x62, 0xa3, 0xa7, 0x05, 0xd8, 0xe8, 0xba, 
0x4f, 0xc2, 0x41, 0x3e, 0x36, - 0x35, 0x63, 0xdb, 0x26, 0xd6, 0x3e, 0x69, 0xa7, 0x6f, 0xf0, 0x46, 0x8c, 0x03, 0x27, 0x48, 0xb1, - 0xa9, 0x39, 0x1e, 0x18, 0x9b, 0xda, 0x70, 0x78, 0x6a, 0x5b, 0x89, 0x5c, 0x38, 0x45, 0x9a, 0xf9, - 0xb1, 0x63, 0xf2, 0xc2, 0xbe, 0xa2, 0x6a, 0xca, 0xb6, 0x46, 0xca, 0xc5, 0xb0, 0x1f, 0xaf, 0x87, - 0xc9, 0x38, 0xca, 0x8f, 0xee, 0xc3, 0x94, 0x33, 0xb4, 0xa5, 0x2b, 0x1e, 0x48, 0x89, 0x83, 0xbc, - 0x20, 0x40, 0xa6, 0xd6, 0xa3, 0x0c, 0x38, 0x2e, 0x83, 0xde, 0x84, 0xf1, 0x96, 0xa1, 0x69, 0xdc, - 0x1f, 0x17, 0x8d, 0x9e, 0x4e, 0xcb, 0x23, 0x1c, 0x05, 0xb1, 0xf3, 0xb8, 0x18, 0xa2, 0xe0, 0x08, - 0x27, 0x22, 0x00, 0x2d, 0x37, 0xe1, 0xd8, 0x65, 0xe0, 0xf1, 0xf1, 0x7a, 0xd6, 0x18, 0xe0, 0xa5, - 0x2a, 0xbf, 0x06, 0xf0, 0x86, 0x6c, 0x1c, 0x00, 0x96, 0x7f, 0x2a, 0xc1, 0x5c, 0x4a, 0xe8, 0x40, - 0x6f, 0x87, 0x52, 0xec, 0x9f, 0x46, 0x52, 0xec, 0x85, 0x14, 0xb1, 0x40, 0x9e, 0xd5, 0x61, 0xcc, - 0x62, 0xb3, 0xd2, 0x3b, 0x0e, 0x8b, 0x88, 0x91, 0xb7, 0x06, 0x4c, 0x03, 0x07, 0x65, 0xfc, 0x98, - 0x3f, 0x75, 0x74, 0x58, 0x19, 0x0b, 0xd1, 0x70, 0x18, 0x5e, 0xfe, 0xf7, 0x1c, 0xc0, 0x12, 0x31, - 0x35, 0xa3, 0xdf, 0x25, 0xfa, 0x69, 0xd4, 0x50, 0x1b, 0xa1, 0x1a, 0xea, 0xda, 0xa0, 0xed, 0xf1, - 0x4c, 0x4b, 0x2d, 0xa2, 0xfe, 0x32, 0x52, 0x44, 0xd5, 0xb2, 0x43, 0x3e, 0xbd, 0x8a, 0xfa, 0x79, - 0x1e, 0xa6, 0x7d, 0x66, 0xbf, 0x8c, 0xba, 0x1b, 0xda, 0xe3, 0x3f, 0x89, 0xec, 0xf1, 0x5c, 0x82, - 0xc8, 0x73, 0xab, 0xa3, 0x9e, 0x7d, 0x3d, 0x83, 0x1e, 0xc1, 0x38, 0x2b, 0x9c, 0x1c, 0xf7, 0xe0, - 0x65, 0xd9, 0xf0, 0xb1, 0xcb, 0x32, 0x2f, 0x81, 0xae, 0x86, 0x90, 0x70, 0x04, 0x39, 0xa5, 0x0c, - 0x2c, 0x3e, 0xef, 0x32, 0x50, 0xfe, 0x52, 0x82, 0x71, 0x7f, 0x9b, 0x4e, 0xa1, 0x68, 0x5b, 0x0f, - 0x17, 0x6d, 0xaf, 0x64, 0x76, 0xd1, 0x94, 0xaa, 0xed, 0xd7, 0xac, 0xc0, 0xf7, 0x98, 0xd8, 0x01, - 0xdf, 0x56, 0x5a, 0x7b, 0x83, 0xbf, 0xf1, 0xd0, 0x67, 0x12, 0x20, 0x91, 0x05, 0x16, 0x74, 0xdd, - 0xa0, 0x8a, 0x13, 0x2b, 0x1d, 0xb3, 0x56, 0x32, 0x9b, 0xe5, 0x6a, 0xac, 0x6e, 0xc5, 0xb0, 0xee, - 0xe9, 0xd4, 0xea, 0xfb, 0x3b, 0x12, 0x67, 0xc0, 0x09, 0x06, 0x20, 0x05, 0xc0, 0x12, 0x98, 0x9b, - 0x86, 0x38, 0xc8, 0xd7, 0x32, 0xc4, 0x3c, 0x26, 0xb0, 0x68, 0xe8, 0x3b, 0x6a, 0xc7, 0x0f, 0x3b, - 0xd8, 0x03, 0xc2, 0x01, 0xd0, 0xf3, 0xf7, 0x60, 0x2e, 0xc5, 0x5a, 0x34, 0x09, 0xf9, 0x3d, 0xd2, - 0x77, 0x96, 0x0d, 0xb3, 0x3f, 0xd1, 0x0c, 0x0c, 0xed, 0x2b, 0x5a, 0xcf, 0x09, 0xbf, 0x23, 0xd8, - 0xf9, 0xf1, 0x66, 0xee, 0x8e, 0x24, 0x7f, 0x31, 0x14, 0xf4, 0x1d, 0x5e, 0x31, 0x5f, 0x61, 0x1f, - 0xad, 0xa6, 0xa6, 0xb6, 0x14, 0x5b, 0x14, 0x42, 0x67, 0x9d, 0x0f, 0x56, 0x67, 0x0c, 0x7b, 0xd4, - 0x50, 0x6d, 0x9d, 0x7b, 0xbe, 0xb5, 0x75, 0xfe, 0xd9, 0xd4, 0xd6, 0x7f, 0x0d, 0x25, 0xdb, 0xad, - 0xaa, 0x0b, 0x1c, 0xf2, 0xfa, 0x31, 0xe2, 0xab, 0x28, 0xa8, 0x3d, 0x05, 0x5e, 0x29, 0xed, 0x81, - 0x26, 0x15, 0xd1, 0x43, 0xc7, 0x2c, 0xa2, 0x9f, 0x69, 0xe1, 0xcb, 0x62, 0xaa, 0xa9, 0xf4, 0x6c, - 0xd2, 0xe6, 0x81, 0xa8, 0xe4, 0xc7, 0xd4, 0x06, 0x1f, 0xc5, 0x82, 0x8a, 0x3e, 0x0c, 0xb9, 0x6c, - 0xe9, 0x24, 0x2e, 0x3b, 0x9e, 0xee, 0xae, 0x68, 0x0b, 0xe6, 0x4c, 0xcb, 0xe8, 0x58, 0xc4, 0xb6, - 0x97, 0x88, 0xd2, 0xd6, 0x54, 0x9d, 0xb8, 0xeb, 0xe3, 0x54, 0x44, 0x17, 0x8e, 0x0e, 0x2b, 0x73, - 0x8d, 0x64, 0x16, 0x9c, 0x26, 0x2b, 0x3f, 0x2e, 0xc0, 0x64, 0x34, 0x03, 0xa6, 0x14, 0xa9, 0xd2, - 0x89, 0x8a, 0xd4, 0xab, 0x81, 0xc3, 0xe0, 0x54, 0xf0, 0x81, 0x1b, 0x9c, 0xd8, 0x81, 0x58, 0x80, - 0x09, 0x11, 0x0d, 0x5c, 0xa2, 0x28, 0xd3, 0xbd, 0xdd, 0xdf, 0x0a, 0x93, 0x71, 0x94, 0x9f, 0x95, - 0x9e, 0x7e, 0x45, 0xe9, 0x82, 0x14, 0xc2, 0xa5, 0xe7, 0x42, 0x94, 0x01, 0xc7, 0x65, 0xd0, 
0x1a, - 0x4c, 0xf7, 0xf4, 0x38, 0x94, 0xe3, 0x8d, 0x17, 0x04, 0xd4, 0xf4, 0x56, 0x9c, 0x05, 0x27, 0xc9, - 0xa1, 0x9d, 0x50, 0x35, 0x3a, 0xcc, 0x23, 0xec, 0x8d, 0xcc, 0x67, 0x27, 0x73, 0x39, 0x8a, 0xee, - 0xc2, 0x98, 0xc5, 0xbf, 0x3b, 0x5c, 0x83, 0x9d, 0xda, 0xfd, 0x9c, 0x10, 0x1b, 0xc3, 0x41, 0x22, - 0x0e, 0xf3, 0x26, 0x94, 0xdb, 0xa5, 0xac, 0xe5, 0xb6, 0xfc, 0x63, 0x29, 0x98, 0x84, 0xbc, 0x12, - 0x78, 0xd0, 0x2d, 0x53, 0x4c, 0x22, 0x50, 0x1d, 0x19, 0xc9, 0xd5, 0xef, 0xed, 0x63, 0x55, 0xbf, - 0x7e, 0xf2, 0x1c, 0x5c, 0xfe, 0x7e, 0x2e, 0xc1, 0xec, 0x72, 0xf3, 0xbe, 0x65, 0xf4, 0x4c, 0xd7, - 0x9c, 0x0d, 0xd3, 0x59, 0xd7, 0x37, 0xa0, 0x60, 0xf5, 0x34, 0x77, 0x1e, 0x2f, 0xbb, 0xf3, 0xc0, - 0x3d, 0x8d, 0xcd, 0x63, 0x3a, 0x22, 0xe5, 0x4c, 0x82, 0x09, 0xa0, 0x75, 0x18, 0xb6, 0x14, 0xbd, - 0x43, 0xdc, 0xb4, 0x7a, 0x79, 0x80, 0xf5, 0x2b, 0x4b, 0x98, 0xb1, 0x07, 0x8a, 0x37, 0x2e, 0x8d, - 0x05, 0x8a, 0xfc, 0x4f, 0x12, 0x4c, 0x3c, 0xd8, 0xdc, 0x6c, 0xac, 0xe8, 0xfc, 0x44, 0xf3, 0xbb, - 0xd5, 0x4b, 0x50, 0x30, 0x15, 0xba, 0x1b, 0xcd, 0xf4, 0x8c, 0x86, 0x39, 0x05, 0xbd, 0x0f, 0x45, - 0x16, 0x49, 0x88, 0xde, 0xce, 0x58, 0x6a, 0x0b, 0xf8, 0xba, 0x23, 0xe4, 0x57, 0x88, 0x62, 0x00, - 0xbb, 0x70, 0xf2, 0x1e, 0xcc, 0x04, 0xcc, 0x61, 0xeb, 0xf1, 0x90, 0x65, 0x47, 0xd4, 0x84, 0x21, - 0xa6, 0x99, 0xe5, 0xc0, 0x7c, 0x86, 0xcb, 0xcc, 0xc8, 0x94, 0xfc, 0x4a, 0x87, 0xfd, 0xb2, 0xb1, - 0x83, 0x25, 0xaf, 0xc1, 0x18, 0xbf, 0x50, 0x36, 0x2c, 0xca, 0x97, 0x05, 0x5d, 0x84, 0x7c, 0x57, - 0xd5, 0x45, 0x9e, 0x1d, 0x15, 0x32, 0x79, 0x96, 0x23, 0xd8, 0x38, 0x27, 0x2b, 0x07, 0x22, 0xf2, - 0xf8, 0x64, 0xe5, 0x00, 0xb3, 0x71, 0xf9, 0x3e, 0x14, 0xc5, 0x72, 0x07, 0x81, 0xf2, 0x4f, 0x07, - 0xca, 0x27, 0x00, 0x6d, 0x40, 0x71, 0xa5, 0x51, 0xd7, 0x0c, 0xa7, 0xea, 0x6a, 0xa9, 0x6d, 0x2b, - 0xba, 0x17, 0x8b, 0x2b, 0x4b, 0x18, 0x73, 0x0a, 0x92, 0x61, 0x98, 0x1c, 0xb4, 0x88, 0x49, 0xb9, - 0x47, 0x8c, 0xd4, 0x81, 0xed, 0xf2, 0x3d, 0x3e, 0x82, 0x05, 0x45, 0xfe, 0xe7, 0x1c, 0x14, 0xc5, - 0x72, 0x9c, 0xc2, 0x57, 0xd8, 0x6a, 0xe8, 0x2b, 0xec, 0xd5, 0x6c, 0xae, 0x91, 0xfa, 0x09, 0xb6, - 0x19, 0xf9, 0x04, 0xbb, 0x9a, 0x11, 0xef, 0xe9, 0xdf, 0x5f, 0xff, 0x27, 0xc1, 0x78, 0xd8, 0x29, - 0xd1, 0x2d, 0x18, 0x65, 0x09, 0x47, 0x6d, 0x91, 0x75, 0xbf, 0xce, 0xf5, 0x2e, 0x61, 0x9a, 0x3e, - 0x09, 0x07, 0xf9, 0x50, 0xc7, 0x13, 0x63, 0x7e, 0x24, 0x26, 0x9d, 0xbe, 0xa4, 0x3d, 0xaa, 0x6a, - 0x55, 0xe7, 0x69, 0xa5, 0xba, 0xa2, 0xd3, 0x0d, 0xab, 0x49, 0x2d, 0x55, 0xef, 0xc4, 0x14, 0x71, - 0xa7, 0x0c, 0x22, 0xcb, 0x3f, 0x92, 0x60, 0x54, 0x98, 0x7c, 0x0a, 0x5f, 0x15, 0x7f, 0x11, 0xfe, - 0xaa, 0xb8, 0x9c, 0xf1, 0x80, 0x27, 0x7f, 0x52, 0xfc, 0x97, 0x6f, 0x3a, 0x3b, 0xd2, 0xcc, 0xab, - 0x77, 0x0d, 0x9b, 0x46, 0xbd, 0x9a, 0x1d, 0x46, 0xcc, 0x29, 0xa8, 0x07, 0x93, 0x6a, 0x24, 0x06, - 0x88, 0xa5, 0xad, 0x65, 0xb3, 0xc4, 0x13, 0xab, 0x97, 0x05, 0xfc, 0x64, 0x94, 0x82, 0x63, 0x2a, - 0x64, 0x02, 0x31, 0x2e, 0xf4, 0x1e, 0x14, 0x76, 0x29, 0x35, 0x13, 0xee, 0xab, 0x07, 0x44, 0x1e, - 0xdf, 0x84, 0x12, 0x9f, 0xdd, 0xe6, 0x66, 0x03, 0x73, 0x28, 0xf9, 0x37, 0xfe, 0x7a, 0x34, 0x1d, - 0x1f, 0xf7, 0xe2, 0xa9, 0x74, 0x92, 0x78, 0x3a, 0x9a, 0x14, 0x4b, 0xd1, 0x03, 0xc8, 0x53, 0x2d, - 0xeb, 0x67, 0xa1, 0x40, 0xdc, 0x5c, 0x6d, 0xfa, 0x01, 0x69, 0x73, 0xb5, 0x89, 0x19, 0x04, 0xda, - 0x80, 0x21, 0x96, 0x7d, 0xd8, 0x11, 0xcc, 0x67, 0x3f, 0xd2, 0x6c, 0xfe, 0xbe, 0x43, 0xb0, 0x5f, - 0x36, 0x76, 0x70, 0xe4, 0x4f, 0x60, 0x2c, 0x74, 0x4e, 0xd1, 0xc7, 0x70, 0x56, 0x33, 0x94, 0x76, - 0x5d, 0xd1, 0x14, 0xbd, 0x45, 0xdc, 0xc7, 0x81, 0xcb, 0x49, 0x5f, 0x18, 0xab, 0x01, 0x3e, 0x71, - 0xca, 0x67, 0x84, 
0x92, 0xb3, 0x41, 0x1a, 0x0e, 0x21, 0xca, 0x0a, 0x80, 0x3f, 0x47, 0x54, 0x81, - 0x21, 0xe6, 0x67, 0x4e, 0x3e, 0x19, 0xa9, 0x8f, 0x30, 0x0b, 0x99, 0xfb, 0xd9, 0xd8, 0x19, 0x47, - 0x37, 0x00, 0x6c, 0xd2, 0xb2, 0x08, 0xe5, 0xc1, 0x20, 0x17, 0x7e, 0x60, 0x6c, 0x7a, 0x14, 0x1c, - 0xe0, 0x92, 0x7f, 0x22, 0xc1, 0xd8, 0x3a, 0xa1, 0x9f, 0x1a, 0xd6, 0x5e, 0xc3, 0xd0, 0xd4, 0x56, - 0xff, 0x14, 0x82, 0x2d, 0x0e, 0x05, 0xdb, 0xd7, 0x06, 0xec, 0x4c, 0xc8, 0xba, 0xb4, 0x90, 0x2b, - 0x7f, 0x29, 0xc1, 0x5c, 0x88, 0xf3, 0x9e, 0x7f, 0x74, 0xb7, 0x60, 0xc8, 0x34, 0x2c, 0xea, 0x26, - 0xe2, 0x63, 0x29, 0x64, 0x61, 0x2c, 0x90, 0x8a, 0x19, 0x0c, 0x76, 0xd0, 0xd0, 0x2a, 0xe4, 0xa8, - 0x21, 0x5c, 0xf5, 0x78, 0x98, 0x84, 0x58, 0x75, 0x10, 0x98, 0xb9, 0x4d, 0x03, 0xe7, 0xa8, 0xc1, - 0x36, 0xa2, 0x1c, 0xe2, 0x0a, 0x06, 0x9f, 0xe7, 0x34, 0x03, 0x0c, 0x85, 0x1d, 0xcb, 0xe8, 0x9e, - 0x78, 0x0e, 0xde, 0x46, 0x2c, 0x5b, 0x46, 0x17, 0x73, 0x2c, 0xf9, 0x2b, 0x09, 0xa6, 0x42, 0x9c, - 0xa7, 0x10, 0xf8, 0xdf, 0x0b, 0x07, 0xfe, 0xab, 0xc7, 0x99, 0x48, 0x4a, 0xf8, 0xff, 0x2a, 0x17, - 0x99, 0x06, 0x9b, 0x30, 0xda, 0x81, 0x51, 0xd3, 0x68, 0x37, 0x9f, 0xc1, 0x73, 0xe0, 0x04, 0xcb, - 0x9b, 0x0d, 0x1f, 0x0b, 0x07, 0x81, 0xd1, 0x01, 0x4c, 0xe9, 0x4a, 0x97, 0xd8, 0xa6, 0xd2, 0x22, - 0xcd, 0x67, 0x70, 0x41, 0x72, 0x8e, 0xbf, 0x37, 0x44, 0x11, 0x71, 0x5c, 0x09, 0x5a, 0x83, 0xa2, - 0x6a, 0xf2, 0x3a, 0x4e, 0xd4, 0x2e, 0x03, 0xb3, 0xa8, 0x53, 0xf5, 0x39, 0xf1, 0x5c, 0xfc, 0xc0, - 0x2e, 0x86, 0xfc, 0xdf, 0x51, 0x6f, 0x60, 0xfe, 0x87, 0xee, 0x43, 0x89, 0x37, 0x66, 0xb4, 0x0c, - 0xcd, 0x7d, 0x19, 0x60, 0x3b, 0xdb, 0x10, 0x63, 0x4f, 0x0e, 0x2b, 0x17, 0x12, 0x2e, 0x7d, 0x5d, - 0x32, 0xf6, 0x84, 0xd1, 0x3a, 0x14, 0xcc, 0x1f, 0x52, 0xc1, 0xf0, 0x24, 0xc7, 0xcb, 0x16, 0x8e, - 0x23, 0xff, 0x5d, 0x3e, 0x62, 0x2e, 0x4f, 0x75, 0x8f, 0x9e, 0xd9, 0xae, 0x7b, 0x15, 0x53, 0xea, - 0xce, 0x6f, 0x43, 0x51, 0x64, 0x78, 0xe1, 0xcc, 0x6f, 0x1c, 0xc7, 0x99, 0x83, 0x59, 0xcc, 0xfb, - 0x60, 0x71, 0x07, 0x5d, 0x60, 0xf4, 0x11, 0x0c, 0x13, 0x47, 0x85, 0x93, 0x1b, 0x6f, 0x1f, 0x47, - 0x85, 0x1f, 0x57, 0xfd, 0x42, 0x55, 0x8c, 0x09, 0x54, 0xf4, 0x36, 0x5b, 0x2f, 0xc6, 0xcb, 0x3e, - 0x02, 0xed, 0x72, 0x81, 0xa7, 0xab, 0x8b, 0xce, 0xb4, 0xbd, 0xe1, 0x27, 0x87, 0x15, 0xf0, 0x7f, - 0xe2, 0xa0, 0x84, 0xfc, 0x33, 0x09, 0xa6, 0xf8, 0x0a, 0xb5, 0x7a, 0x96, 0x4a, 0xfb, 0xa7, 0x96, - 0x98, 0x1e, 0x86, 0x12, 0xd3, 0xcd, 0x01, 0xcb, 0x12, 0xb3, 0x30, 0x35, 0x39, 0x7d, 0x2d, 0xc1, - 0xb9, 0x18, 0xf7, 0x29, 0xc4, 0xc5, 0xad, 0x70, 0x5c, 0x7c, 0xed, 0xb8, 0x13, 0x4a, 0x89, 0x8d, - 0xbf, 0x9d, 0x4c, 0x98, 0x0e, 0x3f, 0x29, 0x37, 0x00, 0x4c, 0x4b, 0xdd, 0x57, 0x35, 0xd2, 0x11, - 0x8f, 0xe0, 0xa5, 0x40, 0x8b, 0x93, 0x47, 0xc1, 0x01, 0x2e, 0x64, 0xc3, 0x6c, 0x9b, 0xec, 0x28, - 0x3d, 0x8d, 0x2e, 0xb4, 0xdb, 0x8b, 0x8a, 0xa9, 0x6c, 0xab, 0x9a, 0x4a, 0x55, 0x71, 0x5d, 0x30, - 0x52, 0xbf, 0xeb, 0x3c, 0x4e, 0x27, 0x71, 0x3c, 0x39, 0xac, 0x5c, 0x4c, 0x7a, 0x1d, 0x72, 0x59, - 0xfa, 0x38, 0x05, 0x1a, 0xf5, 0xa1, 0x6c, 0x91, 0x4f, 0x7a, 0xaa, 0x45, 0xda, 0x4b, 0x96, 0x61, - 0x86, 0xd4, 0xe6, 0xb9, 0xda, 0x3f, 0x3f, 0x3a, 0xac, 0x94, 0x71, 0x0a, 0xcf, 0x60, 0xc5, 0xa9, - 0xf0, 0xe8, 0x11, 0x4c, 0x2b, 0xa2, 0x19, 0x2d, 0xa8, 0xd5, 0x39, 0x25, 0x77, 0x8e, 0x0e, 0x2b, - 0xd3, 0x0b, 0x71, 0xf2, 0x60, 0x85, 0x49, 0xa0, 0xa8, 0x06, 0xc5, 0x7d, 0xde, 0xb7, 0x66, 0x97, - 0x87, 0x38, 0x3e, 0x4b, 0x04, 0x45, 0xa7, 0x95, 0x8d, 0x61, 0x0e, 0x2f, 0x37, 0xf9, 0xe9, 0x73, - 0xb9, 0xd8, 0x07, 0x25, 0xab, 0x25, 0xc5, 0x89, 0xe7, 0x37, 0xc6, 0x25, 0x3f, 0x6a, 0x3d, 0xf0, - 0x49, 0x38, 0xc8, 0x87, 0x3e, 0x84, 0x91, 
0x5d, 0x71, 0x2b, 0x61, 0x97, 0x8b, 0x99, 0x92, 0x70, - 0xe8, 0x16, 0xa3, 0x3e, 0x25, 0x54, 0x8c, 0xb8, 0xc3, 0x36, 0xf6, 0x11, 0xd1, 0x2b, 0x50, 0xe4, - 0x3f, 0x56, 0x96, 0xf8, 0x75, 0x5c, 0xc9, 0x8f, 0x6d, 0x0f, 0x9c, 0x61, 0xec, 0xd2, 0x5d, 0xd6, - 0x95, 0xc6, 0x22, 0xbf, 0x16, 0x8e, 0xb0, 0xae, 0x34, 0x16, 0xb1, 0x4b, 0x47, 0x1f, 0x43, 0xd1, - 0x26, 0xab, 0xaa, 0xde, 0x3b, 0x28, 0x43, 0xa6, 0x47, 0xe5, 0xe6, 0x3d, 0xce, 0x1d, 0xb9, 0x18, - 0xf3, 0x35, 0x08, 0x3a, 0x76, 0x61, 0xd1, 0x2e, 0x8c, 0x58, 0x3d, 0x7d, 0xc1, 0xde, 0xb2, 0x89, - 0x55, 0x1e, 0xe5, 0x3a, 0x06, 0x85, 0x73, 0xec, 0xf2, 0x47, 0xb5, 0x78, 0x2b, 0xe4, 0x71, 0x60, - 0x1f, 0x1c, 0xfd, 0xa3, 0x04, 0xc8, 0xee, 0x99, 0xa6, 0x46, 0xba, 0x44, 0xa7, 0x8a, 0xc6, 0xef, - 0xe2, 0xec, 0xf2, 0x59, 0xae, 0xf3, 0x9d, 0x41, 0xf3, 0x8a, 0x09, 0x46, 0x95, 0x7b, 0x97, 0xde, - 0x71, 0x56, 0x9c, 0xa0, 0x97, 0x2d, 0xed, 0x8e, 0xcd, 0xff, 0x2e, 0x8f, 0x65, 0x5a, 0xda, 0xe4, - 0x3b, 0x47, 0x7f, 0x69, 0x05, 0x1d, 0xbb, 0xb0, 0xe8, 0x21, 0xcc, 0xba, 0x6d, 0x8f, 0xd8, 0x30, - 0xe8, 0xb2, 0xaa, 0x11, 0xbb, 0x6f, 0x53, 0xd2, 0x2d, 0x8f, 0xf3, 0x6d, 0xf7, 0x7a, 0x3f, 0x70, - 0x22, 0x17, 0x4e, 0x91, 0x46, 0x5d, 0xa8, 0xb8, 0x21, 0x83, 0x9d, 0x27, 0x2f, 0x66, 0xdd, 0xb3, - 0x5b, 0x8a, 0xe6, 0xbc, 0x03, 0x4c, 0x70, 0x05, 0x2f, 0x1f, 0x1d, 0x56, 0x2a, 0x4b, 0x4f, 0x67, - 0xc5, 0x83, 0xb0, 0xd0, 0xfb, 0x50, 0x56, 0xd2, 0xf4, 0x4c, 0x72, 0x3d, 0x2f, 0xb2, 0x38, 0x94, - 0xaa, 0x20, 0x55, 0x1a, 0x51, 0x98, 0x54, 0xc2, 0x0d, 0xa8, 0x76, 0x79, 0x2a, 0xd3, 0x45, 0x64, - 0xa4, 0x6f, 0xd5, 0xbf, 0x8c, 0x88, 0x10, 0x6c, 0x1c, 0xd3, 0x80, 0xfe, 0x06, 0x90, 0x12, 0xed, - 0x99, 0xb5, 0xcb, 0x28, 0x53, 0xfa, 0x89, 0x35, 0xdb, 0xfa, 0x6e, 0x17, 0x23, 0xd9, 0x38, 0x41, - 0x0f, 0x5a, 0x85, 0x19, 0x31, 0xba, 0xa5, 0xdb, 0xca, 0x0e, 0x69, 0xf6, 0xed, 0x16, 0xd5, 0xec, - 0xf2, 0x34, 0x8f, 0x7d, 0xfc, 0xe1, 0x6b, 0x21, 0x81, 0x8e, 0x13, 0xa5, 0xd0, 0x3b, 0x30, 0xb9, - 0x63, 0x58, 0xdb, 0x6a, 0xbb, 0x4d, 0x74, 0x17, 0x69, 0x86, 0x23, 0xcd, 0xb0, 0xd5, 0x58, 0x8e, - 0xd0, 0x70, 0x8c, 0x1b, 0xd9, 0x70, 0x4e, 0x20, 0x37, 0x2c, 0xa3, 0xb5, 0x66, 0xf4, 0x74, 0xea, - 0x94, 0x44, 0xe7, 0xbc, 0x14, 0x73, 0x6e, 0x21, 0x89, 0xe1, 0xc9, 0x61, 0xe5, 0x52, 0x72, 0x05, - 0xec, 0x33, 0xe1, 0x64, 0x6c, 0xb4, 0x0b, 0xc0, 0xe3, 0x82, 0x73, 0xfc, 0x66, 0xf9, 0xf1, 0xbb, - 0x93, 0x25, 0xea, 0x24, 0x9e, 0x40, 0xe7, 0x49, 0xce, 0x23, 0xe3, 0x00, 0x36, 0xfb, 0x4a, 0x51, - 0x22, 0x6d, 0xd5, 0x76, 0x79, 0x8e, 0xef, 0x75, 0x2d, 0xdb, 0x5e, 0x7b, 0x72, 0x81, 0xa7, 0xa9, - 0x28, 0x22, 0x8e, 0x2b, 0xe1, 0x5d, 0x3a, 0xe2, 0xcd, 0xe6, 0x74, 0x3a, 0x9d, 0x8f, 0xd7, 0xa5, - 0xe3, 0x9b, 0xf6, 0xcc, 0xba, 0x74, 0x02, 0x90, 0x4f, 0xbf, 0x25, 0xfe, 0x55, 0x0e, 0xa6, 0x7d, - 0xe6, 0xcc, 0x5d, 0x3a, 0x09, 0x22, 0x7f, 0xec, 0x76, 0x1e, 0xdc, 0xed, 0xfc, 0xa5, 0x04, 0xe3, - 0xfe, 0xd2, 0xfd, 0xfe, 0x75, 0xce, 0xf8, 0xb6, 0xa5, 0xd4, 0xf2, 0xff, 0x9b, 0x0b, 0x4e, 0xe0, - 0x0f, 0xbe, 0x7d, 0xe3, 0x87, 0xb7, 0x28, 0xcb, 0x5f, 0xe7, 0x61, 0x32, 0x7a, 0x1a, 0x43, 0xaf, - 0xfc, 0xd2, 0xc0, 0x57, 0xfe, 0x06, 0xcc, 0xec, 0xf4, 0x34, 0xad, 0xcf, 0x97, 0x21, 0xf0, 0xd4, - 0xef, 0xbc, 0xd2, 0xbd, 0x28, 0x24, 0x67, 0x96, 0x13, 0x78, 0x70, 0xa2, 0x64, 0x4a, 0xc7, 0x42, - 0xfe, 0x44, 0x1d, 0x0b, 0xb1, 0x07, 0xf4, 0xc2, 0x31, 0x1e, 0xd0, 0x13, 0xbb, 0x0f, 0x86, 0x4e, - 0xd0, 0x7d, 0x70, 0x92, 0x76, 0x81, 0x84, 0x20, 0x36, 0xb0, 0x7b, 0xf5, 0x45, 0x38, 0x2f, 0xc4, - 0x28, 0x7f, 0xc9, 0xd7, 0xa9, 0x65, 0x68, 0x1a, 0xb1, 0x96, 0x7a, 0xdd, 0x6e, 0x5f, 0x7e, 0x0b, - 0xc6, 0xc3, 0x3d, 0x2a, 0xce, 0x4e, 0x3b, 0x6d, 0x32, 0xe2, 0xad, 
0x34, 0xb0, 0xd3, 0xce, 0x38, - 0xf6, 0x38, 0xe4, 0xbf, 0x97, 0x60, 0x36, 0xb9, 0x17, 0x15, 0x69, 0x30, 0xde, 0x55, 0x0e, 0x82, - 0xfd, 0xc1, 0xd2, 0x09, 0x6f, 0xb1, 0x78, 0x73, 0xc2, 0x5a, 0x08, 0x0b, 0x47, 0xb0, 0xe5, 0xef, - 0x25, 0x98, 0x4b, 0x69, 0x0b, 0x38, 0x5d, 0x4b, 0xd0, 0x07, 0x50, 0xea, 0x2a, 0x07, 0xcd, 0x9e, - 0xd5, 0x21, 0x27, 0xbe, 0xb7, 0xe3, 0x11, 0x63, 0x4d, 0xa0, 0x60, 0x0f, 0x4f, 0xfe, 0x4f, 0x09, - 0x5e, 0x48, 0xad, 0x65, 0xd0, 0xed, 0x50, 0x07, 0x83, 0x1c, 0xe9, 0x60, 0x40, 0x71, 0xc1, 0xe7, - 0xd4, 0xc0, 0xf0, 0xb9, 0x04, 0xe5, 0xb4, 0xef, 0x3c, 0x74, 0x2b, 0x64, 0xe4, 0x4b, 0x11, 0x23, - 0xa7, 0x62, 0x72, 0xcf, 0xc9, 0xc6, 0xff, 0x91, 0x60, 0x36, 0xf9, 0x7b, 0x17, 0xbd, 0x1e, 0xb2, - 0xb0, 0x12, 0xb1, 0x70, 0x22, 0x22, 0x25, 0xec, 0xfb, 0x08, 0xc6, 0xc5, 0x57, 0xb1, 0x80, 0x11, - 0x7b, 0x2f, 0x27, 0x45, 0x74, 0x01, 0xe1, 0xd6, 0xa0, 0xdc, 0xab, 0xc2, 0x63, 0x38, 0x82, 0x26, - 0xff, 0x43, 0x0e, 0x86, 0x9a, 0x2d, 0x45, 0x23, 0xa7, 0x50, 0x0c, 0xbe, 0x1b, 0x2a, 0x06, 0x07, - 0xfd, 0xc7, 0x11, 0xb7, 0x2a, 0xb5, 0x0e, 0xc4, 0x91, 0x3a, 0xf0, 0xd5, 0x4c, 0x68, 0x4f, 0x2f, - 0x01, 0xff, 0x0c, 0x46, 0x3c, 0xa5, 0xc7, 0xcb, 0x4c, 0xf2, 0x7f, 0xe4, 0x60, 0x34, 0xa0, 0xe2, - 0x98, 0x79, 0x6d, 0x27, 0x54, 0x0f, 0xe4, 0x33, 0x7c, 0x78, 0x04, 0x74, 0x55, 0xdd, 0x0a, 0xc0, - 0xe9, 0x98, 0xf5, 0x7b, 0x24, 0xe3, 0x85, 0xc1, 0x5b, 0x30, 0x4e, 0x15, 0xab, 0x43, 0xa8, 0x77, - 0x27, 0x9f, 0xe7, 0xbe, 0xe8, 0xf5, 0x59, 0x6f, 0x86, 0xa8, 0x38, 0xc2, 0x7d, 0xfe, 0x2e, 0x8c, - 0x85, 0x94, 0x1d, 0xab, 0xe1, 0xf5, 0xff, 0x25, 0x78, 0x69, 0xe0, 0x8d, 0x09, 0xaa, 0x87, 0x0e, - 0x49, 0x35, 0x72, 0x48, 0xe6, 0xd3, 0x01, 0x9e, 0x5f, 0xe3, 0x54, 0xfd, 0xda, 0xe3, 0xef, 0xe6, - 0xcf, 0x7c, 0xf3, 0xdd, 0xfc, 0x99, 0x6f, 0xbf, 0x9b, 0x3f, 0xf3, 0xb7, 0x47, 0xf3, 0xd2, 0xe3, - 0xa3, 0x79, 0xe9, 0x9b, 0xa3, 0x79, 0xe9, 0xdb, 0xa3, 0x79, 0xe9, 0x17, 0x47, 0xf3, 0xd2, 0xbf, - 0x7c, 0x3f, 0x7f, 0xe6, 0x83, 0xa2, 0x80, 0xfb, 0x5d, 0x00, 0x00, 0x00, 0xff, 0xff, 0x66, 0xf0, - 0x40, 0xcd, 0xc4, 0x3c, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto index 55239b8df..6c90cb3c4 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -649,7 +649,7 @@ message NetworkPolicyIngressRule { // List of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. // If this field is empty or missing, this rule matches all sources (traffic not restricted by source). - // If this field is present and contains at least on item, this rule allows traffic only if the + // If this field is present and contains at least one item, this rule allows traffic only if the // traffic matches at least one item in the from list. // +optional repeated NetworkPolicyPeer from = 2; @@ -872,7 +872,7 @@ message PodSecurityPolicySpec { repeated AllowedFlexVolume allowedFlexVolumes = 18; // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value means no CSI drivers can run inline within a pod spec. + // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. // +optional repeated AllowedCSIDriver allowedCSIDrivers = 23; @@ -902,6 +902,12 @@ message PodSecurityPolicySpec { // This requires the ProcMountType feature flag to be enabled. 
// +optional repeated string allowedProcMountTypes = 21; + + // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. + // If this field is omitted, the pod's runtimeClassName field is unrestricted. + // Enforcement of this field depends on the RuntimeClass feature gate being enabled. + // +optional + optional RuntimeClassStrategyOptions runtimeClass = 24; } // DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for @@ -1104,6 +1110,21 @@ message RunAsUserStrategyOptions { repeated IDRange ranges = 2; } +// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses +// for a pod. +message RuntimeClassStrategyOptions { + // allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. + // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the + // list. An empty list requires the RuntimeClassName field to be unset. + repeated string allowedRuntimeClassNames = 1; + + // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. + // The default MUST be allowed by the allowedRuntimeClassNames list. + // A value of nil does not mutate the Pod. + // +optional + optional string defaultRuntimeClassName = 2; +} + // SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. // Deprecated: use SELinuxStrategyOptions from policy API Group instead. message SELinuxStrategyOptions { diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go index c34ea599d..8f581ef1d 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -929,7 +929,7 @@ type PodSecurityPolicySpec struct { // +optional AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value means no CSI drivers can run inline within a pod spec. + // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. // +optional AllowedCSIDrivers []AllowedCSIDriver `json:"allowedCSIDrivers,omitempty" protobuf:"bytes,23,rep,name=allowedCSIDrivers"` // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. @@ -956,6 +956,11 @@ type PodSecurityPolicySpec struct { // This requires the ProcMountType feature flag to be enabled. // +optional AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty" protobuf:"bytes,21,opt,name=allowedProcMountTypes"` + // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. + // If this field is omitted, the pod's runtimeClassName field is unrestricted. + // Enforcement of this field depends on the RuntimeClass feature gate being enabled. + // +optional + RuntimeClass *RuntimeClassStrategyOptions `json:"runtimeClass,omitempty" protobuf:"bytes,24,opt,name=runtimeClass"` } // AllowedHostPath defines the host volume conditions that will be enabled by a policy @@ -1171,6 +1176,25 @@ const ( SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" ) +// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses +// for a pod. 
+type RuntimeClassStrategyOptions struct { + // allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. + // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the + // list. An empty list requires the RuntimeClassName field to be unset. + AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames" protobuf:"bytes,1,rep,name=allowedRuntimeClassNames"` + // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. + // The default MUST be allowed by the allowedRuntimeClassNames list. + // A value of nil does not mutate the Pod. + // +optional + DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty" protobuf:"bytes,2,opt,name=defaultRuntimeClassName"` +} + +// AllowAllRuntimeClassNames can be used as a value for the +// RuntimeClassStrategyOptions.AllowedRuntimeClassNames field and means that any RuntimeClassName is +// allowed. +const AllowAllRuntimeClassNames = "*" + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodSecurityPolicyList is a list of PodSecurityPolicy objects. @@ -1186,6 +1210,7 @@ type PodSecurityPolicyList struct { Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` } +// +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. @@ -1271,7 +1296,7 @@ type NetworkPolicyIngressRule struct { // List of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. // If this field is empty or missing, this rule matches all sources (traffic not restricted by source). - // If this field is present and contains at least on item, this rule allows traffic only if the + // If this field is present and contains at least one item, this rule allows traffic only if the // traffic matches at least one item in the from list. // +optional From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"` diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go index 654f34ae3..a7eb2ec90 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go @@ -379,7 +379,7 @@ func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyIngressRule = map[string]string{ "": "DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule. This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.", "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", - "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). 
If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.", + "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.", } func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { @@ -470,10 +470,11 @@ var map_PodSecurityPolicySpec = map[string]string{ "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", "allowedHostPaths": "allowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used.", "allowedFlexVolumes": "allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", - "allowedCSIDrivers": "AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value means no CSI drivers can run inline within a pod spec.", + "allowedCSIDrivers": "AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes.", "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", "allowedProcMountTypes": "AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled.", + "runtimeClass": "runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. If this field is omitted, the pod's runtimeClassName field is unrestricted. Enforcement of this field depends on the RuntimeClass feature gate being enabled.", } func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { @@ -596,6 +597,16 @@ func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { return map_RunAsUserStrategyOptions } +var map_RuntimeClassStrategyOptions = map[string]string{ + "": "RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod.", + "allowedRuntimeClassNames": "allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. 
A value of \"*\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset.", + "defaultRuntimeClassName": "defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. A value of nil does not mutate the Pod.", +} + +func (RuntimeClassStrategyOptions) SwaggerDoc() map[string]string { + return map_RuntimeClassStrategyOptions +} + var map_SELinuxStrategyOptions = map[string]string{ "": "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use SELinuxStrategyOptions from policy API Group instead.", "rule": "rule is the strategy that will dictate the allowable labels that may be set.", diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go index a67968af4..cb6101796 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go @@ -124,7 +124,7 @@ func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]DaemonSet, len(*in)) @@ -280,7 +280,7 @@ func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Deployment, len(*in)) @@ -595,7 +595,7 @@ func (in *IngressBackend) DeepCopy() *IngressBackend { func (in *IngressList) DeepCopyInto(out *IngressList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Ingress, len(*in)) @@ -826,7 +826,7 @@ func (in *NetworkPolicyIngressRule) DeepCopy() *NetworkPolicyIngressRule { func (in *NetworkPolicyList) DeepCopyInto(out *NetworkPolicyList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]NetworkPolicy, len(*in)) @@ -979,7 +979,7 @@ func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object { func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodSecurityPolicy, len(*in)) @@ -1085,6 +1085,11 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { *out = make([]corev1.ProcMountType, len(*in)) copy(*out, *in) } + if in.RuntimeClass != nil { + in, out := &in.RuntimeClass, &out.RuntimeClass + *out = new(RuntimeClassStrategyOptions) + (*in).DeepCopyInto(*out) + } return } @@ -1147,7 +1152,7 @@ func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ReplicaSet, len(*in)) @@ -1356,6 +1361,32 
@@ func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeClassStrategyOptions) DeepCopyInto(out *RuntimeClassStrategyOptions) { + *out = *in + if in.AllowedRuntimeClassNames != nil { + in, out := &in.AllowedRuntimeClassNames, &out.AllowedRuntimeClassNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DefaultRuntimeClassName != nil { + in, out := &in.DefaultRuntimeClassName, &out.DefaultRuntimeClassName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassStrategyOptions. +func (in *RuntimeClassStrategyOptions) DeepCopy() *RuntimeClassStrategyOptions { + if in == nil { + return nil + } + out := new(RuntimeClassStrategyOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { *out = *in diff --git a/vendor/k8s.io/api/networking/v1/generated.pb.go b/vendor/k8s.io/api/networking/v1/generated.pb.go index 86bd80c85..c9b22fc79 100644 --- a/vendor/k8s.io/api/networking/v1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1/generated.pb.go @@ -17,38 +17,25 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto - - It has these top-level messages: - IPBlock - NetworkPolicy - NetworkPolicyEgressRule - NetworkPolicyIngressRule - NetworkPolicyList - NetworkPolicyPeer - NetworkPolicyPort - NetworkPolicySpec -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + proto "github.com/gogo/protobuf/proto" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import strings "strings" -import reflect "reflect" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import io "io" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -61,39 +48,229 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *IPBlock) Reset() { *m = IPBlock{} } -func (*IPBlock) ProtoMessage() {} -func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *IPBlock) Reset() { *m = IPBlock{} } +func (*IPBlock) ProtoMessage() {} +func (*IPBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{0} +} +func (m *IPBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPBlock.Merge(m, src) +} +func (m *IPBlock) XXX_Size() int { + return m.Size() +} +func (m *IPBlock) XXX_DiscardUnknown() { + xxx_messageInfo_IPBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_IPBlock proto.InternalMessageInfo + +func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } +func (*NetworkPolicy) ProtoMessage() {} +func (*NetworkPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{1} +} +func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicy.Merge(m, src) +} +func (m *NetworkPolicy) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicy.DiscardUnknown(m) +} -func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } -func (*NetworkPolicy) ProtoMessage() {} -func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo + +func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } +func (*NetworkPolicyEgressRule) ProtoMessage() {} +func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{2} +} +func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyEgressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyEgressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyEgressRule.Merge(m, src) +} +func (m *NetworkPolicyEgressRule) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyEgressRule) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyEgressRule.DiscardUnknown(m) +} -func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } -func (*NetworkPolicyEgressRule) ProtoMessage() {} -func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } func (*NetworkPolicyIngressRule) ProtoMessage() {} func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{3} + return fileDescriptor_1c72867a70a7cc90, []int{3} +} +func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b 
[]byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyIngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyIngressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyIngressRule.Merge(m, src) +} +func (m *NetworkPolicyIngressRule) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyIngressRule) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyIngressRule.DiscardUnknown(m) } -func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } -func (*NetworkPolicyList) ProtoMessage() {} -func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_NetworkPolicyIngressRule proto.InternalMessageInfo -func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } -func (*NetworkPolicyPeer) ProtoMessage() {} -func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } +func (*NetworkPolicyList) ProtoMessage() {} +func (*NetworkPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{4} +} +func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyList.Merge(m, src) +} +func (m *NetworkPolicyList) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyList.DiscardUnknown(m) +} -func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } -func (*NetworkPolicyPort) ProtoMessage() {} -func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo -func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } -func (*NetworkPolicySpec) ProtoMessage() {} -func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } +func (*NetworkPolicyPeer) ProtoMessage() {} +func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{5} +} +func (m *NetworkPolicyPeer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyPeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyPeer) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyPeer.Merge(m, src) +} +func (m *NetworkPolicyPeer) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyPeer) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyPeer.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo + +func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } +func (*NetworkPolicyPort) ProtoMessage() {} +func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{6} +} +func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *NetworkPolicyPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyPort.Merge(m, src) +} +func (m *NetworkPolicyPort) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyPort) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyPort.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo + +func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } +func (*NetworkPolicySpec) ProtoMessage() {} +func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{7} +} +func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicySpec.Merge(m, src) +} +func (m *NetworkPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicySpec.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo func init() { proto.RegisterType((*IPBlock)(nil), "k8s.io.api.networking.v1.IPBlock") @@ -105,10 +282,70 @@ func init() { proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.networking.v1.NetworkPolicyPort") proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.networking.v1.NetworkPolicySpec") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto", fileDescriptor_1c72867a70a7cc90) +} + +var fileDescriptor_1c72867a70a7cc90 = []byte{ + // 804 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xcf, 0x8f, 0xdb, 0x44, + 0x14, 0x8e, 0x9d, 0x6c, 0x92, 0x4e, 0x28, 0x65, 0x07, 0x21, 0xac, 0x45, 0xd8, 0xc1, 0x17, 0x56, + 0xaa, 0x18, 0x93, 0x16, 0x21, 0x6e, 0x08, 0x43, 0x29, 0x91, 0xba, 0xbb, 0xd1, 0x6c, 0x2f, 0x20, + 0x90, 0x70, 0x9c, 0x59, 0xef, 0x34, 0xb1, 0xc7, 0x1a, 0x4f, 0x42, 0xf7, 0xc6, 0x9f, 0xc0, 0x1f, + 0xc2, 0x91, 0x1b, 0x87, 0x72, 0xdc, 0x63, 0x8f, 0x3d, 0x59, 0xac, 0xf9, 0x2f, 0xf6, 0x84, 0x66, + 0x3c, 0x89, 0xf3, 0xa3, 0x11, 0xd9, 0x15, 0xbd, 0x65, 0xde, 0xbc, 0xef, 0x7b, 0xf3, 0xde, 0xfb, + 0xf2, 0x19, 0x7c, 0x35, 0xfe, 0x22, 0x43, 0x94, 0x79, 0xe3, 0xe9, 0x90, 0xf0, 0x84, 0x08, 0x92, + 0x79, 0x33, 0x92, 0x8c, 0x18, 0xf7, 0xf4, 0x45, 0x90, 0x52, 0x2f, 0x21, 0xe2, 0x17, 0xc6, 0xc7, + 0x34, 0x89, 0xbc, 0x59, 0xcf, 0x8b, 0x48, 0x42, 0x78, 0x20, 0xc8, 0x08, 0xa5, 0x9c, 0x09, 0x06, + 0xad, 0x32, 0x13, 0x05, 0x29, 0x45, 0x55, 0x26, 0x9a, 0xf5, 0x0e, 0x3e, 0x89, 0xa8, 0x38, 0x9f, + 0x0e, 0x51, 0xc8, 0x62, 0x2f, 0x62, 0x11, 0xf3, 0x14, 0x60, 0x38, 0x3d, 0x53, 0x27, 0x75, 0x50, + 0xbf, 0x4a, 0xa2, 0x03, 0x77, 0xa9, 0x64, 0xc8, 0x38, 0x79, 0x4d, 0xb1, 0x83, 0xcf, 0xaa, 0x9c, + 0x38, 0x08, 0xcf, 0x69, 0x42, 0xf8, 0x85, 0x97, 0x8e, 0x23, 0x19, 0xc8, 0xbc, 0x98, 0x88, 0xe0, + 0x75, 0x28, 0x6f, 0x1b, 0x8a, 0x4f, 0x13, 0x41, 0x63, 0xb2, 0x01, 0xf8, 0xfc, 0xbf, 0x00, 0x59, + 0x78, 0x4e, 0xe2, 0x60, 0x03, 0xf7, 0x70, 0x1b, 0x6e, 0x2a, 0xe8, 0xc4, 0xa3, 0x89, 0xc8, 0x04, + 0x5f, 0x07, 0xb9, 0x27, 0xa0, 0xd5, 0x1f, 0xf8, 0x13, 0x16, 0x8e, 0x61, 0x17, 
0x34, 0x42, 0x3a, + 0xe2, 0x96, 0xd1, 0x35, 0x0e, 0xef, 0xf8, 0x6f, 0x5d, 0xe6, 0x4e, 0xad, 0xc8, 0x9d, 0xc6, 0xd7, + 0xfd, 0x6f, 0x30, 0x56, 0x37, 0xd0, 0x05, 0x4d, 0xf2, 0x3c, 0x24, 0xa9, 0xb0, 0xcc, 0x6e, 0xfd, + 0xf0, 0x8e, 0x0f, 0x8a, 0xdc, 0x69, 0x3e, 0x52, 0x11, 0xac, 0x6f, 0xdc, 0xbf, 0x0c, 0x70, 0xf7, + 0xb8, 0xdc, 0xc4, 0x80, 0x4d, 0x68, 0x78, 0x01, 0x7f, 0x06, 0x6d, 0x39, 0x9b, 0x51, 0x20, 0x02, + 0xc5, 0xdd, 0x79, 0xf0, 0x29, 0xaa, 0xd6, 0xb6, 0x78, 0x2a, 0x4a, 0xc7, 0x91, 0x0c, 0x64, 0x48, + 0x66, 0xa3, 0x59, 0x0f, 0x9d, 0x0c, 0x9f, 0x91, 0x50, 0x1c, 0x11, 0x11, 0xf8, 0x50, 0xbf, 0x06, + 0x54, 0x31, 0xbc, 0x60, 0x85, 0x47, 0xa0, 0x91, 0xa5, 0x24, 0xb4, 0x4c, 0xc5, 0x7e, 0x1f, 0x6d, + 0x13, 0x05, 0x5a, 0x79, 0xd8, 0x69, 0x4a, 0xc2, 0xaa, 0x4d, 0x79, 0xc2, 0x8a, 0xc6, 0xfd, 0xc3, + 0x00, 0xef, 0xaf, 0x64, 0x3e, 0x8a, 0x38, 0xc9, 0x32, 0x3c, 0x9d, 0x10, 0x38, 0x00, 0x7b, 0x29, + 0xe3, 0x22, 0xb3, 0x8c, 0x6e, 0xfd, 0x06, 0xb5, 0x06, 0x8c, 0x0b, 0xff, 0xae, 0xae, 0xb5, 0x27, + 0x4f, 0x19, 0x2e, 0x89, 0xe0, 0x63, 0x60, 0x0a, 0xa6, 0x06, 0x7a, 0x03, 0x3a, 0x42, 0xb8, 0x0f, + 0x34, 0x9d, 0xf9, 0x94, 0x61, 0x53, 0x30, 0xf7, 0x4f, 0x03, 0x58, 0x2b, 0x59, 0xfd, 0xe4, 0x4d, + 0xbe, 0xfb, 0x08, 0x34, 0xce, 0x38, 0x8b, 0x6f, 0xf3, 0xf2, 0xc5, 0xd0, 0xbf, 0xe5, 0x2c, 0xc6, + 0x8a, 0xc6, 0x7d, 0x61, 0x80, 0xfd, 0x95, 0xcc, 0x27, 0x34, 0x13, 0xf0, 0xc7, 0x0d, 0xed, 0xa0, + 0xdd, 0xb4, 0x23, 0xd1, 0x4a, 0x39, 0xef, 0xe8, 0x5a, 0xed, 0x79, 0x64, 0x49, 0x37, 0x4f, 0xc0, + 0x1e, 0x15, 0x24, 0xce, 0x74, 0x0f, 0x1f, 0xef, 0xd8, 0x43, 0x35, 0x90, 0xbe, 0x44, 0xe3, 0x92, + 0xc4, 0x7d, 0x61, 0xae, 0x75, 0x20, 0x7b, 0x85, 0x67, 0xa0, 0x93, 0xb2, 0xd1, 0x29, 0x99, 0x90, + 0x50, 0x30, 0xae, 0x9b, 0x78, 0xb8, 0x63, 0x13, 0xc1, 0x90, 0x4c, 0xe6, 0x50, 0xff, 0x5e, 0x91, + 0x3b, 0x9d, 0x41, 0xc5, 0x85, 0x97, 0x89, 0xe1, 0x73, 0xb0, 0x9f, 0x04, 0x31, 0xc9, 0xd2, 0x20, + 0x24, 0x8b, 0x6a, 0xe6, 0xed, 0xab, 0xbd, 0x57, 0xe4, 0xce, 0xfe, 0xf1, 0x3a, 0x23, 0xde, 0x2c, + 0x02, 0xbf, 0x03, 0x2d, 0x9a, 0x2a, 0x0b, 0xb1, 0xea, 0xaa, 0xde, 0x47, 0xdb, 0xe7, 0xa8, 0xbd, + 0xc6, 0xef, 0x14, 0xb9, 0x33, 0x37, 0x1e, 0x3c, 0x87, 0xbb, 0xbf, 0xaf, 0x6b, 0x40, 0x0a, 0x0e, + 0x3e, 0x06, 0x6d, 0xe5, 0x55, 0x21, 0x9b, 0x68, 0x6f, 0xba, 0x2f, 0xf7, 0x39, 0xd0, 0xb1, 0xeb, + 0xdc, 0xf9, 0x60, 0xd3, 0xbc, 0xd1, 0xfc, 0x1a, 0x2f, 0xc0, 0xf0, 0x18, 0x34, 0xa4, 0x74, 0xf5, + 0x54, 0xb6, 0x9b, 0x90, 0xf4, 0x4b, 0x54, 0xfa, 0x25, 0xea, 0x27, 0xe2, 0x84, 0x9f, 0x0a, 0x4e, + 0x93, 0xc8, 0x6f, 0x4b, 0xc9, 0xca, 0x27, 0x61, 0xc5, 0xe3, 0x5e, 0xaf, 0x2f, 0x5c, 0x7a, 0x08, + 0x7c, 0xf6, 0xbf, 0x2d, 0xfc, 0x5d, 0x2d, 0xb3, 0xed, 0x4b, 0xff, 0x09, 0xb4, 0x68, 0xf9, 0x27, + 0xd7, 0x12, 0x7e, 0xb0, 0xa3, 0x84, 0x97, 0xac, 0xc1, 0xbf, 0xa7, 0xcb, 0xb4, 0xe6, 0xc1, 0x39, + 0x27, 0xfc, 0x1e, 0x34, 0x49, 0xc9, 0x5e, 0x57, 0xec, 0xbd, 0x1d, 0xd9, 0x2b, 0xbf, 0xf4, 0xdf, + 0xd6, 0xe4, 0x4d, 0x1d, 0xd3, 0x84, 0xf0, 0x4b, 0x39, 0x25, 0x99, 0xfb, 0xf4, 0x22, 0x25, 0x99, + 0xd5, 0x50, 0xdf, 0x93, 0x0f, 0xcb, 0x66, 0x17, 0xe1, 0xeb, 0xdc, 0x01, 0xd5, 0x11, 0x2f, 0x23, + 0xfc, 0xc3, 0xcb, 0x2b, 0xbb, 0xf6, 0xf2, 0xca, 0xae, 0xbd, 0xba, 0xb2, 0x6b, 0xbf, 0x16, 0xb6, + 0x71, 0x59, 0xd8, 0xc6, 0xcb, 0xc2, 0x36, 0x5e, 0x15, 0xb6, 0xf1, 0x77, 0x61, 0x1b, 0xbf, 0xfd, + 0x63, 0xd7, 0x7e, 0x30, 0x67, 0xbd, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x7b, 0xc9, 0x59, + 0x67, 0x08, 0x00, 0x00, +} + func (m *IPBlock) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -116,36 +353,36 @@ func (m *IPBlock) Marshal() (dAtA []byte, err error) { } func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) - i += copy(dAtA[i:], m.CIDR) if len(m.Except) > 0 { - for _, s := range m.Except { + for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Except[iNdEx]) + copy(dAtA[i:], m.Except[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Except[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + i -= len(m.CIDR) + copy(dAtA[i:], m.CIDR) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NetworkPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -153,33 +390,42 @@ func (m *NetworkPolicy) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -187,41 +433,50 @@ func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyEgressRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyEgressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.To) > 0 { + for iNdEx := len(m.To) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.To[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.To) > 0 { - for _, msg := range m.To { - dAtA[i] = 0x12 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *NetworkPolicyIngressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -229,41 +484,50 @@ func (m *NetworkPolicyIngressRule) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyIngressRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyIngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.From) > 0 { + for iNdEx := len(m.From) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.From[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.From) > 0 { - for _, msg := range m.From { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *NetworkPolicyList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -271,37 +535,46 @@ func (m *NetworkPolicyList) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NetworkPolicyPeer) Marshal() 
(dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -309,47 +582,58 @@ func (m *NetworkPolicyPeer) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyPeer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.PodSelector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n4, err := m.PodSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.IPBlock != nil { + { + size, err := m.IPBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- + dAtA[i] = 0x1a } if m.NamespaceSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) - n5, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- + dAtA[i] = 0x12 } - if m.IPBlock != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.IPBlock.Size())) - n6, err := m.IPBlock.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.PodSelector != nil { + { + size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *NetworkPolicyPort) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -357,33 +641,41 @@ func (m *NetworkPolicyPort) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyPort) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Protocol != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) - i += copy(dAtA[i:], *m.Protocol) - } if m.Port != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n7, err := m.Port.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- + dAtA[i] = 0x12 } - return i, nil + if m.Protocol != nil { + i -= len(*m.Protocol) + copy(dAtA[i:], *m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } func (m *NetworkPolicySpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -391,70 +683,80 @@ func (m *NetworkPolicySpec) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + 
return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n8, err := m.PodSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - if len(m.Ingress) > 0 { - for _, msg := range m.Ingress { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.PolicyTypes) > 0 { + for iNdEx := len(m.PolicyTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PolicyTypes[iNdEx]) + copy(dAtA[i:], m.PolicyTypes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyTypes[iNdEx]))) + i-- + dAtA[i] = 0x22 } } if len(m.Egress) > 0 { - for _, msg := range m.Egress { + for iNdEx := len(m.Egress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Egress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + } + if len(m.Ingress) > 0 { + for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.PolicyTypes) > 0 { - for _, s := range m.PolicyTypes { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + { + size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *IPBlock) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.CIDR) @@ -469,6 +771,9 @@ func (m *IPBlock) Size() (n int) { } func (m *NetworkPolicy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -479,6 +784,9 @@ func (m *NetworkPolicy) Size() (n int) { } func (m *NetworkPolicyEgressRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Ports) > 0 { @@ -497,6 +805,9 @@ func (m *NetworkPolicyEgressRule) Size() (n int) { } func (m *NetworkPolicyIngressRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Ports) > 0 { @@ -515,6 +826,9 @@ func (m *NetworkPolicyIngressRule) Size() (n int) { } func (m *NetworkPolicyList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -529,6 +843,9 @@ func (m *NetworkPolicyList) Size() (n int) { } func (m *NetworkPolicyPeer) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.PodSelector != nil { @@ -547,6 +864,9 @@ func (m *NetworkPolicyPeer) Size() (n int) { } func (m *NetworkPolicyPort) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Protocol != nil { @@ -561,6 +881,9 @@ func (m *NetworkPolicyPort) Size() (n int) { } func (m 
*NetworkPolicySpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.PodSelector.Size() @@ -587,14 +910,7 @@ func (m *NetworkPolicySpec) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -615,7 +931,7 @@ func (this *NetworkPolicy) String() string { return "nil" } s := strings.Join([]string{`&NetworkPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -625,9 +941,19 @@ func (this *NetworkPolicyEgressRule) String() string { if this == nil { return "nil" } + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForTo := "[]NetworkPolicyPeer{" + for _, f := range this.To { + repeatedStringForTo += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," + } + repeatedStringForTo += "}" s := strings.Join([]string{`&NetworkPolicyEgressRule{`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, - `To:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.To), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `To:` + repeatedStringForTo + `,`, `}`, }, "") return s @@ -636,9 +962,19 @@ func (this *NetworkPolicyIngressRule) String() string { if this == nil { return "nil" } + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForFrom := "[]NetworkPolicyPeer{" + for _, f := range this.From { + repeatedStringForFrom += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," + } + repeatedStringForFrom += "}" s := strings.Join([]string{`&NetworkPolicyIngressRule{`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, - `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `From:` + repeatedStringForFrom + `,`, `}`, }, "") return s @@ -647,9 +983,14 @@ func (this *NetworkPolicyList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]NetworkPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&NetworkPolicyList{`, - `ListMeta:` + 
strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -659,9 +1000,9 @@ func (this *NetworkPolicyPeer) String() string { return "nil" } s := strings.Join([]string{`&NetworkPolicyPeer{`, - `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `IPBlock:` + strings.Replace(fmt.Sprintf("%v", this.IPBlock), "IPBlock", "IPBlock", 1) + `,`, + `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `IPBlock:` + strings.Replace(this.IPBlock.String(), "IPBlock", "IPBlock", 1) + `,`, `}`, }, "") return s @@ -672,7 +1013,7 @@ func (this *NetworkPolicyPort) String() string { } s := strings.Join([]string{`&NetworkPolicyPort{`, `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, - `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -681,10 +1022,20 @@ func (this *NetworkPolicySpec) String() string { if this == nil { return "nil" } + repeatedStringForIngress := "[]NetworkPolicyIngressRule{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForIngress += "}" + repeatedStringForEgress := "[]NetworkPolicyEgressRule{" + for _, f := range this.Egress { + repeatedStringForEgress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForEgress += "}" s := strings.Join([]string{`&NetworkPolicySpec{`, - `PodSelector:` + strings.Replace(strings.Replace(this.PodSelector.String(), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `Ingress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ingress), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + `,`, - `Egress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Egress), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + `,`, + `PodSelector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Ingress:` + repeatedStringForIngress + `,`, + `Egress:` + repeatedStringForEgress + `,`, `PolicyTypes:` + fmt.Sprintf("%v", this.PolicyTypes) + `,`, `}`, }, "") @@ -713,7 +1064,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -741,7 +1092,7 @@ func (m 
*IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -751,6 +1102,9 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -770,7 +1124,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -780,6 +1134,9 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -794,6 +1151,9 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -821,7 +1181,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -849,7 +1209,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -858,6 +1218,9 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -879,7 +1242,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -888,6 +1251,9 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -904,6 +1270,9 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -931,7 +1300,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -959,7 +1328,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -968,6 +1337,9 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -990,7 +1362,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -999,6 +1371,9 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -1016,6 +1391,9 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1043,7 +1421,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1071,7 +1449,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1080,6 +1458,9 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1102,7 +1483,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1111,6 +1492,9 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1128,6 +1512,9 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1155,7 +1542,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1183,7 +1570,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1192,6 +1579,9 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1213,7 +1603,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1222,6 +1612,9 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1239,6 +1632,9 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1266,7 +1662,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1294,7 +1690,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1303,11 +1699,14 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.PodSelector == nil { - m.PodSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.PodSelector = &v1.LabelSelector{} } if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1327,7 +1726,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1336,11 +1735,14 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.NamespaceSelector == nil { - m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.NamespaceSelector = &v1.LabelSelector{} } if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1360,7 +1762,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1369,6 +1771,9 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1388,6 +1793,9 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1415,7 +1823,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1443,7 +1851,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1453,6 +1861,9 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1473,7 +1884,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1482,11 +1893,14 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Port == nil { - m.Port = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.Port = &intstr.IntOrString{} } if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1501,6 +1915,9 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1528,7 +1945,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { 
break } @@ -1556,7 +1973,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1565,6 +1982,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1586,7 +2006,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1595,6 +2015,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1617,7 +2040,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1626,6 +2049,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1648,7 +2074,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1658,6 +2084,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1672,6 +2101,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1738,10 +2170,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1770,6 +2205,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1788,62 +2226,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 804 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xcf, 0x8f, 0xdb, 0x44, - 0x14, 0x8e, 0x9d, 0x6c, 0x92, 0x4e, 0x28, 0x65, 0x07, 0x21, 0xac, 0x45, 0xd8, 0xc1, 0x17, 0x56, - 0xaa, 0x18, 0x93, 0x16, 0x21, 0x6e, 0x08, 0x43, 0x29, 0x91, 0xba, 0xbb, 0xd1, 0x6c, 0x2f, 0x20, - 0x90, 0x70, 0x9c, 0x59, 0xef, 0x34, 0xb1, 0xc7, 0x1a, 0x4f, 0x42, 0xf7, 0xc6, 0x9f, 0xc0, 0x1f, - 0xc2, 0x91, 0x1b, 0x87, 0x72, 0xdc, 0x63, 0x8f, 0x3d, 0x59, 0xac, 0xf9, 0x2f, 0xf6, 0x84, 0x66, - 0x3c, 0x89, 0xf3, 0xa3, 0x11, 0xd9, 0x15, 0xbd, 0x65, 0xde, 0xbc, 0xef, 0x7b, 0xf3, 0xde, 0xfb, - 
0xf2, 0x19, 0x7c, 0x35, 0xfe, 0x22, 0x43, 0x94, 0x79, 0xe3, 0xe9, 0x90, 0xf0, 0x84, 0x08, 0x92, - 0x79, 0x33, 0x92, 0x8c, 0x18, 0xf7, 0xf4, 0x45, 0x90, 0x52, 0x2f, 0x21, 0xe2, 0x17, 0xc6, 0xc7, - 0x34, 0x89, 0xbc, 0x59, 0xcf, 0x8b, 0x48, 0x42, 0x78, 0x20, 0xc8, 0x08, 0xa5, 0x9c, 0x09, 0x06, - 0xad, 0x32, 0x13, 0x05, 0x29, 0x45, 0x55, 0x26, 0x9a, 0xf5, 0x0e, 0x3e, 0x89, 0xa8, 0x38, 0x9f, - 0x0e, 0x51, 0xc8, 0x62, 0x2f, 0x62, 0x11, 0xf3, 0x14, 0x60, 0x38, 0x3d, 0x53, 0x27, 0x75, 0x50, - 0xbf, 0x4a, 0xa2, 0x03, 0x77, 0xa9, 0x64, 0xc8, 0x38, 0x79, 0x4d, 0xb1, 0x83, 0xcf, 0xaa, 0x9c, - 0x38, 0x08, 0xcf, 0x69, 0x42, 0xf8, 0x85, 0x97, 0x8e, 0x23, 0x19, 0xc8, 0xbc, 0x98, 0x88, 0xe0, - 0x75, 0x28, 0x6f, 0x1b, 0x8a, 0x4f, 0x13, 0x41, 0x63, 0xb2, 0x01, 0xf8, 0xfc, 0xbf, 0x00, 0x59, - 0x78, 0x4e, 0xe2, 0x60, 0x03, 0xf7, 0x70, 0x1b, 0x6e, 0x2a, 0xe8, 0xc4, 0xa3, 0x89, 0xc8, 0x04, - 0x5f, 0x07, 0xb9, 0x27, 0xa0, 0xd5, 0x1f, 0xf8, 0x13, 0x16, 0x8e, 0x61, 0x17, 0x34, 0x42, 0x3a, - 0xe2, 0x96, 0xd1, 0x35, 0x0e, 0xef, 0xf8, 0x6f, 0x5d, 0xe6, 0x4e, 0xad, 0xc8, 0x9d, 0xc6, 0xd7, - 0xfd, 0x6f, 0x30, 0x56, 0x37, 0xd0, 0x05, 0x4d, 0xf2, 0x3c, 0x24, 0xa9, 0xb0, 0xcc, 0x6e, 0xfd, - 0xf0, 0x8e, 0x0f, 0x8a, 0xdc, 0x69, 0x3e, 0x52, 0x11, 0xac, 0x6f, 0xdc, 0xbf, 0x0c, 0x70, 0xf7, - 0xb8, 0xdc, 0xc4, 0x80, 0x4d, 0x68, 0x78, 0x01, 0x7f, 0x06, 0x6d, 0x39, 0x9b, 0x51, 0x20, 0x02, - 0xc5, 0xdd, 0x79, 0xf0, 0x29, 0xaa, 0xd6, 0xb6, 0x78, 0x2a, 0x4a, 0xc7, 0x91, 0x0c, 0x64, 0x48, - 0x66, 0xa3, 0x59, 0x0f, 0x9d, 0x0c, 0x9f, 0x91, 0x50, 0x1c, 0x11, 0x11, 0xf8, 0x50, 0xbf, 0x06, - 0x54, 0x31, 0xbc, 0x60, 0x85, 0x47, 0xa0, 0x91, 0xa5, 0x24, 0xb4, 0x4c, 0xc5, 0x7e, 0x1f, 0x6d, - 0x13, 0x05, 0x5a, 0x79, 0xd8, 0x69, 0x4a, 0xc2, 0xaa, 0x4d, 0x79, 0xc2, 0x8a, 0xc6, 0xfd, 0xc3, - 0x00, 0xef, 0xaf, 0x64, 0x3e, 0x8a, 0x38, 0xc9, 0x32, 0x3c, 0x9d, 0x10, 0x38, 0x00, 0x7b, 0x29, - 0xe3, 0x22, 0xb3, 0x8c, 0x6e, 0xfd, 0x06, 0xb5, 0x06, 0x8c, 0x0b, 0xff, 0xae, 0xae, 0xb5, 0x27, - 0x4f, 0x19, 0x2e, 0x89, 0xe0, 0x63, 0x60, 0x0a, 0xa6, 0x06, 0x7a, 0x03, 0x3a, 0x42, 0xb8, 0x0f, - 0x34, 0x9d, 0xf9, 0x94, 0x61, 0x53, 0x30, 0xf7, 0x4f, 0x03, 0x58, 0x2b, 0x59, 0xfd, 0xe4, 0x4d, - 0xbe, 0xfb, 0x08, 0x34, 0xce, 0x38, 0x8b, 0x6f, 0xf3, 0xf2, 0xc5, 0xd0, 0xbf, 0xe5, 0x2c, 0xc6, - 0x8a, 0xc6, 0x7d, 0x61, 0x80, 0xfd, 0x95, 0xcc, 0x27, 0x34, 0x13, 0xf0, 0xc7, 0x0d, 0xed, 0xa0, - 0xdd, 0xb4, 0x23, 0xd1, 0x4a, 0x39, 0xef, 0xe8, 0x5a, 0xed, 0x79, 0x64, 0x49, 0x37, 0x4f, 0xc0, - 0x1e, 0x15, 0x24, 0xce, 0x74, 0x0f, 0x1f, 0xef, 0xd8, 0x43, 0x35, 0x90, 0xbe, 0x44, 0xe3, 0x92, - 0xc4, 0x7d, 0x61, 0xae, 0x75, 0x20, 0x7b, 0x85, 0x67, 0xa0, 0x93, 0xb2, 0xd1, 0x29, 0x99, 0x90, - 0x50, 0x30, 0xae, 0x9b, 0x78, 0xb8, 0x63, 0x13, 0xc1, 0x90, 0x4c, 0xe6, 0x50, 0xff, 0x5e, 0x91, - 0x3b, 0x9d, 0x41, 0xc5, 0x85, 0x97, 0x89, 0xe1, 0x73, 0xb0, 0x9f, 0x04, 0x31, 0xc9, 0xd2, 0x20, - 0x24, 0x8b, 0x6a, 0xe6, 0xed, 0xab, 0xbd, 0x57, 0xe4, 0xce, 0xfe, 0xf1, 0x3a, 0x23, 0xde, 0x2c, - 0x02, 0xbf, 0x03, 0x2d, 0x9a, 0x2a, 0x0b, 0xb1, 0xea, 0xaa, 0xde, 0x47, 0xdb, 0xe7, 0xa8, 0xbd, - 0xc6, 0xef, 0x14, 0xb9, 0x33, 0x37, 0x1e, 0x3c, 0x87, 0xbb, 0xbf, 0xaf, 0x6b, 0x40, 0x0a, 0x0e, - 0x3e, 0x06, 0x6d, 0xe5, 0x55, 0x21, 0x9b, 0x68, 0x6f, 0xba, 0x2f, 0xf7, 0x39, 0xd0, 0xb1, 0xeb, - 0xdc, 0xf9, 0x60, 0xd3, 0xbc, 0xd1, 0xfc, 0x1a, 0x2f, 0xc0, 0xf0, 0x18, 0x34, 0xa4, 0x74, 0xf5, - 0x54, 0xb6, 0x9b, 0x90, 0xf4, 0x4b, 0x54, 0xfa, 0x25, 0xea, 0x27, 0xe2, 0x84, 0x9f, 0x0a, 0x4e, - 0x93, 0xc8, 0x6f, 0x4b, 0xc9, 0xca, 0x27, 0x61, 0xc5, 0xe3, 0x5e, 0xaf, 0x2f, 0x5c, 0x7a, 0x08, - 0x7c, 0xf6, 0xbf, 0x2d, 
0xfc, 0x5d, 0x2d, 0xb3, 0xed, 0x4b, 0xff, 0x09, 0xb4, 0x68, 0xf9, 0x27, - 0xd7, 0x12, 0x7e, 0xb0, 0xa3, 0x84, 0x97, 0xac, 0xc1, 0xbf, 0xa7, 0xcb, 0xb4, 0xe6, 0xc1, 0x39, - 0x27, 0xfc, 0x1e, 0x34, 0x49, 0xc9, 0x5e, 0x57, 0xec, 0xbd, 0x1d, 0xd9, 0x2b, 0xbf, 0xf4, 0xdf, - 0xd6, 0xe4, 0x4d, 0x1d, 0xd3, 0x84, 0xf0, 0x4b, 0x39, 0x25, 0x99, 0xfb, 0xf4, 0x22, 0x25, 0x99, - 0xd5, 0x50, 0xdf, 0x93, 0x0f, 0xcb, 0x66, 0x17, 0xe1, 0xeb, 0xdc, 0x01, 0xd5, 0x11, 0x2f, 0x23, - 0xfc, 0xc3, 0xcb, 0x2b, 0xbb, 0xf6, 0xf2, 0xca, 0xae, 0xbd, 0xba, 0xb2, 0x6b, 0xbf, 0x16, 0xb6, - 0x71, 0x59, 0xd8, 0xc6, 0xcb, 0xc2, 0x36, 0x5e, 0x15, 0xb6, 0xf1, 0x77, 0x61, 0x1b, 0xbf, 0xfd, - 0x63, 0xd7, 0x7e, 0x30, 0x67, 0xbd, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x7b, 0xc9, 0x59, - 0x67, 0x08, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto index cbbb26528..3cb738045 100644 --- a/vendor/k8s.io/api/networking/v1/generated.proto +++ b/vendor/k8s.io/api/networking/v1/generated.proto @@ -92,7 +92,7 @@ message NetworkPolicyIngressRule { // List of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all sources (traffic not restricted by - // source). If this field is present and contains at least on item, this rule + // source). If this field is present and contains at least one item, this rule // allows traffic only if the traffic matches at least one item in the from list. // +optional repeated NetworkPolicyPeer from = 2; diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go index 59331111f..38a640f0b 100644 --- a/vendor/k8s.io/api/networking/v1/types.go +++ b/vendor/k8s.io/api/networking/v1/types.go @@ -107,7 +107,7 @@ type NetworkPolicyIngressRule struct { // List of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all sources (traffic not restricted by - // source). If this field is present and contains at least on item, this rule + // source). If this field is present and contains at least one item, this rule // allows traffic only if the traffic matches at least one item in the from list. // +optional From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"` diff --git a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go index cfcd0c54c..188b72a1c 100644 --- a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go @@ -60,7 +60,7 @@ func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyIngressRule = map[string]string{ "": "NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.", "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). 
If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", - "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.", + "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.", } func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go index d1e4e8845..1833e9782 100644 --- a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go @@ -139,7 +139,7 @@ func (in *NetworkPolicyIngressRule) DeepCopy() *NetworkPolicyIngressRule { func (in *NetworkPolicyList) DeepCopyInto(out *NetworkPolicyList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]NetworkPolicy, len(*in)) diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go index 14430cbac..8ed560090 100644 --- a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go @@ -17,34 +17,20 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/networking/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/networking/v1beta1/generated.proto - - It has these top-level messages: - HTTPIngressPath - HTTPIngressRuleValue - Ingress - IngressBackend - IngressList - IngressRule - IngressRuleValue - IngressSpec - IngressStatus - IngressTLS -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import strings "strings" -import reflect "reflect" + io "io" -import io "io" + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -57,45 +43,285 @@ var _ = math.Inf // proto package needs to be updated. 
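
For orientation before the regenerated method bodies below: the newer protoc-gen-gogo output gives every message XXX_* forwarding methods and replaces the forward-writing MarshalTo logic with MarshalToSizedBuffer, which fills an exactly pre-sized buffer from the end backwards (last field first) and returns how many bytes it wrote. The following is a minimal, self-contained sketch of that pattern; demoMsg, sov, encodeVarint and the field layout are illustrative assumptions, not the generated API.

package main

import (
	"fmt"
	"math/bits"
)

// demoMsg is a hypothetical two-field message used only to show the pattern.
type demoMsg struct {
	Path string // field 1, wire type 2, tag byte 0x0a
	Host string // field 2, wire type 2, tag byte 0x12
}

// sov mirrors the regenerated sovGenerated: varint width as ceil(bitlen/7),
// where x|1 keeps the result at least 1 for x == 0.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// encodeVarint mirrors encodeVarintGenerated: it writes v so the varint ends
// just before offset and returns the new, smaller offset.
func encodeVarint(b []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		b[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	b[offset] = uint8(v)
	return base
}

func (m *demoMsg) size() int {
	n := 1 + len(m.Path) + sov(uint64(len(m.Path)))
	n += 1 + len(m.Host) + sov(uint64(len(m.Host)))
	return n
}

// marshalToSizedBuffer writes fields in reverse order, back to front, which is
// how the regenerated MarshalToSizedBuffer methods behave.
func (m *demoMsg) marshalToSizedBuffer(b []byte) (int, error) {
	i := len(b)
	i -= len(m.Host)
	copy(b[i:], m.Host)
	i = encodeVarint(b, i, uint64(len(m.Host)))
	i--
	b[i] = 0x12
	i -= len(m.Path)
	copy(b[i:], m.Path)
	i = encodeVarint(b, i, uint64(len(m.Path)))
	i--
	b[i] = 0xa
	return len(b) - i, nil
}

func main() {
	m := &demoMsg{Path: "/healthz", Host: "example.com"}
	buf := make([]byte, m.size())
	n, _ := m.marshalToSizedBuffer(buf)
	fmt.Printf("wrote %d of %d bytes: %x\n", n, len(buf), buf)
}

Writing backwards also means a nested message's length prefix can come from the size returned by its own write, rather than a separate Size() call per field, which is the shape the generated hunks below take as well.
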
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } -func (*HTTPIngressPath) ProtoMessage() {} -func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } +func (*HTTPIngressPath) ProtoMessage() {} +func (*HTTPIngressPath) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{0} +} +func (m *HTTPIngressPath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPIngressPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPIngressPath) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPIngressPath.Merge(m, src) +} +func (m *HTTPIngressPath) XXX_Size() int { + return m.Size() +} +func (m *HTTPIngressPath) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPIngressPath.DiscardUnknown(m) +} -func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } -func (*HTTPIngressRuleValue) ProtoMessage() {} -func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_HTTPIngressPath proto.InternalMessageInfo -func (m *Ingress) Reset() { *m = Ingress{} } -func (*Ingress) ProtoMessage() {} -func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } +func (*HTTPIngressRuleValue) ProtoMessage() {} +func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{1} +} +func (m *HTTPIngressRuleValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPIngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPIngressRuleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPIngressRuleValue.Merge(m, src) +} +func (m *HTTPIngressRuleValue) XXX_Size() int { + return m.Size() +} +func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPIngressRuleValue.DiscardUnknown(m) +} -func (m *IngressBackend) Reset() { *m = IngressBackend{} } -func (*IngressBackend) ProtoMessage() {} -func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo -func (m *IngressList) Reset() { *m = IngressList{} } -func (*IngressList) ProtoMessage() {} -func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *Ingress) Reset() { *m = Ingress{} } +func (*Ingress) ProtoMessage() {} +func (*Ingress) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{2} +} +func (m *Ingress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Ingress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Ingress) XXX_Merge(src proto.Message) { + xxx_messageInfo_Ingress.Merge(m, src) +} +func (m *Ingress) XXX_Size() int { + return m.Size() +} +func (m *Ingress) XXX_DiscardUnknown() { + xxx_messageInfo_Ingress.DiscardUnknown(m) +} -func (m *IngressRule) Reset() { 
*m = IngressRule{} } -func (*IngressRule) ProtoMessage() {} -func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_Ingress proto.InternalMessageInfo -func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } -func (*IngressRuleValue) ProtoMessage() {} -func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *IngressBackend) Reset() { *m = IngressBackend{} } +func (*IngressBackend) ProtoMessage() {} +func (*IngressBackend) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{3} +} +func (m *IngressBackend) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressBackend) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressBackend) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressBackend.Merge(m, src) +} +func (m *IngressBackend) XXX_Size() int { + return m.Size() +} +func (m *IngressBackend) XXX_DiscardUnknown() { + xxx_messageInfo_IngressBackend.DiscardUnknown(m) +} -func (m *IngressSpec) Reset() { *m = IngressSpec{} } -func (*IngressSpec) ProtoMessage() {} -func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_IngressBackend proto.InternalMessageInfo -func (m *IngressStatus) Reset() { *m = IngressStatus{} } -func (*IngressStatus) ProtoMessage() {} -func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *IngressList) Reset() { *m = IngressList{} } +func (*IngressList) ProtoMessage() {} +func (*IngressList) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{4} +} +func (m *IngressList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressList) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressList.Merge(m, src) +} +func (m *IngressList) XXX_Size() int { + return m.Size() +} +func (m *IngressList) XXX_DiscardUnknown() { + xxx_messageInfo_IngressList.DiscardUnknown(m) +} -func (m *IngressTLS) Reset() { *m = IngressTLS{} } -func (*IngressTLS) ProtoMessage() {} -func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_IngressList proto.InternalMessageInfo + +func (m *IngressRule) Reset() { *m = IngressRule{} } +func (*IngressRule) ProtoMessage() {} +func (*IngressRule) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{5} +} +func (m *IngressRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressRule.Merge(m, src) +} +func (m *IngressRule) XXX_Size() int { + return m.Size() +} +func (m *IngressRule) XXX_DiscardUnknown() { + xxx_messageInfo_IngressRule.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressRule proto.InternalMessageInfo + +func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } +func (*IngressRuleValue) ProtoMessage() {} +func 
(*IngressRuleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{6} +} +func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressRuleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressRuleValue.Merge(m, src) +} +func (m *IngressRuleValue) XXX_Size() int { + return m.Size() +} +func (m *IngressRuleValue) XXX_DiscardUnknown() { + xxx_messageInfo_IngressRuleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo + +func (m *IngressSpec) Reset() { *m = IngressSpec{} } +func (*IngressSpec) ProtoMessage() {} +func (*IngressSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{7} +} +func (m *IngressSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressSpec.Merge(m, src) +} +func (m *IngressSpec) XXX_Size() int { + return m.Size() +} +func (m *IngressSpec) XXX_DiscardUnknown() { + xxx_messageInfo_IngressSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressSpec proto.InternalMessageInfo + +func (m *IngressStatus) Reset() { *m = IngressStatus{} } +func (*IngressStatus) ProtoMessage() {} +func (*IngressStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{8} +} +func (m *IngressStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressStatus.Merge(m, src) +} +func (m *IngressStatus) XXX_Size() int { + return m.Size() +} +func (m *IngressStatus) XXX_DiscardUnknown() { + xxx_messageInfo_IngressStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressStatus proto.InternalMessageInfo + +func (m *IngressTLS) Reset() { *m = IngressTLS{} } +func (*IngressTLS) ProtoMessage() {} +func (*IngressTLS) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{9} +} +func (m *IngressTLS) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressTLS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressTLS) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressTLS.Merge(m, src) +} +func (m *IngressTLS) XXX_Size() int { + return m.Size() +} +func (m *IngressTLS) XXX_DiscardUnknown() { + xxx_messageInfo_IngressTLS.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressTLS proto.InternalMessageInfo func init() { proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.networking.v1beta1.HTTPIngressPath") @@ -109,10 +335,70 @@ func init() { proto.RegisterType((*IngressStatus)(nil), "k8s.io.api.networking.v1beta1.IngressStatus") proto.RegisterType((*IngressTLS)(nil), "k8s.io.api.networking.v1beta1.IngressTLS") } + +func init() { + 
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1beta1/generated.proto", fileDescriptor_5bea11de0ceb8f53) +} + +var fileDescriptor_5bea11de0ceb8f53 = []byte{ + // 812 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xcf, 0x6e, 0xfb, 0x44, + 0x10, 0x8e, 0xf3, 0xa7, 0x69, 0xd7, 0xfd, 0xa7, 0xa5, 0x87, 0xa8, 0x12, 0x6e, 0xe4, 0x03, 0x2a, + 0x88, 0xae, 0x69, 0x0a, 0x88, 0xb3, 0x0f, 0xa8, 0x15, 0x81, 0x86, 0x75, 0x84, 0x10, 0xe2, 0xd0, + 0x8d, 0xb3, 0x38, 0x26, 0x89, 0x6d, 0x76, 0xd7, 0x41, 0xdc, 0x78, 0x01, 0x04, 0x4f, 0xc1, 0x99, + 0x23, 0x8f, 0xd0, 0x63, 0x8f, 0x3d, 0x55, 0x34, 0xbc, 0x07, 0x42, 0xbb, 0xde, 0xda, 0x4e, 0xd2, + 0xfe, 0x6a, 0xfd, 0x6e, 0xde, 0x9d, 0xf9, 0xbe, 0xd9, 0x99, 0xf9, 0x66, 0x0c, 0x3e, 0x9f, 0x7e, + 0xc6, 0x51, 0x18, 0x3b, 0xd3, 0x74, 0x44, 0x59, 0x44, 0x05, 0xe5, 0xce, 0x82, 0x46, 0xe3, 0x98, + 0x39, 0xda, 0x40, 0x92, 0xd0, 0x89, 0xa8, 0xf8, 0x39, 0x66, 0xd3, 0x30, 0x0a, 0x9c, 0xc5, 0xf9, + 0x88, 0x0a, 0x72, 0xee, 0x04, 0x34, 0xa2, 0x8c, 0x08, 0x3a, 0x46, 0x09, 0x8b, 0x45, 0x0c, 0xdf, + 0xcd, 0xdc, 0x11, 0x49, 0x42, 0x54, 0xb8, 0x23, 0xed, 0x7e, 0x7c, 0x16, 0x84, 0x62, 0x92, 0x8e, + 0x90, 0x1f, 0xcf, 0x9d, 0x20, 0x0e, 0x62, 0x47, 0xa1, 0x46, 0xe9, 0x0f, 0xea, 0xa4, 0x0e, 0xea, + 0x2b, 0x63, 0x3b, 0xb6, 0x4b, 0xc1, 0xfd, 0x98, 0x51, 0x67, 0xb1, 0x11, 0xf1, 0xf8, 0xe3, 0xc2, + 0x67, 0x4e, 0xfc, 0x49, 0x18, 0x51, 0xf6, 0x8b, 0x93, 0x4c, 0x03, 0x79, 0xc1, 0x9d, 0x39, 0x15, + 0xe4, 0x39, 0x94, 0xf3, 0x12, 0x8a, 0xa5, 0x91, 0x08, 0xe7, 0x74, 0x03, 0xf0, 0xe9, 0x6b, 0x00, + 0xee, 0x4f, 0xe8, 0x9c, 0x6c, 0xe0, 0x2e, 0x5e, 0xc2, 0xa5, 0x22, 0x9c, 0x39, 0x61, 0x24, 0xb8, + 0x60, 0xeb, 0x20, 0xfb, 0x37, 0x03, 0x1c, 0x5c, 0x0e, 0x87, 0x83, 0xab, 0x28, 0x60, 0x94, 0xf3, + 0x01, 0x11, 0x13, 0xd8, 0x05, 0xcd, 0x84, 0x88, 0x49, 0xc7, 0xe8, 0x1a, 0xa7, 0x3b, 0xee, 0xee, + 0xed, 0xc3, 0x49, 0x6d, 0xf9, 0x70, 0xd2, 0x94, 0x36, 0xac, 0x2c, 0xf0, 0x5b, 0xd0, 0x1e, 0x11, + 0x7f, 0x4a, 0xa3, 0x71, 0xa7, 0xde, 0x35, 0x4e, 0xcd, 0xde, 0x19, 0x7a, 0x63, 0x37, 0x90, 0xa6, + 0x77, 0x33, 0x90, 0x7b, 0xa0, 0x39, 0xdb, 0xfa, 0x02, 0x3f, 0xd1, 0xd9, 0x53, 0x70, 0x54, 0x7a, + 0x0e, 0x4e, 0x67, 0xf4, 0x1b, 0x32, 0x4b, 0x29, 0xf4, 0x40, 0x4b, 0x46, 0xe6, 0x1d, 0xa3, 0xdb, + 0x38, 0x35, 0x7b, 0xe8, 0x95, 0x78, 0x6b, 0x29, 0xb9, 0x7b, 0x3a, 0x60, 0x4b, 0x9e, 0x38, 0xce, + 0xb8, 0xec, 0xdf, 0xeb, 0xa0, 0xad, 0xbd, 0xe0, 0x0d, 0xd8, 0x96, 0x1d, 0x1c, 0x13, 0x41, 0x54, + 0xe2, 0x66, 0xef, 0xa3, 0x52, 0x8c, 0xbc, 0xa0, 0x28, 0x99, 0x06, 0xf2, 0x82, 0x23, 0xe9, 0x8d, + 0x16, 0xe7, 0xe8, 0x7a, 0xf4, 0x23, 0xf5, 0xc5, 0x97, 0x54, 0x10, 0x17, 0xea, 0x28, 0xa0, 0xb8, + 0xc3, 0x39, 0x2b, 0xec, 0x83, 0x26, 0x4f, 0xa8, 0xaf, 0x2b, 0xf6, 0x41, 0xb5, 0x8a, 0x79, 0x09, + 0xf5, 0x8b, 0x16, 0xc8, 0x13, 0x56, 0x2c, 0x70, 0x08, 0xb6, 0xb8, 0x20, 0x22, 0xe5, 0x9d, 0x86, + 0xe2, 0xfb, 0xb0, 0x22, 0x9f, 0xc2, 0xb8, 0xfb, 0x9a, 0x71, 0x2b, 0x3b, 0x63, 0xcd, 0x65, 0xff, + 0x65, 0x80, 0xfd, 0xd5, 0x5e, 0xc1, 0x4f, 0x80, 0xc9, 0x29, 0x5b, 0x84, 0x3e, 0xfd, 0x8a, 0xcc, + 0xa9, 0x16, 0xc5, 0x3b, 0x1a, 0x6f, 0x7a, 0x85, 0x09, 0x97, 0xfd, 0x60, 0x90, 0xc3, 0x06, 0x31, + 0x13, 0x3a, 0xe9, 0x97, 0x4b, 0x2a, 0x35, 0x8a, 0x32, 0x8d, 0xa2, 0xab, 0x48, 0x5c, 0x33, 0x4f, + 0xb0, 0x30, 0x0a, 0x36, 0x02, 0x49, 0x32, 0x5c, 0x66, 0xb6, 0xff, 0x36, 0x80, 0xa9, 0x9f, 0xdc, + 0x0f, 0xb9, 0x80, 0xdf, 0x6f, 0x34, 0x12, 0x55, 0x6b, 0xa4, 0x44, 0xab, 0x36, 0x1e, 0xea, 0x98, + 0xdb, 0x4f, 0x37, 0xa5, 0x26, 0x7e, 0x01, 0x5a, 0xa1, 0xa0, 0x73, 0xde, 0xa9, 0x2b, 0x1d, 
0xbe, + 0x57, 0x51, 0xf7, 0xb9, 0xfe, 0xae, 0x24, 0x18, 0x67, 0x1c, 0xf6, 0x9f, 0xc5, 0xd3, 0xa5, 0xd2, + 0xe5, 0xe0, 0x4d, 0x62, 0x2e, 0xd6, 0x07, 0xef, 0x32, 0xe6, 0x02, 0x2b, 0x0b, 0x4c, 0xc1, 0x61, + 0xb8, 0x36, 0x1a, 0xba, 0xb4, 0x4e, 0xb5, 0x97, 0xe4, 0x30, 0xb7, 0xa3, 0xe9, 0x0f, 0xd7, 0x2d, + 0x78, 0x23, 0x84, 0x4d, 0xc1, 0x86, 0x17, 0xfc, 0x1a, 0x34, 0x27, 0x42, 0x24, 0xba, 0xc6, 0x17, + 0xd5, 0x07, 0xb2, 0x78, 0xc2, 0xb6, 0xca, 0x6e, 0x38, 0x1c, 0x60, 0x45, 0x65, 0xff, 0x57, 0xd4, + 0xc3, 0xcb, 0x34, 0x9e, 0xaf, 0x19, 0xe3, 0x6d, 0xd6, 0x8c, 0xf9, 0xdc, 0x8a, 0x81, 0x97, 0xa0, + 0x21, 0x66, 0x4f, 0x0d, 0x7c, 0xbf, 0x1a, 0xe3, 0xb0, 0xef, 0xb9, 0xa6, 0x2e, 0x58, 0x63, 0xd8, + 0xf7, 0xb0, 0xa4, 0x80, 0xd7, 0xa0, 0xc5, 0xd2, 0x19, 0x95, 0x23, 0xd8, 0xa8, 0x3e, 0xd2, 0x32, + 0xff, 0x42, 0x10, 0xf2, 0xc4, 0x71, 0xc6, 0x63, 0xff, 0x04, 0xf6, 0x56, 0xe6, 0x14, 0xde, 0x80, + 0xdd, 0x59, 0x4c, 0xc6, 0x2e, 0x99, 0x91, 0xc8, 0xa7, 0x4c, 0x97, 0x61, 0x45, 0x75, 0xf2, 0x6f, + 0xa5, 0xe4, 0x5b, 0xf2, 0xd3, 0x53, 0x7e, 0xa4, 0x83, 0xec, 0x96, 0x6d, 0x78, 0x85, 0xd1, 0x26, + 0x00, 0x14, 0x39, 0xc2, 0x13, 0xd0, 0x92, 0x3a, 0xcb, 0xd6, 0xec, 0x8e, 0xbb, 0x23, 0x5f, 0x28, + 0xe5, 0xc7, 0x71, 0x76, 0x0f, 0x7b, 0x00, 0x70, 0xea, 0x33, 0x2a, 0xd4, 0x32, 0xa8, 0x2b, 0xa1, + 0xe6, 0x6b, 0xcf, 0xcb, 0x2d, 0xb8, 0xe4, 0xe5, 0x9e, 0xdd, 0x3e, 0x5a, 0xb5, 0xbb, 0x47, 0xab, + 0x76, 0xff, 0x68, 0xd5, 0x7e, 0x5d, 0x5a, 0xc6, 0xed, 0xd2, 0x32, 0xee, 0x96, 0x96, 0x71, 0xbf, + 0xb4, 0x8c, 0x7f, 0x96, 0x96, 0xf1, 0xc7, 0xbf, 0x56, 0xed, 0xbb, 0xb6, 0x2e, 0xd3, 0xff, 0x01, + 0x00, 0x00, 0xff, 0xff, 0xdb, 0x8a, 0xe4, 0xd8, 0x21, 0x08, 0x00, 0x00, +} + func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -120,29 +406,37 @@ func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { } func (m *HTTPIngressPath) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPIngressPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n1, err := m.Backend.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HTTPIngressRuleValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -150,29 +444,36 @@ func (m *HTTPIngressRuleValue) Marshal() (dAtA []byte, err error) { } func (m *HTTPIngressRuleValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPIngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Paths) > 0 { - for _, msg := range m.Paths { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Paths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *Ingress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -180,41 +481,52 @@ func (m *Ingress) Marshal() (dAtA []byte, err error) { } func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n2, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n3, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n4, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *IngressBackend) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -222,29 +534,37 @@ func (m *IngressBackend) Marshal() (dAtA []byte, err error) { } func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i += copy(dAtA[i:], m.ServiceName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ServicePort.Size())) - n5, err := m.ServicePort.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ServicePort.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *IngressList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -252,37 +572,46 @@ func (m *IngressList) Marshal() (dAtA 
[]byte, err error) { } func (m *IngressList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *IngressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -290,29 +619,37 @@ func (m *IngressRule) Marshal() (dAtA []byte, err error) { } func (m *IngressRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) - i += copy(dAtA[i:], m.Host) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.IngressRuleValue.Size())) - n7, err := m.IngressRuleValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.IngressRuleValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -320,27 +657,34 @@ func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { } func (m *IngressRuleValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.HTTP != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HTTP.Size())) - n8, err := m.HTTP.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.HTTP.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *IngressSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return 
nil, err } @@ -348,51 +692,62 @@ func (m *IngressSpec) Marshal() (dAtA []byte, err error) { } func (m *IngressSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Backend != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n9, err := m.Backend.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i += n9 } if len(m.TLS) > 0 { - for _, msg := range m.TLS { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.TLS) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TLS[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.Backend != nil { + { + size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *IngressStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -400,25 +755,32 @@ func (m *IngressStatus) Marshal() (dAtA []byte, err error) { } func (m *IngressStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) - n10, err := m.LoadBalancer.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LoadBalancer.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *IngressTLS) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -426,42 +788,47 @@ func (m *IngressTLS) Marshal() (dAtA []byte, err error) { } func (m *IngressTLS) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressTLS) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.SecretName) + copy(dAtA[i:], m.SecretName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i-- + dAtA[i] = 0x12 if len(m.Hosts) > 0 { - for _, s := range m.Hosts { + for iNdEx := len(m.Hosts) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Hosts[iNdEx]) + copy(dAtA[i:], m.Hosts[iNdEx]) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Hosts[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) - i += copy(dAtA[i:], m.SecretName) - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *HTTPIngressPath) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Path) @@ -472,6 +839,9 @@ func (m *HTTPIngressPath) Size() (n int) { } func (m *HTTPIngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Paths) > 0 { @@ -484,6 +854,9 @@ func (m *HTTPIngressRuleValue) Size() (n int) { } func (m *Ingress) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -496,6 +869,9 @@ func (m *Ingress) Size() (n int) { } func (m *IngressBackend) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ServiceName) @@ -506,6 +882,9 @@ func (m *IngressBackend) Size() (n int) { } func (m *IngressList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -520,6 +899,9 @@ func (m *IngressList) Size() (n int) { } func (m *IngressRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Host) @@ -530,6 +912,9 @@ func (m *IngressRule) Size() (n int) { } func (m *IngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.HTTP != nil { @@ -540,6 +925,9 @@ func (m *IngressRuleValue) Size() (n int) { } func (m *IngressSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Backend != nil { @@ -562,6 +950,9 @@ func (m *IngressSpec) Size() (n int) { } func (m *IngressStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.LoadBalancer.Size() @@ -570,6 +961,9 @@ func (m *IngressStatus) Size() (n int) { } func (m *IngressTLS) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Hosts) > 0 { @@ -584,14 +978,7 @@ func (m *IngressTLS) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -611,8 +998,13 @@ func (this *HTTPIngressRuleValue) String() string { if this == nil { return "nil" } + repeatedStringForPaths := "[]HTTPIngressPath{" + for _, f := range this.Paths { + repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," + } + repeatedStringForPaths += "}" s := strings.Join([]string{`&HTTPIngressRuleValue{`, - `Paths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Paths), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + `,`, + `Paths:` + repeatedStringForPaths + `,`, `}`, }, "") return s @@ -622,7 +1014,7 @@ func (this *Ingress) String() string { return "nil" } s := strings.Join([]string{`&Ingress{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -635,7 +1027,7 @@ func (this *IngressBackend) String() string { } s := strings.Join([]string{`&IngressBackend{`, `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, - `ServicePort:` + strings.Replace(strings.Replace(this.ServicePort.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `ServicePort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServicePort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -644,9 +1036,14 @@ func (this *IngressList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Ingress{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Ingress", "Ingress", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&IngressList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Ingress", "Ingress", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -667,7 +1064,7 @@ func (this *IngressRuleValue) String() string { return "nil" } s := strings.Join([]string{`&IngressRuleValue{`, - `HTTP:` + strings.Replace(fmt.Sprintf("%v", this.HTTP), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, + `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, `}`, }, "") return s @@ -676,10 +1073,20 @@ func (this *IngressSpec) String() string { if this == nil { return "nil" } + repeatedStringForTLS := "[]IngressTLS{" + for _, f := range this.TLS { + repeatedStringForTLS += strings.Replace(strings.Replace(f.String(), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + "," + } + repeatedStringForTLS += "}" + repeatedStringForRules := "[]IngressRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "IngressRule", "IngressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&IngressSpec{`, - `Backend:` + strings.Replace(fmt.Sprintf("%v", this.Backend), "IngressBackend", "IngressBackend", 1) + `,`, - `TLS:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TLS), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "IngressRule", "IngressRule", 1), `&`, ``, 1) + `,`, + `Backend:` + strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1) + `,`, + `TLS:` + repeatedStringForTLS + `,`, + `Rules:` + repeatedStringForRules + `,`, `}`, }, "") return s @@ -689,7 +1096,7 @@ func (this *IngressStatus) String() string { return "nil" } s := strings.Join([]string{`&IngressStatus{`, - `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "k8s_io_api_core_v1.LoadBalancerStatus", 1), `&`, ``, 1) + 
`,`, + `LoadBalancer:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LoadBalancer), "LoadBalancerStatus", "v11.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -728,7 +1135,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -756,7 +1163,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -766,6 +1173,9 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -785,7 +1195,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -794,6 +1204,9 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -810,6 +1223,9 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -837,7 +1253,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -865,7 +1281,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -874,6 +1290,9 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -891,6 +1310,9 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -918,7 +1340,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -946,7 +1368,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -955,6 +1377,9 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -976,7 +1401,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -985,6 +1410,9 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1006,7 +1434,7 @@ func (m *Ingress) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1015,6 +1443,9 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1031,6 +1462,9 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1058,7 +1492,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1086,7 +1520,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1096,6 +1530,9 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1115,7 +1552,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1124,6 +1561,9 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1140,6 +1580,9 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1167,7 +1610,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1195,7 +1638,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1204,6 +1647,9 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1225,7 +1671,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1234,6 +1680,9 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1251,6 +1700,9 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1278,7 +1730,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1306,7 +1758,7 @@ func (m 
*IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1316,6 +1768,9 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1335,7 +1790,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1344,6 +1799,9 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1360,6 +1818,9 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1387,7 +1848,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1415,7 +1876,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1424,6 +1885,9 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1443,6 +1907,9 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1470,7 +1937,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1498,7 +1965,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1507,6 +1974,9 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1531,7 +2001,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1540,6 +2010,9 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1562,7 +2035,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1571,6 +2044,9 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1588,6 +2064,9 
@@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1615,7 +2094,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1643,7 +2122,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1652,6 +2131,9 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1668,6 +2150,9 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1695,7 +2180,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1723,7 +2208,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1733,6 +2218,9 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1752,7 +2240,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1762,6 +2250,9 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1776,6 +2267,9 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1842,10 +2336,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1874,6 +2371,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1892,62 +2392,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 812 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xcf, 0x6e, 0xfb, 0x44, - 0x10, 0x8e, 0xf3, 0xa7, 0x69, 0xd7, 0xfd, 0xa7, 0xa5, 0x87, 0xa8, 0x12, 0x6e, 0xe4, 0x03, 0x2a, - 
0x88, 0xae, 0x69, 0x0a, 0x88, 0xb3, 0x0f, 0xa8, 0x15, 0x81, 0x86, 0x75, 0x84, 0x10, 0xe2, 0xd0, - 0x8d, 0xb3, 0x38, 0x26, 0x89, 0x6d, 0x76, 0xd7, 0x41, 0xdc, 0x78, 0x01, 0x04, 0x4f, 0xc1, 0x99, - 0x23, 0x8f, 0xd0, 0x63, 0x8f, 0x3d, 0x55, 0x34, 0xbc, 0x07, 0x42, 0xbb, 0xde, 0xda, 0x4e, 0xd2, - 0xfe, 0x6a, 0xfd, 0x6e, 0xde, 0x9d, 0xf9, 0xbe, 0xd9, 0x99, 0xf9, 0x66, 0x0c, 0x3e, 0x9f, 0x7e, - 0xc6, 0x51, 0x18, 0x3b, 0xd3, 0x74, 0x44, 0x59, 0x44, 0x05, 0xe5, 0xce, 0x82, 0x46, 0xe3, 0x98, - 0x39, 0xda, 0x40, 0x92, 0xd0, 0x89, 0xa8, 0xf8, 0x39, 0x66, 0xd3, 0x30, 0x0a, 0x9c, 0xc5, 0xf9, - 0x88, 0x0a, 0x72, 0xee, 0x04, 0x34, 0xa2, 0x8c, 0x08, 0x3a, 0x46, 0x09, 0x8b, 0x45, 0x0c, 0xdf, - 0xcd, 0xdc, 0x11, 0x49, 0x42, 0x54, 0xb8, 0x23, 0xed, 0x7e, 0x7c, 0x16, 0x84, 0x62, 0x92, 0x8e, - 0x90, 0x1f, 0xcf, 0x9d, 0x20, 0x0e, 0x62, 0x47, 0xa1, 0x46, 0xe9, 0x0f, 0xea, 0xa4, 0x0e, 0xea, - 0x2b, 0x63, 0x3b, 0xb6, 0x4b, 0xc1, 0xfd, 0x98, 0x51, 0x67, 0xb1, 0x11, 0xf1, 0xf8, 0xe3, 0xc2, - 0x67, 0x4e, 0xfc, 0x49, 0x18, 0x51, 0xf6, 0x8b, 0x93, 0x4c, 0x03, 0x79, 0xc1, 0x9d, 0x39, 0x15, - 0xe4, 0x39, 0x94, 0xf3, 0x12, 0x8a, 0xa5, 0x91, 0x08, 0xe7, 0x74, 0x03, 0xf0, 0xe9, 0x6b, 0x00, - 0xee, 0x4f, 0xe8, 0x9c, 0x6c, 0xe0, 0x2e, 0x5e, 0xc2, 0xa5, 0x22, 0x9c, 0x39, 0x61, 0x24, 0xb8, - 0x60, 0xeb, 0x20, 0xfb, 0x37, 0x03, 0x1c, 0x5c, 0x0e, 0x87, 0x83, 0xab, 0x28, 0x60, 0x94, 0xf3, - 0x01, 0x11, 0x13, 0xd8, 0x05, 0xcd, 0x84, 0x88, 0x49, 0xc7, 0xe8, 0x1a, 0xa7, 0x3b, 0xee, 0xee, - 0xed, 0xc3, 0x49, 0x6d, 0xf9, 0x70, 0xd2, 0x94, 0x36, 0xac, 0x2c, 0xf0, 0x5b, 0xd0, 0x1e, 0x11, - 0x7f, 0x4a, 0xa3, 0x71, 0xa7, 0xde, 0x35, 0x4e, 0xcd, 0xde, 0x19, 0x7a, 0x63, 0x37, 0x90, 0xa6, - 0x77, 0x33, 0x90, 0x7b, 0xa0, 0x39, 0xdb, 0xfa, 0x02, 0x3f, 0xd1, 0xd9, 0x53, 0x70, 0x54, 0x7a, - 0x0e, 0x4e, 0x67, 0xf4, 0x1b, 0x32, 0x4b, 0x29, 0xf4, 0x40, 0x4b, 0x46, 0xe6, 0x1d, 0xa3, 0xdb, - 0x38, 0x35, 0x7b, 0xe8, 0x95, 0x78, 0x6b, 0x29, 0xb9, 0x7b, 0x3a, 0x60, 0x4b, 0x9e, 0x38, 0xce, - 0xb8, 0xec, 0xdf, 0xeb, 0xa0, 0xad, 0xbd, 0xe0, 0x0d, 0xd8, 0x96, 0x1d, 0x1c, 0x13, 0x41, 0x54, - 0xe2, 0x66, 0xef, 0xa3, 0x52, 0x8c, 0xbc, 0xa0, 0x28, 0x99, 0x06, 0xf2, 0x82, 0x23, 0xe9, 0x8d, - 0x16, 0xe7, 0xe8, 0x7a, 0xf4, 0x23, 0xf5, 0xc5, 0x97, 0x54, 0x10, 0x17, 0xea, 0x28, 0xa0, 0xb8, - 0xc3, 0x39, 0x2b, 0xec, 0x83, 0x26, 0x4f, 0xa8, 0xaf, 0x2b, 0xf6, 0x41, 0xb5, 0x8a, 0x79, 0x09, - 0xf5, 0x8b, 0x16, 0xc8, 0x13, 0x56, 0x2c, 0x70, 0x08, 0xb6, 0xb8, 0x20, 0x22, 0xe5, 0x9d, 0x86, - 0xe2, 0xfb, 0xb0, 0x22, 0x9f, 0xc2, 0xb8, 0xfb, 0x9a, 0x71, 0x2b, 0x3b, 0x63, 0xcd, 0x65, 0xff, - 0x65, 0x80, 0xfd, 0xd5, 0x5e, 0xc1, 0x4f, 0x80, 0xc9, 0x29, 0x5b, 0x84, 0x3e, 0xfd, 0x8a, 0xcc, - 0xa9, 0x16, 0xc5, 0x3b, 0x1a, 0x6f, 0x7a, 0x85, 0x09, 0x97, 0xfd, 0x60, 0x90, 0xc3, 0x06, 0x31, - 0x13, 0x3a, 0xe9, 0x97, 0x4b, 0x2a, 0x35, 0x8a, 0x32, 0x8d, 0xa2, 0xab, 0x48, 0x5c, 0x33, 0x4f, - 0xb0, 0x30, 0x0a, 0x36, 0x02, 0x49, 0x32, 0x5c, 0x66, 0xb6, 0xff, 0x36, 0x80, 0xa9, 0x9f, 0xdc, - 0x0f, 0xb9, 0x80, 0xdf, 0x6f, 0x34, 0x12, 0x55, 0x6b, 0xa4, 0x44, 0xab, 0x36, 0x1e, 0xea, 0x98, - 0xdb, 0x4f, 0x37, 0xa5, 0x26, 0x7e, 0x01, 0x5a, 0xa1, 0xa0, 0x73, 0xde, 0xa9, 0x2b, 0x1d, 0xbe, - 0x57, 0x51, 0xf7, 0xb9, 0xfe, 0xae, 0x24, 0x18, 0x67, 0x1c, 0xf6, 0x9f, 0xc5, 0xd3, 0xa5, 0xd2, - 0xe5, 0xe0, 0x4d, 0x62, 0x2e, 0xd6, 0x07, 0xef, 0x32, 0xe6, 0x02, 0x2b, 0x0b, 0x4c, 0xc1, 0x61, - 0xb8, 0x36, 0x1a, 0xba, 0xb4, 0x4e, 0xb5, 0x97, 0xe4, 0x30, 0xb7, 0xa3, 0xe9, 0x0f, 0xd7, 0x2d, - 0x78, 0x23, 0x84, 0x4d, 0xc1, 0x86, 0x17, 0xfc, 0x1a, 0x34, 0x27, 0x42, 0x24, 0xba, 0xc6, 0x17, - 0xd5, 0x07, 0xb2, 0x78, 
0xc2, 0xb6, 0xca, 0x6e, 0x38, 0x1c, 0x60, 0x45, 0x65, 0xff, 0x57, 0xd4, - 0xc3, 0xcb, 0x34, 0x9e, 0xaf, 0x19, 0xe3, 0x6d, 0xd6, 0x8c, 0xf9, 0xdc, 0x8a, 0x81, 0x97, 0xa0, - 0x21, 0x66, 0x4f, 0x0d, 0x7c, 0xbf, 0x1a, 0xe3, 0xb0, 0xef, 0xb9, 0xa6, 0x2e, 0x58, 0x63, 0xd8, - 0xf7, 0xb0, 0xa4, 0x80, 0xd7, 0xa0, 0xc5, 0xd2, 0x19, 0x95, 0x23, 0xd8, 0xa8, 0x3e, 0xd2, 0x32, - 0xff, 0x42, 0x10, 0xf2, 0xc4, 0x71, 0xc6, 0x63, 0xff, 0x04, 0xf6, 0x56, 0xe6, 0x14, 0xde, 0x80, - 0xdd, 0x59, 0x4c, 0xc6, 0x2e, 0x99, 0x91, 0xc8, 0xa7, 0x4c, 0x97, 0x61, 0x45, 0x75, 0xf2, 0x6f, - 0xa5, 0xe4, 0x5b, 0xf2, 0xd3, 0x53, 0x7e, 0xa4, 0x83, 0xec, 0x96, 0x6d, 0x78, 0x85, 0xd1, 0x26, - 0x00, 0x14, 0x39, 0xc2, 0x13, 0xd0, 0x92, 0x3a, 0xcb, 0xd6, 0xec, 0x8e, 0xbb, 0x23, 0x5f, 0x28, - 0xe5, 0xc7, 0x71, 0x76, 0x0f, 0x7b, 0x00, 0x70, 0xea, 0x33, 0x2a, 0xd4, 0x32, 0xa8, 0x2b, 0xa1, - 0xe6, 0x6b, 0xcf, 0xcb, 0x2d, 0xb8, 0xe4, 0xe5, 0x9e, 0xdd, 0x3e, 0x5a, 0xb5, 0xbb, 0x47, 0xab, - 0x76, 0xff, 0x68, 0xd5, 0x7e, 0x5d, 0x5a, 0xc6, 0xed, 0xd2, 0x32, 0xee, 0x96, 0x96, 0x71, 0xbf, - 0xb4, 0x8c, 0x7f, 0x96, 0x96, 0xf1, 0xc7, 0xbf, 0x56, 0xed, 0xbb, 0xb6, 0x2e, 0xd3, 0xff, 0x01, - 0x00, 0x00, 0xff, 0xff, 0xdb, 0x8a, 0xe4, 0xd8, 0x21, 0x08, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.proto b/vendor/k8s.io/api/networking/v1beta1/generated.proto index 7df19138e..a72545c81 100644 --- a/vendor/k8s.io/api/networking/v1beta1/generated.proto +++ b/vendor/k8s.io/api/networking/v1beta1/generated.proto @@ -64,17 +64,17 @@ message HTTPIngressRuleValue { // based virtual hosting etc. message Ingress { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec is the desired state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressSpec spec = 2; // Status is the current state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressStatus status = 3; } @@ -91,7 +91,7 @@ message IngressBackend { // IngressList is a collection of Ingress. message IngressList { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/networking/v1beta1/types.go b/vendor/k8s.io/api/networking/v1beta1/types.go index 63bf2d52a..37277bf81 100644 --- a/vendor/k8s.io/api/networking/v1beta1/types.go +++ b/vendor/k8s.io/api/networking/v1beta1/types.go @@ -32,17 +32,17 @@ import ( type Ingress struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec is the desired state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status is the current state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -53,7 +53,7 @@ type Ingress struct { type IngressList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go index 9e05b7f1b..4ae5e32d0 100644 --- a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go @@ -48,9 +48,9 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { var map_Ingress = map[string]string{ "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Ingress) SwaggerDoc() map[string]string { @@ -69,7 +69,7 @@ func (IngressBackend) SwaggerDoc() map[string]string { var map_IngressList = map[string]string{ "": "IngressList is a collection of Ingress.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of Ingress.", } diff --git a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go index 6342c985c..16ac936ae 100644 --- a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go @@ -111,7 +111,7 @@ func (in *IngressBackend) DeepCopy() *IngressBackend { func (in *IngressList) DeepCopyInto(out *IngressList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Ingress, len(*in)) diff --git a/vendor/k8s.io/api/node/v1alpha1/doc.go b/vendor/k8s.io/api/node/v1alpha1/doc.go index f7f8b78b4..dfe99540b 100644 --- a/vendor/k8s.io/api/node/v1alpha1/doc.go +++ b/vendor/k8s.io/api/node/v1alpha1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=node.k8s.io diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.pb.go b/vendor/k8s.io/api/node/v1alpha1/generated.pb.go index 16f5af929..34f4dd6de 100644 --- a/vendor/k8s.io/api/node/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/node/v1alpha1/generated.pb.go @@ -17,27 +17,25 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto - - It has these top-level messages: - RuntimeClass - RuntimeClassList - RuntimeClassSpec -*/ package v1alpha1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import strings "strings" -import reflect "reflect" + io "io" -import io "io" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" + resource "k8s.io/apimachinery/pkg/api/resource" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -50,27 +48,264 @@ var _ = math.Inf // proto package needs to be updated. 
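Throughout this diff, the regenerated gogo/protobuf code adds the same defensive checks to every Unmarshal and skipGenerated loop: after a length varint is decoded, the derived offset is rejected not only when it runs past the buffer but also when the addition wraps into a negative value (postIndex < 0, iNdEx + skippy < 0). The following is a minimal standalone sketch of that guarded read; it only illustrates the pattern, the function name and error values are invented here, and it is not the generated code itself.

package main

import (
	"errors"
	"fmt"
)

var (
	errInvalidLength = errors.New("proto: negative length found during unmarshaling")
	errIntOverflow   = errors.New("proto: integer overflow")
	errUnexpectedEOF = errors.New("unexpected EOF")
)

// readLengthDelimited mirrors the guarded decode loop used throughout the
// regenerated *.pb.go files: decode a varint length, then reject a negative
// length and a negative post-index (i.e. integer overflow) before slicing.
func readLengthDelimited(data []byte, iNdEx int) ([]byte, int, error) {
	var msglen int
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return nil, 0, errIntOverflow
		}
		if iNdEx >= len(data) {
			return nil, 0, errUnexpectedEOF
		}
		b := data[iNdEx]
		iNdEx++
		msglen |= int(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	if msglen < 0 {
		return nil, 0, errInvalidLength
	}
	postIndex := iNdEx + msglen
	if postIndex < 0 { // the newly added guard: the addition itself may overflow
		return nil, 0, errInvalidLength
	}
	if postIndex > len(data) {
		return nil, 0, errUnexpectedEOF
	}
	return data[iNdEx:postIndex], postIndex, nil
}

func main() {
	// 0x04 = varint length 4, followed by four payload bytes.
	payload, next, err := readLengthDelimited([]byte{0x04, 'e', 't', 'c', 'd'}, 0)
	fmt.Println(string(payload), next, err) // etcd 5 <nil>
}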
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } -func (*RuntimeClass) ProtoMessage() {} -func (*RuntimeClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *Overhead) Reset() { *m = Overhead{} } +func (*Overhead) ProtoMessage() {} +func (*Overhead) Descriptor() ([]byte, []int) { + return fileDescriptor_82a78945ab308218, []int{0} +} +func (m *Overhead) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Overhead) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Overhead) XXX_Merge(src proto.Message) { + xxx_messageInfo_Overhead.Merge(m, src) +} +func (m *Overhead) XXX_Size() int { + return m.Size() +} +func (m *Overhead) XXX_DiscardUnknown() { + xxx_messageInfo_Overhead.DiscardUnknown(m) +} + +var xxx_messageInfo_Overhead proto.InternalMessageInfo + +func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } +func (*RuntimeClass) ProtoMessage() {} +func (*RuntimeClass) Descriptor() ([]byte, []int) { + return fileDescriptor_82a78945ab308218, []int{1} +} +func (m *RuntimeClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClass.Merge(m, src) +} +func (m *RuntimeClass) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClass) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClass.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClass proto.InternalMessageInfo -func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } -func (*RuntimeClassList) ProtoMessage() {} -func (*RuntimeClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } +func (*RuntimeClassList) ProtoMessage() {} +func (*RuntimeClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_82a78945ab308218, []int{2} +} +func (m *RuntimeClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassList.Merge(m, src) +} +func (m *RuntimeClassList) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClassList) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassList.DiscardUnknown(m) +} -func (m *RuntimeClassSpec) Reset() { *m = RuntimeClassSpec{} } -func (*RuntimeClassSpec) ProtoMessage() {} -func (*RuntimeClassSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_RuntimeClassList proto.InternalMessageInfo + +func (m *RuntimeClassSpec) Reset() { *m = RuntimeClassSpec{} } +func (*RuntimeClassSpec) ProtoMessage() {} +func (*RuntimeClassSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_82a78945ab308218, []int{3} +} +func (m *RuntimeClassSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClassSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassSpec.Merge(m, src) +} +func (m *RuntimeClassSpec) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClassSpec) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClassSpec proto.InternalMessageInfo + +func (m *Scheduling) Reset() { *m = Scheduling{} } +func (*Scheduling) ProtoMessage() {} +func (*Scheduling) Descriptor() ([]byte, []int) { + return fileDescriptor_82a78945ab308218, []int{4} +} +func (m *Scheduling) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scheduling) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scheduling.Merge(m, src) +} +func (m *Scheduling) XXX_Size() int { + return m.Size() +} +func (m *Scheduling) XXX_DiscardUnknown() { + xxx_messageInfo_Scheduling.DiscardUnknown(m) +} + +var xxx_messageInfo_Scheduling proto.InternalMessageInfo func init() { + proto.RegisterType((*Overhead)(nil), "k8s.io.api.node.v1alpha1.Overhead") + proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.api.node.v1alpha1.Overhead.PodFixedEntry") proto.RegisterType((*RuntimeClass)(nil), "k8s.io.api.node.v1alpha1.RuntimeClass") proto.RegisterType((*RuntimeClassList)(nil), "k8s.io.api.node.v1alpha1.RuntimeClassList") proto.RegisterType((*RuntimeClassSpec)(nil), "k8s.io.api.node.v1alpha1.RuntimeClassSpec") + proto.RegisterType((*Scheduling)(nil), "k8s.io.api.node.v1alpha1.Scheduling") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.node.v1alpha1.Scheduling.NodeSelectorEntry") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto", fileDescriptor_82a78945ab308218) +} + +var fileDescriptor_82a78945ab308218 = []byte{ + // 698 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xbf, 0x6f, 0xd3, 0x4e, + 0x14, 0xcf, 0xa5, 0xad, 0x94, 0x5e, 0xd2, 0xaa, 0x5f, 0x7f, 0x2b, 0x14, 0x65, 0x70, 0x2a, 0x0b, + 0xa1, 0x0a, 0xa9, 0x67, 0x5a, 0xa1, 0xaa, 0x62, 0x00, 0x61, 0x7e, 0x08, 0x44, 0x69, 0xc1, 0x2d, + 0x0b, 0x62, 0xe0, 0x62, 0x3f, 0x1c, 0x13, 0xdb, 0x67, 0xd9, 0xe7, 0x88, 0x6c, 0x88, 0x05, 0x89, + 0x89, 0x89, 0xff, 0x06, 0xe6, 0x8e, 0x9d, 0x50, 0xa7, 0x96, 0x86, 0xff, 0x81, 0x81, 0x09, 0x9d, + 0x7d, 0x4e, 0x9c, 0xa4, 0xa1, 0x61, 0xf3, 0xdd, 0x7d, 0x7e, 0xdc, 0xfb, 0xbc, 0x7b, 0xc6, 0x77, + 0x3b, 0x3b, 0x31, 0x71, 0x99, 0xde, 0x49, 0x5a, 0x10, 0x05, 0xc0, 0x21, 0xd6, 0xbb, 0x10, 0xd8, + 0x2c, 0xd2, 0xe5, 0x01, 0x0d, 0x5d, 0x3d, 0x60, 0x36, 0xe8, 0xdd, 0x4d, 0xea, 0x85, 0x6d, 0xba, + 0xa9, 0x3b, 0x10, 0x40, 0x44, 0x39, 0xd8, 0x24, 0x8c, 0x18, 0x67, 0x4a, 0x3d, 0x43, 0x12, 0x1a, + 0xba, 0x44, 0x20, 0x49, 0x8e, 0x6c, 0x6c, 0x38, 0x2e, 0x6f, 0x27, 0x2d, 0x62, 0x31, 0x5f, 0x77, + 0x98, 0xc3, 0xf4, 0x94, 0xd0, 0x4a, 0xde, 0xa4, 0xab, 0x74, 0x91, 0x7e, 0x65, 0x42, 0x0d, 0xad, + 0x60, 0x69, 0xb1, 0x48, 0x58, 0x8e, 0x9b, 0x35, 0x6e, 0x0e, 0x31, 0x3e, 0xb5, 0xda, 0x6e, 0x00, + 0x51, 0x4f, 0x0f, 0x3b, 0x4e, 0x4a, 0x8a, 0x20, 0x66, 0x49, 0x64, 0xc1, 0x3f, 0xb1, 0x62, 0xdd, + 0x07, 0x4e, 0x2f, 0xf2, 0xd2, 0xa7, 0xb1, 0xa2, 0x24, 0xe0, 0xae, 0x3f, 0x69, 0xb3, 0x7d, 0x19, + 0x21, 0xb6, 0xda, 0xe0, 0xd3, 0x71, 0x9e, 0x76, 
0x5c, 0xc6, 0x95, 0xfd, 0x2e, 0x44, 0x6d, 0xa0, + 0xb6, 0xf2, 0x1d, 0xe1, 0x4a, 0xc8, 0xec, 0x87, 0xee, 0x3b, 0xb0, 0xeb, 0x68, 0x6d, 0x6e, 0xbd, + 0xba, 0x75, 0x83, 0x4c, 0x8b, 0x98, 0xe4, 0x34, 0xf2, 0x4c, 0x52, 0x1e, 0x04, 0x3c, 0xea, 0x19, + 0x1f, 0xd1, 0xd1, 0x69, 0xb3, 0xd4, 0x3f, 0x6d, 0x56, 0xf2, 0xfd, 0xdf, 0xa7, 0xcd, 0xe6, 0x64, + 0xbe, 0xc4, 0x94, 0x91, 0xed, 0xba, 0x31, 0xff, 0x70, 0xf6, 0x57, 0xc8, 0x1e, 0xf5, 0xe1, 0xd3, + 0x59, 0x73, 0x63, 0x96, 0x0e, 0x90, 0xe7, 0x09, 0x0d, 0xb8, 0xcb, 0x7b, 0xe6, 0xa0, 0x96, 0x46, + 0x07, 0x2f, 0x8d, 0x5c, 0x52, 0x59, 0xc1, 0x73, 0x1d, 0xe8, 0xd5, 0xd1, 0x1a, 0x5a, 0x5f, 0x34, + 0xc5, 0xa7, 0x72, 0x1f, 0x2f, 0x74, 0xa9, 0x97, 0x40, 0xbd, 0xbc, 0x86, 0xd6, 0xab, 0x5b, 0xa4, + 0x50, 0xf7, 0xc0, 0x8b, 0x84, 0x1d, 0x27, 0x0d, 0x62, 0xd2, 0x2b, 0x23, 0xdf, 0x2a, 0xef, 0x20, + 0xed, 0x1b, 0xc2, 0x35, 0x33, 0x4b, 0xfd, 0x9e, 0x47, 0xe3, 0x58, 0x79, 0x8d, 0x2b, 0xa2, 0xcf, + 0x36, 0xe5, 0x34, 0x75, 0x1c, 0x4d, 0x75, 0x42, 0x3d, 0x26, 0x02, 0x4d, 0xba, 0x9b, 0x64, 0xbf, + 0xf5, 0x16, 0x2c, 0xfe, 0x14, 0x38, 0x35, 0x14, 0x19, 0x2a, 0x1e, 0xee, 0x99, 0x03, 0x55, 0x65, + 0x17, 0xcf, 0xc7, 0x21, 0x58, 0xf2, 0xee, 0xd7, 0xa7, 0xf7, 0xac, 0x78, 0xaf, 0x83, 0x10, 0x2c, + 0xa3, 0x26, 0x75, 0xe7, 0xc5, 0xca, 0x4c, 0x55, 0xb4, 0xaf, 0x08, 0xaf, 0x14, 0x81, 0xa2, 0x41, + 0xca, 0xab, 0x89, 0x22, 0xc8, 0x6c, 0x45, 0x08, 0x76, 0x5a, 0xc2, 0x4a, 0xfe, 0x2e, 0xf2, 0x9d, + 0x42, 0x01, 0x4f, 0xf0, 0x82, 0xcb, 0xc1, 0x8f, 0xeb, 0xe5, 0xf4, 0xd5, 0x5d, 0x9b, 0xad, 0x02, + 0x63, 0x49, 0x4a, 0x2e, 0x3c, 0x16, 0x64, 0x33, 0xd3, 0xd0, 0x7e, 0x8d, 0xdd, 0x5f, 0x94, 0xa6, + 0xdc, 0xc6, 0xcb, 0x72, 0x14, 0x1e, 0xd1, 0xc0, 0xf6, 0x20, 0xca, 0x9a, 0x6f, 0x5c, 0x91, 0x12, + 0xcb, 0xe6, 0xc8, 0xa9, 0x39, 0x86, 0x56, 0x76, 0x71, 0x85, 0xc9, 0x07, 0x2f, 0x63, 0xd6, 0x2e, + 0x1f, 0x0d, 0xa3, 0x26, 0xea, 0xcd, 0x57, 0xe6, 0x40, 0x41, 0x39, 0xc4, 0x58, 0x0c, 0xa4, 0x9d, + 0x78, 0x6e, 0xe0, 0xd4, 0xe7, 0x52, 0xbd, 0xab, 0xd3, 0xf5, 0x0e, 0x06, 0x58, 0x63, 0x59, 0x3c, + 0x82, 0xe1, 0xda, 0x2c, 0xe8, 0x68, 0x5f, 0xca, 0xb8, 0x70, 0xa4, 0x84, 0xb8, 0x26, 0x64, 0x0e, + 0xc0, 0x03, 0x8b, 0xb3, 0x48, 0x4e, 0xf4, 0xf6, 0x2c, 0x36, 0x64, 0xaf, 0x40, 0xcc, 0xe6, 0x7a, + 0x55, 0x06, 0x55, 0x2b, 0x1e, 0x99, 0x23, 0x0e, 0xca, 0x0b, 0x5c, 0xe5, 0xcc, 0x13, 0x3f, 0x18, + 0x97, 0x05, 0x79, 0x33, 0xd5, 0xa2, 0xa1, 0x98, 0x6c, 0xf1, 0x2a, 0x0e, 0x07, 0x30, 0xe3, 0x7f, + 0x29, 0x5c, 0x1d, 0xee, 0xc5, 0x66, 0x51, 0xa7, 0x71, 0x07, 0xff, 0x37, 0x71, 0x9f, 0x0b, 0x46, + 0x78, 0xb5, 0x38, 0xc2, 0x8b, 0x85, 0x91, 0x34, 0xc8, 0xd1, 0xb9, 0x5a, 0x3a, 0x3e, 0x57, 0x4b, + 0x27, 0xe7, 0x6a, 0xe9, 0x7d, 0x5f, 0x45, 0x47, 0x7d, 0x15, 0x1d, 0xf7, 0x55, 0x74, 0xd2, 0x57, + 0xd1, 0x8f, 0xbe, 0x8a, 0x3e, 0xff, 0x54, 0x4b, 0x2f, 0x2b, 0x79, 0x10, 0x7f, 0x02, 0x00, 0x00, + 0xff, 0xff, 0xa8, 0x77, 0xef, 0x80, 0x9b, 0x06, 0x00, 0x00, +} + +func (m *Overhead) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } + +func (m *Overhead) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Overhead) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PodFixed) > 0 { + keysForPodFixed := make([]string, 0, len(m.PodFixed)) + for k := range m.PodFixed { + keysForPodFixed = append(keysForPodFixed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) + 
for iNdEx := len(keysForPodFixed) - 1; iNdEx >= 0; iNdEx-- { + v := m.PodFixed[k8s_io_api_core_v1.ResourceName(keysForPodFixed[iNdEx])] + baseI := i + { + size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForPodFixed[iNdEx]) + copy(dAtA[i:], keysForPodFixed[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPodFixed[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *RuntimeClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -78,33 +313,42 @@ func (m *RuntimeClass) Marshal() (dAtA []byte, err error) { } func (m *RuntimeClass) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RuntimeClassList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -112,65 +356,188 @@ func (m *RuntimeClassList) Marshal() (dAtA []byte, err error) { } func (m *RuntimeClassList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) + return len(dAtA) - i, nil +} + +func (m *RuntimeClassSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { - return 0, err + return nil, err } - i += n3 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + return dAtA[:n], nil +} + +func (m 
*RuntimeClassSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Scheduling != nil { + { + size, err := m.Scheduling.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Overhead != nil { + { + size, err := m.Overhead.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.RuntimeHandler) + copy(dAtA[i:], m.RuntimeHandler) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RuntimeHandler))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *RuntimeClassSpec) Marshal() (dAtA []byte, err error) { +func (m *Scheduling) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *RuntimeClassSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Scheduling) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RuntimeHandler))) - i += copy(dAtA[i:], m.RuntimeHandler) - return i, nil + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.NodeSelector) > 0 { + keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) + for k := range m.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.NodeSelector[string(keysForNodeSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForNodeSelector[iNdEx]) + copy(dAtA[i:], keysForNodeSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base +} +func (m *Overhead) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.PodFixed) > 0 { + for k, v := range m.PodFixed { + _ = k + _ = v + l = ((*resource.Quantity)(&v)).Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n } + func (m *RuntimeClass) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -181,6 +548,9 @@ func (m *RuntimeClass) Size() (n int) { 
} func (m *RuntimeClassList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -195,32 +565,79 @@ func (m *RuntimeClassList) Size() (n int) { } func (m *RuntimeClassSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.RuntimeHandler) n += 1 + l + sovGenerated(uint64(l)) + if m.Overhead != nil { + l = m.Overhead.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Scheduling != nil { + l = m.Scheduling.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break +func (m *Scheduling) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } return n } + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *Overhead) String() string { + if this == nil { + return "nil" + } + keysForPodFixed := make([]string, 0, len(this.PodFixed)) + for k := range this.PodFixed { + keysForPodFixed = append(keysForPodFixed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) + mapStringForPodFixed := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForPodFixed { + mapStringForPodFixed += fmt.Sprintf("%v: %v,", k, this.PodFixed[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForPodFixed += "}" + s := strings.Join([]string{`&Overhead{`, + `PodFixed:` + mapStringForPodFixed + `,`, + `}`, + }, "") + return s +} func (this *RuntimeClass) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&RuntimeClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "RuntimeClassSpec", "RuntimeClassSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -230,9 +647,14 @@ func (this *RuntimeClassList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]RuntimeClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RuntimeClass", "RuntimeClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RuntimeClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RuntimeClass", "RuntimeClass", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -241,19 +663,229 @@ func (this *RuntimeClassSpec) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&RuntimeClassSpec{`, - `RuntimeHandler:` + 
fmt.Sprintf("%v", this.RuntimeHandler) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + s := strings.Join([]string{`&RuntimeClassSpec{`, + `RuntimeHandler:` + fmt.Sprintf("%v", this.RuntimeHandler) + `,`, + `Overhead:` + strings.Replace(this.Overhead.String(), "Overhead", "Overhead", 1) + `,`, + `Scheduling:` + strings.Replace(this.Scheduling.String(), "Scheduling", "Scheduling", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Scheduling) String() string { + if this == nil { + return "nil" + } + repeatedStringForTolerations := "[]Toleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," + } + repeatedStringForTolerations += "}" + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + s := strings.Join([]string{`&Scheduling{`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Overhead) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Overhead: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Overhead: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodFixed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodFixed == nil { + m.PodFixed = make(k8s_io_api_core_v1.ResourceList) + } + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.PodFixed[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } func (m *RuntimeClass) Unmarshal(dAtA []byte) error { l := len(dAtA) @@ -270,7 +902,7 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -298,7 +930,7 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -307,6 +939,9 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -328,7 +963,7 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -337,6 +972,9 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -353,6 +991,9 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -380,7 +1021,7 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) 
<< shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -408,7 +1049,7 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -417,6 +1058,9 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -438,7 +1082,7 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -447,6 +1091,9 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -464,6 +1111,9 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -491,7 +1141,7 @@ func (m *RuntimeClassSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -519,7 +1169,7 @@ func (m *RuntimeClassSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -529,11 +1179,300 @@ func (m *RuntimeClassSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.RuntimeHandler = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Overhead", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Overhead == nil { + m.Overhead = &Overhead{} + } + if err := m.Overhead.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scheduling", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scheduling == nil { + m.Scheduling = &Scheduling{} + } + if err := m.Scheduling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 
{ + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Scheduling) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scheduling: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scheduling: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.NodeSelector[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, v11.Toleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -543,6 +1482,9 @@ func (m *RuntimeClassSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -609,10 +1551,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -641,6 +1586,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -659,38 +1607,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 421 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0x41, 0x6b, 0xd4, 0x40, - 0x14, 0xc7, 0x33, 0xb5, 0x85, 0x75, 0x5a, 0x4b, 0xc9, 0x41, 0xc2, 0x1e, 0xa6, 0x65, 0x0f, 0x52, - 0x04, 0x67, 0xdc, 0x22, 0xe2, 0x49, 0x30, 0x5e, 0x14, 0x2b, 0x42, 0xbc, 0x89, 0x07, 0x27, 0xc9, - 0x33, 0x19, 0xb3, 0xc9, 0x0c, 0x99, 0x49, 0xc0, 0x9b, 0x1f, 0xc1, 0x2f, 0xa4, 0xe7, 0x3d, 0xf6, - 0xd8, 0x53, 0x71, 0xe3, 0x17, 0x91, 0x99, 0x64, 0xbb, 0xdb, 0x2e, 0xc5, 0xbd, 0xe5, 0xbd, 0xf9, - 0xff, 0x7f, 0xef, 0xfd, 0x5f, 0xf0, 0xab, 0xe2, 0x85, 0xa6, 0x42, 0xb2, 0xa2, 0x89, 0xa1, 0xae, - 0xc0, 0x80, 0x66, 0x2d, 0x54, 0xa9, 0xac, 0xd9, 0xf0, 0xc0, 0x95, 0x60, 0x95, 0x4c, 0x81, 0xb5, - 0x53, 0x3e, 0x53, 0x39, 0x9f, 0xb2, 0x0c, 0x2a, 0xa8, 0xb9, 0x81, 0x94, 0xaa, 0x5a, 0x1a, 0xe9, - 0x07, 0xbd, 0x92, 0x72, 0x25, 0xa8, 0x55, 0xd2, 0xa5, 0x72, 0xfc, 0x24, 0x13, 0x26, 0x6f, 0x62, - 0x9a, 0xc8, 0x92, 0x65, 0x32, 0x93, 0xcc, 0x19, 0xe2, 0xe6, 0xab, 0xab, 0x5c, 0xe1, 0xbe, 0x7a, - 0xd0, 0xf8, 0xd9, 0x6a, 0x64, 0xc9, 0x93, 0x5c, 0x54, 0x50, 0x7f, 0x67, 0xaa, 0xc8, 0x6c, 0x43, - 0xb3, 0x12, 0x0c, 0x67, 0xed, 0xc6, 0xf8, 0x31, 0xbb, 0xcb, 0x55, 0x37, 0x95, 0x11, 0x25, 0x6c, - 0x18, 0x9e, 0xff, 0xcf, 0xa0, 0x93, 0x1c, 0x4a, 0x7e, 0xdb, 0x37, 0xf9, 0x8d, 0xf0, 0x41, 0xd4, - 0x4b, 0x5e, 0xcf, 0xb8, 0xd6, 0xfe, 0x17, 0x3c, 0xb2, 0x4b, 0xa5, 0xdc, 0xf0, 0x00, 0x9d, 0xa0, - 0xd3, 0xfd, 0xb3, 0xa7, 0x74, 0x75, 
0x8b, 0x6b, 0x36, 0x55, 0x45, 0x66, 0x1b, 0x9a, 0x5a, 0x35, - 0x6d, 0xa7, 0xf4, 0x43, 0xfc, 0x0d, 0x12, 0xf3, 0x1e, 0x0c, 0x0f, 0xfd, 0xf9, 0xd5, 0xb1, 0xd7, - 0x5d, 0x1d, 0xe3, 0x55, 0x2f, 0xba, 0xa6, 0xfa, 0xe7, 0x78, 0x57, 0x2b, 0x48, 0x82, 0x1d, 0x47, - 0x7f, 0x4c, 0xef, 0xba, 0x34, 0x5d, 0xdf, 0xeb, 0xa3, 0x82, 0x24, 0x3c, 0x18, 0xb8, 0xbb, 0xb6, - 0x8a, 0x1c, 0x65, 0xf2, 0x0b, 0xe1, 0xa3, 0x75, 0xe1, 0xb9, 0xd0, 0xc6, 0xff, 0xbc, 0x11, 0x82, - 0x6e, 0x17, 0xc2, 0xba, 0x5d, 0x84, 0xa3, 0x61, 0xd4, 0x68, 0xd9, 0x59, 0x0b, 0xf0, 0x0e, 0xef, - 0x09, 0x03, 0xa5, 0x0e, 0x76, 0x4e, 0xee, 0x9d, 0xee, 0x9f, 0x3d, 0xda, 0x2e, 0x41, 0xf8, 0x60, - 0x40, 0xee, 0xbd, 0xb5, 0xe6, 0xa8, 0x67, 0x4c, 0xa2, 0x9b, 0xeb, 0xdb, 0x64, 0xfe, 0x4b, 0x7c, - 0x38, 0xfc, 0xb6, 0x37, 0xbc, 0x4a, 0x67, 0x50, 0xbb, 0x10, 0xf7, 0xc3, 0x87, 0x03, 0xe1, 0x30, - 0xba, 0xf1, 0x1a, 0xdd, 0x52, 0x87, 0x74, 0xbe, 0x20, 0xde, 0xc5, 0x82, 0x78, 0x97, 0x0b, 0xe2, - 0xfd, 0xe8, 0x08, 0x9a, 0x77, 0x04, 0x5d, 0x74, 0x04, 0x5d, 0x76, 0x04, 0xfd, 0xe9, 0x08, 0xfa, - 0xf9, 0x97, 0x78, 0x9f, 0x46, 0xcb, 0x35, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x94, 0x34, 0x0e, - 0xef, 0x30, 0x03, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.proto b/vendor/k8s.io/api/node/v1alpha1/generated.proto index ca4e5e535..ac05f839e 100644 --- a/vendor/k8s.io/api/node/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/node/v1alpha1/generated.proto @@ -21,6 +21,8 @@ syntax = 'proto2'; package k8s.io.api.node.v1alpha1; +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -28,6 +30,13 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1alpha1"; +// Overhead structure represents the resource overhead associated with running a pod. +message Overhead { + // PodFixed represents the fixed resource overhead associated with running a pod. + // +optional + map podFixed = 1; +} + // RuntimeClass defines a class of container runtime supported in the cluster. // The RuntimeClass is used to determine which container runtime is used to run // all containers in a pod. RuntimeClasses are (currently) manually defined by a @@ -36,19 +45,19 @@ option go_package = "v1alpha1"; // pod. For more details, see // https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md message RuntimeClass { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the RuntimeClass - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status optional RuntimeClassSpec spec = 2; } // RuntimeClassList is a list of RuntimeClass objects. message RuntimeClassList { // Standard list metadata. 
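The new Overhead message above carries a single podFixed map keyed by resource name with quantity values (the vendored Go type further down in this diff declares it as a corev1.ResourceList). As a rough illustration of what that fixed per-pod cost means under the PodOverhead KEP referenced in the comments, the sketch below adds it on top of a pod's container requests; the quantities and the summing loop are invented for the example and are not part of this diff.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	nodev1alpha1 "k8s.io/api/node/v1alpha1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Fixed per-pod overhead a RuntimeClass might declare (values are made up).
	overhead := nodev1alpha1.Overhead{
		PodFixed: corev1.ResourceList{
			corev1.ResourceCPU:    resource.MustParse("250m"),
			corev1.ResourceMemory: resource.MustParse("120Mi"),
		},
	}

	// A pod's aggregated container requests (also made up).
	requests := corev1.ResourceList{
		corev1.ResourceCPU:    resource.MustParse("500m"),
		corev1.ResourceMemory: resource.MustParse("256Mi"),
	}

	// Effective demand = container requests + PodFixed overhead.
	effective := corev1.ResourceList{}
	for name, qty := range requests {
		total := qty.DeepCopy()
		if extra, ok := overhead.PodFixed[name]; ok {
			total.Add(extra)
		}
		effective[name] = total
	}

	cpu := effective[corev1.ResourceCPU]
	mem := effective[corev1.ResourceMemory]
	fmt.Println(cpu.String(), mem.String()) // e.g. 750m 376Mi
}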
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -72,5 +81,38 @@ message RuntimeClassSpec { // The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements // and is immutable. optional string runtimeHandler = 1; + + // Overhead represents the resource overhead associated with running a pod for a + // given RuntimeClass. For more details, see + // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md + // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature. + // +optional + optional Overhead overhead = 2; + + // Scheduling holds the scheduling constraints to ensure that pods running + // with this RuntimeClass are scheduled to nodes that support it. + // If scheduling is nil, this RuntimeClass is assumed to be supported by all + // nodes. + // +optional + optional Scheduling scheduling = 3; +} + +// Scheduling specifies the scheduling constraints for nodes supporting a +// RuntimeClass. +message Scheduling { + // nodeSelector lists labels that must be present on nodes that support this + // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a + // node matched by this selector. The RuntimeClass nodeSelector is merged + // with a pod's existing nodeSelector. Any conflicts will cause the pod to + // be rejected in admission. + // +optional + map nodeSelector = 1; + + // tolerations are appended (excluding duplicates) to pods running with this + // RuntimeClass during admission, effectively unioning the set of nodes + // tolerated by the pod and the RuntimeClass. + // +optional + // +listType=atomic + repeated k8s.io.api.core.v1.Toleration tolerations = 2; } diff --git a/vendor/k8s.io/api/node/v1alpha1/types.go b/vendor/k8s.io/api/node/v1alpha1/types.go index 2ce67c116..b59767107 100644 --- a/vendor/k8s.io/api/node/v1alpha1/types.go +++ b/vendor/k8s.io/api/node/v1alpha1/types.go @@ -17,6 +17,7 @@ limitations under the License. package v1alpha1 import ( + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -33,12 +34,12 @@ import ( // https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md type RuntimeClass struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the RuntimeClass - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status Spec RuntimeClassSpec `json:"spec" protobuf:"bytes,2,name=spec"` } @@ -58,6 +59,46 @@ type RuntimeClassSpec struct { // The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements // and is immutable. RuntimeHandler string `json:"runtimeHandler" protobuf:"bytes,1,opt,name=runtimeHandler"` + + // Overhead represents the resource overhead associated with running a pod for a + // given RuntimeClass. 
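RuntimeClassSpec now carries optional overhead and scheduling fields next to runtimeHandler, and Scheduling pairs a plain string-to-string nodeSelector with a list of tolerations. Below is a short example of declaring a RuntimeClass that uses both, written against the vendored Go types that follow; the "kata" handler name, the node label, and the taint key are hypothetical values chosen for illustration.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	nodev1alpha1 "k8s.io/api/node/v1alpha1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	rc := nodev1alpha1.RuntimeClass{
		ObjectMeta: metav1.ObjectMeta{Name: "kata"},
		Spec: nodev1alpha1.RuntimeClassSpec{
			RuntimeHandler: "kata",
			Overhead: &nodev1alpha1.Overhead{
				PodFixed: corev1.ResourceList{
					corev1.ResourceMemory: resource.MustParse("160Mi"),
				},
			},
			Scheduling: &nodev1alpha1.Scheduling{
				// Pods using this class can only land on nodes carrying this label.
				NodeSelector: map[string]string{"node.example.com/kata": "true"},
				// They also implicitly tolerate a taint that keeps other pods away.
				Tolerations: []corev1.Toleration{{
					Key:      "node.example.com/kata",
					Operator: corev1.TolerationOpExists,
					Effect:   corev1.TaintEffectNoSchedule,
				}},
			},
		},
	}

	fmt.Printf("%s -> handler=%s, nodeSelector=%v\n",
		rc.Name, rc.Spec.RuntimeHandler, rc.Spec.Scheduling.NodeSelector)
}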
For more details, see + // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md + // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature. + // +optional + Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,2,opt,name=overhead"` + + // Scheduling holds the scheduling constraints to ensure that pods running + // with this RuntimeClass are scheduled to nodes that support it. + // If scheduling is nil, this RuntimeClass is assumed to be supported by all + // nodes. + // +optional + Scheduling *Scheduling `json:"scheduling,omitempty" protobuf:"bytes,3,opt,name=scheduling"` +} + +// Overhead structure represents the resource overhead associated with running a pod. +type Overhead struct { + // PodFixed represents the fixed resource overhead associated with running a pod. + // +optional + PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` +} + +// Scheduling specifies the scheduling constraints for nodes supporting a +// RuntimeClass. +type Scheduling struct { + // nodeSelector lists labels that must be present on nodes that support this + // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a + // node matched by this selector. The RuntimeClass nodeSelector is merged + // with a pod's existing nodeSelector. Any conflicts will cause the pod to + // be rejected in admission. + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` + + // tolerations are appended (excluding duplicates) to pods running with this + // RuntimeClass during admission, effectively unioning the set of nodes + // tolerated by the pod and the RuntimeClass. + // +optional + // +listType=atomic + Tolerations []corev1.Toleration `json:"tolerations,omitempty" protobuf:"bytes,2,rep,name=tolerations"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -66,7 +107,7 @@ type RuntimeClassSpec struct { type RuntimeClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go index a51fa525d..390000172 100644 --- a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go @@ -27,10 +27,19 @@ package v1alpha1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Overhead = map[string]string{ + "": "Overhead structure represents the resource overhead associated with running a pod.", + "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", +} + +func (Overhead) SwaggerDoc() map[string]string { + return map_Overhead +} + var map_RuntimeClass = map[string]string{ "": "RuntimeClass defines a class of container runtime supported in the cluster. 
The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the RuntimeClass More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the RuntimeClass More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (RuntimeClass) SwaggerDoc() map[string]string { @@ -39,7 +48,7 @@ func (RuntimeClass) SwaggerDoc() map[string]string { var map_RuntimeClassList = map[string]string{ "": "RuntimeClassList is a list of RuntimeClass objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } @@ -50,10 +59,22 @@ func (RuntimeClassList) SwaggerDoc() map[string]string { var map_RuntimeClassSpec = map[string]string{ "": "RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters that are required to describe the RuntimeClass to the Container Runtime Interface (CRI) implementation, as well as any other components that need to understand how the pod will be run. The RuntimeClassSpec is immutable.", "runtimeHandler": "RuntimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements and is immutable.", + "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.", + "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } func (RuntimeClassSpec) SwaggerDoc() map[string]string { return map_RuntimeClassSpec } +var map_Scheduling = map[string]string{ + "": "Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass.", + "nodeSelector": "nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. 
Any conflicts will cause the pod to be rejected in admission.", + "tolerations": "tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.", +} + +func (Scheduling) SwaggerDoc() map[string]string { + return map_Scheduling +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go index 0d63110b3..20f805183 100644 --- a/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go @@ -21,15 +21,39 @@ limitations under the License. package v1alpha1 import ( + v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Overhead) DeepCopyInto(out *Overhead) { + *out = *in + if in.PodFixed != nil { + in, out := &in.PodFixed, &out.PodFixed + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Overhead. +func (in *Overhead) DeepCopy() *Overhead { + if in == nil { + return nil + } + out := new(Overhead) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RuntimeClass) DeepCopyInto(out *RuntimeClass) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) return } @@ -55,7 +79,7 @@ func (in *RuntimeClass) DeepCopyObject() runtime.Object { func (in *RuntimeClassList) DeepCopyInto(out *RuntimeClassList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]RuntimeClass, len(*in)) @@ -87,6 +111,16 @@ func (in *RuntimeClassList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RuntimeClassSpec) DeepCopyInto(out *RuntimeClassSpec) { *out = *in + if in.Overhead != nil { + in, out := &in.Overhead, &out.Overhead + *out = new(Overhead) + (*in).DeepCopyInto(*out) + } + if in.Scheduling != nil { + in, out := &in.Scheduling, &out.Scheduling + *out = new(Scheduling) + (*in).DeepCopyInto(*out) + } return } @@ -99,3 +133,33 @@ func (in *RuntimeClassSpec) DeepCopy() *RuntimeClassSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Scheduling) DeepCopyInto(out *Scheduling) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scheduling. 
+func (in *Scheduling) DeepCopy() *Scheduling { + if in == nil { + return nil + } + out := new(Scheduling) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/node/v1beta1/doc.go b/vendor/k8s.io/api/node/v1beta1/doc.go index 0e8338cf7..e87583cea 100644 --- a/vendor/k8s.io/api/node/v1beta1/doc.go +++ b/vendor/k8s.io/api/node/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=node.k8s.io diff --git a/vendor/k8s.io/api/node/v1beta1/generated.pb.go b/vendor/k8s.io/api/node/v1beta1/generated.pb.go index 27251a8a8..63992f436 100644 --- a/vendor/k8s.io/api/node/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/node/v1beta1/generated.pb.go @@ -17,26 +17,25 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto - - It has these top-level messages: - RuntimeClass - RuntimeClassList -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import strings "strings" -import reflect "reflect" + io "io" -import io "io" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" + resource "k8s.io/apimachinery/pkg/api/resource" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -49,22 +48,233 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } -func (*RuntimeClass) ProtoMessage() {} -func (*RuntimeClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *Overhead) Reset() { *m = Overhead{} } +func (*Overhead) ProtoMessage() {} +func (*Overhead) Descriptor() ([]byte, []int) { + return fileDescriptor_f977b0dddc93b4ec, []int{0} +} +func (m *Overhead) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Overhead) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Overhead) XXX_Merge(src proto.Message) { + xxx_messageInfo_Overhead.Merge(m, src) +} +func (m *Overhead) XXX_Size() int { + return m.Size() +} +func (m *Overhead) XXX_DiscardUnknown() { + xxx_messageInfo_Overhead.DiscardUnknown(m) +} + +var xxx_messageInfo_Overhead proto.InternalMessageInfo + +func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } +func (*RuntimeClass) ProtoMessage() {} +func (*RuntimeClass) Descriptor() ([]byte, []int) { + return fileDescriptor_f977b0dddc93b4ec, []int{1} +} +func (m *RuntimeClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClass.Merge(m, src) +} +func (m *RuntimeClass) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClass) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClass.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClass proto.InternalMessageInfo + +func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } +func (*RuntimeClassList) ProtoMessage() {} +func (*RuntimeClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_f977b0dddc93b4ec, []int{2} +} +func (m *RuntimeClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassList.Merge(m, src) +} +func (m *RuntimeClassList) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClassList) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClassList proto.InternalMessageInfo -func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } -func (*RuntimeClassList) ProtoMessage() {} -func (*RuntimeClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *Scheduling) Reset() { *m = Scheduling{} } +func (*Scheduling) ProtoMessage() {} +func (*Scheduling) Descriptor() ([]byte, []int) { + return fileDescriptor_f977b0dddc93b4ec, []int{3} +} +func (m *Scheduling) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scheduling) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scheduling.Merge(m, src) +} +func (m *Scheduling) XXX_Size() int { + return 
m.Size() +} +func (m *Scheduling) XXX_DiscardUnknown() { + xxx_messageInfo_Scheduling.DiscardUnknown(m) +} + +var xxx_messageInfo_Scheduling proto.InternalMessageInfo func init() { + proto.RegisterType((*Overhead)(nil), "k8s.io.api.node.v1beta1.Overhead") + proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.api.node.v1beta1.Overhead.PodFixedEntry") proto.RegisterType((*RuntimeClass)(nil), "k8s.io.api.node.v1beta1.RuntimeClass") proto.RegisterType((*RuntimeClassList)(nil), "k8s.io.api.node.v1beta1.RuntimeClassList") + proto.RegisterType((*Scheduling)(nil), "k8s.io.api.node.v1beta1.Scheduling") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.node.v1beta1.Scheduling.NodeSelectorEntry") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto", fileDescriptor_f977b0dddc93b4ec) } + +var fileDescriptor_f977b0dddc93b4ec = []byte{ + // 666 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xbd, 0x6f, 0xd3, 0x40, + 0x14, 0xcf, 0xa5, 0x54, 0x4d, 0x2f, 0x29, 0x14, 0x53, 0x89, 0x28, 0x83, 0x53, 0x82, 0x90, 0xca, + 0xd0, 0x33, 0xad, 0x00, 0x55, 0x2c, 0x20, 0xf3, 0x21, 0x3e, 0x5b, 0x70, 0x61, 0x41, 0x0c, 0x5c, + 0xec, 0x87, 0x63, 0x12, 0xfb, 0xa2, 0xf3, 0x39, 0x22, 0x1b, 0x62, 0x41, 0x62, 0x62, 0xe1, 0xbf, + 0x81, 0xbd, 0x1b, 0x5d, 0x90, 0x3a, 0xb5, 0x34, 0xfc, 0x17, 0x4c, 0xe8, 0xec, 0x73, 0x72, 0x6d, + 0x9a, 0xb6, 0x6c, 0xbe, 0xf3, 0xef, 0xe3, 0xbd, 0xdf, 0xbb, 0x87, 0xef, 0xb4, 0xd7, 0x62, 0x12, + 0x30, 0xab, 0x9d, 0x34, 0x81, 0x47, 0x20, 0x20, 0xb6, 0x7a, 0x10, 0x79, 0x8c, 0x5b, 0xea, 0x07, + 0xed, 0x06, 0x56, 0xc4, 0x3c, 0xb0, 0x7a, 0x2b, 0x4d, 0x10, 0x74, 0xc5, 0xf2, 0x21, 0x02, 0x4e, + 0x05, 0x78, 0xa4, 0xcb, 0x99, 0x60, 0xc6, 0xc5, 0x0c, 0x48, 0x68, 0x37, 0x20, 0x12, 0x48, 0x14, + 0xb0, 0xb6, 0xec, 0x07, 0xa2, 0x95, 0x34, 0x89, 0xcb, 0x42, 0xcb, 0x67, 0x3e, 0xb3, 0x52, 0x7c, + 0x33, 0x79, 0x97, 0x9e, 0xd2, 0x43, 0xfa, 0x95, 0xe9, 0xd4, 0x1a, 0x9a, 0xa1, 0xcb, 0xb8, 0x34, + 0x3c, 0xec, 0x55, 0xbb, 0x3e, 0xc2, 0x84, 0xd4, 0x6d, 0x05, 0x11, 0xf0, 0xbe, 0xd5, 0x6d, 0xfb, + 0x29, 0x89, 0x43, 0xcc, 0x12, 0xee, 0xc2, 0x7f, 0xb1, 0x62, 0x2b, 0x04, 0x41, 0x8f, 0xf2, 0xb2, + 0x26, 0xb1, 0x78, 0x12, 0x89, 0x20, 0x1c, 0xb7, 0xb9, 0x79, 0x12, 0x21, 0x76, 0x5b, 0x10, 0xd2, + 0xc3, 0xbc, 0xc6, 0xcf, 0x22, 0x2e, 0x6d, 0xf4, 0x80, 0xb7, 0x80, 0x7a, 0xc6, 0x2f, 0x84, 0x4b, + 0x5d, 0xe6, 0x3d, 0x08, 0x3e, 0x80, 0x57, 0x45, 0x8b, 0x53, 0x4b, 0xe5, 0x55, 0x8b, 0x4c, 0x48, + 0x98, 0xe4, 0x2c, 0xf2, 0x5c, 0x31, 0xee, 0x47, 0x82, 0xf7, 0xed, 0xcf, 0x68, 0x6b, 0xb7, 0x5e, + 0x18, 0xec, 0xd6, 0x4b, 0xf9, 0xfd, 0xdf, 0xdd, 0x7a, 0x7d, 0x3c, 0x5e, 0xe2, 0xa8, 0xc4, 0x9e, + 0x06, 0xb1, 0xf8, 0xb4, 0x77, 0x2c, 0x64, 0x9d, 0x86, 0xf0, 0x65, 0xaf, 0xbe, 0x7c, 0x9a, 0x01, + 0x90, 0x17, 0x09, 0x8d, 0x44, 0x20, 0xfa, 0xce, 0xb0, 0x95, 0x5a, 0x1b, 0xcf, 0x1d, 0x28, 0xd2, + 0x98, 0xc7, 0x53, 0x6d, 0xe8, 0x57, 0xd1, 0x22, 0x5a, 0x9a, 0x75, 0xe4, 0xa7, 0x71, 0x0f, 0x4f, + 0xf7, 0x68, 0x27, 0x81, 0x6a, 0x71, 0x11, 0x2d, 0x95, 0x57, 0x89, 0xd6, 0xf6, 0xd0, 0x8b, 0x74, + 0xdb, 0x7e, 0x9a, 0xc3, 0xb8, 0x57, 0x46, 0xbe, 0x55, 0x5c, 0x43, 0x8d, 0x1f, 0x45, 0x5c, 0x71, + 0xb2, 0xd0, 0xef, 0x76, 0x68, 0x1c, 0x1b, 0x6f, 0x71, 0x49, 0x8e, 0xd9, 0xa3, 0x82, 0xa6, 0x8e, + 0xe5, 0xd5, 0x6b, 0xc7, 0xa9, 0xc7, 0x44, 0xa2, 0x49, 0x6f, 0x85, 0x6c, 0x34, 0xdf, 0x83, 0x2b, + 0x9e, 0x81, 0xa0, 0xb6, 0xa1, 0x42, 0xc5, 0xa3, 0x3b, 0x67, 0xa8, 0x6a, 0x5c, 0xc5, 0x33, 0x2d, + 0x1a, 0x79, 0x1d, 0xe0, 0x69, 0xf9, 
0xb3, 0xf6, 0x39, 0x05, 0x9f, 0x79, 0x98, 0x5d, 0x3b, 0xf9, + 0x7f, 0xe3, 0x09, 0x2e, 0x31, 0x35, 0xb8, 0xea, 0x54, 0x5a, 0xcc, 0xa5, 0x13, 0x27, 0x6c, 0x57, + 0xe4, 0x38, 0xf3, 0x93, 0x33, 0x14, 0x30, 0x36, 0x31, 0x96, 0xcf, 0xca, 0x4b, 0x3a, 0x41, 0xe4, + 0x57, 0xcf, 0xa4, 0x72, 0x97, 0x27, 0xca, 0x6d, 0x0e, 0xa1, 0xf6, 0x59, 0xd9, 0xca, 0xe8, 0xec, + 0x68, 0x32, 0x8d, 0xef, 0x08, 0xcf, 0xeb, 0xf9, 0xc9, 0xf7, 0x61, 0xbc, 0x19, 0xcb, 0x90, 0x9c, + 0x2e, 0x43, 0xc9, 0x4e, 0x13, 0x9c, 0xcf, 0x9f, 0x65, 0x7e, 0xa3, 0xe5, 0xf7, 0x18, 0x4f, 0x07, + 0x02, 0xc2, 0xb8, 0x5a, 0x4c, 0xdf, 0xfc, 0x95, 0x89, 0x2d, 0xe8, 0x75, 0xd9, 0x73, 0x4a, 0x71, + 0xfa, 0x91, 0xe4, 0x3a, 0x99, 0x44, 0xe3, 0x5b, 0x11, 0x6b, 0x9d, 0x19, 0x0c, 0x57, 0xa4, 0xc2, + 0x26, 0x74, 0xc0, 0x15, 0x8c, 0xab, 0xad, 0xba, 0x71, 0x8a, 0x90, 0xc8, 0xba, 0xc6, 0xcb, 0x76, + 0x6b, 0x41, 0x39, 0x56, 0xf4, 0x5f, 0xce, 0x01, 0x03, 0xe3, 0x15, 0x2e, 0x0b, 0xd6, 0x91, 0x3b, + 0x1e, 0xb0, 0x28, 0xef, 0xc8, 0xd4, 0xfd, 0xe4, 0x76, 0xc9, 0x68, 0x5e, 0x0e, 0x61, 0xf6, 0x05, + 0x25, 0x5c, 0x1e, 0xdd, 0xc5, 0x8e, 0xae, 0x53, 0xbb, 0x8d, 0xcf, 0x8f, 0xd5, 0x73, 0xc4, 0x1a, + 0x2d, 0xe8, 0x6b, 0x34, 0xab, 0xad, 0x85, 0xbd, 0xbc, 0xb5, 0x6f, 0x16, 0xb6, 0xf7, 0xcd, 0xc2, + 0xce, 0xbe, 0x59, 0xf8, 0x38, 0x30, 0xd1, 0xd6, 0xc0, 0x44, 0xdb, 0x03, 0x13, 0xed, 0x0c, 0x4c, + 0xf4, 0x7b, 0x60, 0xa2, 0xaf, 0x7f, 0xcc, 0xc2, 0xeb, 0x19, 0x95, 0xc3, 0xbf, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x5b, 0xcf, 0x13, 0x0c, 0x1b, 0x06, 0x00, 0x00, +} + +func (m *Overhead) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Overhead) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Overhead) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PodFixed) > 0 { + keysForPodFixed := make([]string, 0, len(m.PodFixed)) + for k := range m.PodFixed { + keysForPodFixed = append(keysForPodFixed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) + for iNdEx := len(keysForPodFixed) - 1; iNdEx >= 0; iNdEx-- { + v := m.PodFixed[k8s_io_api_core_v1.ResourceName(keysForPodFixed[iNdEx])] + baseI := i + { + size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForPodFixed[iNdEx]) + copy(dAtA[i:], keysForPodFixed[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPodFixed[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *RuntimeClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -72,29 +282,61 @@ func (m *RuntimeClass) Marshal() (dAtA []byte, err error) { } func (m *RuntimeClass) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, 
err + if m.Scheduling != nil { + { + size, err := m.Scheduling.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - i += n1 - dAtA[i] = 0x12 - i++ + if m.Overhead != nil { + { + size, err := m.Overhead.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.Handler) + copy(dAtA[i:], m.Handler) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Handler))) - i += copy(dAtA[i:], m.Handler) - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RuntimeClassList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -102,53 +344,157 @@ func (m *RuntimeClassList) Marshal() (dAtA []byte, err error) { } func (m *RuntimeClassList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) + return len(dAtA) - i, nil +} + +func (m *Scheduling) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { - return 0, err + return nil, err } - i += n2 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + return dAtA[:n], nil +} + +func (m *Scheduling) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + if len(m.NodeSelector) > 0 { + keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) + for k := range m.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.NodeSelector[string(keysForNodeSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + 
dAtA[i] = 0x12 + i -= len(keysForNodeSelector[iNdEx]) + copy(dAtA[i:], keysForNodeSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } +func (m *Overhead) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.PodFixed) > 0 { + for k, v := range m.PodFixed { + _ = k + _ = v + l = ((*resource.Quantity)(&v)).Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + func (m *RuntimeClass) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) l = len(m.Handler) n += 1 + l + sovGenerated(uint64(l)) + if m.Overhead != nil { + l = m.Overhead.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Scheduling != nil { + l = m.Scheduling.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } func (m *RuntimeClassList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -162,26 +508,64 @@ func (m *RuntimeClassList) Size() (n int) { return n } -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break +func (m *Scheduling) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } return n } + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *Overhead) String() string { + if this == nil { + return "nil" + } + keysForPodFixed := make([]string, 0, len(this.PodFixed)) + for k := range this.PodFixed { + keysForPodFixed = append(keysForPodFixed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) + mapStringForPodFixed := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForPodFixed { + mapStringForPodFixed += fmt.Sprintf("%v: %v,", k, this.PodFixed[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForPodFixed += "}" + s := strings.Join([]string{`&Overhead{`, + `PodFixed:` + mapStringForPodFixed + `,`, + `}`, + }, "") + return s +} func (this *RuntimeClass) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&RuntimeClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Handler:` + fmt.Sprintf("%v", this.Handler) + `,`, + `Overhead:` + strings.Replace(this.Overhead.String(), "Overhead", "Overhead", 1) + `,`, + `Scheduling:` + 
strings.Replace(this.Scheduling.String(), "Scheduling", "Scheduling", 1) + `,`, `}`, }, "") return s @@ -190,9 +574,40 @@ func (this *RuntimeClassList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]RuntimeClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RuntimeClass", "RuntimeClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RuntimeClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RuntimeClass", "RuntimeClass", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *Scheduling) String() string { + if this == nil { + return "nil" + } + repeatedStringForTolerations := "[]Toleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," + } + repeatedStringForTolerations += "}" + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + s := strings.Join([]string{`&Scheduling{`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, `}`, }, "") return s @@ -205,6 +620,188 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *Overhead) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Overhead: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Overhead: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodFixed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodFixed == nil { + m.PodFixed = make(k8s_io_api_core_v1.ResourceList) + } + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.PodFixed[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *RuntimeClass) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -220,7 +817,7 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -248,7 +845,7 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -257,6 +854,9 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -278,7 +878,7 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -288,11 +888,86 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.Handler = 
string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Overhead", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Overhead == nil { + m.Overhead = &Overhead{} + } + if err := m.Overhead.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scheduling", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scheduling == nil { + m.Scheduling = &Scheduling{} + } + if err := m.Scheduling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -302,6 +977,9 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -329,7 +1007,7 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -357,7 +1035,7 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -366,6 +1044,9 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -387,7 +1068,7 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -396,6 +1077,9 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -413,6 +1097,223 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Scheduling) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scheduling: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scheduling: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.NodeSelector[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break 
+ } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, v11.Toleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -479,10 +1380,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -511,6 +1415,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -529,36 +1436,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 389 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xcd, 0x6a, 0xdb, 0x40, - 0x14, 0x85, 0x35, 0x2e, 0xc6, 0xae, 0xdc, 0x52, 0xa3, 0x4d, 0x8d, 0x17, 0x63, 0x63, 0x28, 0xb8, - 0x0b, 0xcf, 0xd4, 0xa6, 0x94, 0x2e, 0x8b, 0xba, 0x69, 0x4b, 0x4b, 0x41, 0xcb, 0x90, 0x45, 0x46, - 0xd2, 0x8d, 0x34, 0x91, 0xa5, 0x11, 0x9a, 0x91, 0x20, 0xbb, 0x3c, 0x42, 0xf6, 0x79, 0x95, 0x3c, - 0x80, 0x97, 0x5e, 0x7a, 0x65, 0x62, 0xe5, 0x45, 0x82, 0x7e, 0xfc, 0x43, 0x8c, 0x49, 0x76, 0xba, - 0xe7, 0x9e, 0x73, 0xee, 0x87, 0x18, 0xfd, 0x47, 0xf0, 0x5d, 0x12, 0x2e, 0x68, 0x90, 0xda, 0x90, - 0x44, 0xa0, 0x40, 0xd2, 0x0c, 0x22, 0x57, 0x24, 0xb4, 0x5e, 0xb0, 0x98, 0xd3, 0x48, 0xb8, 0x40, - 0xb3, 0xa9, 0x0d, 0x8a, 0x4d, 0xa9, 0x07, 0x11, 0x24, 0x4c, 0x81, 0x4b, 0xe2, 0x44, 0x28, 0x61, - 0x7c, 0xac, 0x8c, 0x84, 0xc5, 0x9c, 0x14, 0x46, 0x52, 0x1b, 0xfb, 0x13, 0x8f, 0x2b, 0x3f, 0xb5, - 0x89, 0x23, 0x42, 0xea, 0x09, 0x4f, 0xd0, 0xd2, 0x6f, 0xa7, 0x97, 0xe5, 0x54, 0x0e, 0xe5, 0x57, - 0xd5, 0xd3, 0xff, 0xba, 0x3f, 0x18, 0x32, 0xc7, 0xe7, 0x11, 0x24, 0xd7, 0x34, 0x0e, 0xbc, 0x42, - 0x90, 0x34, 0x04, 0xc5, 0x68, 0x76, 0x74, 0xbd, 0x4f, 0x4f, 0xa5, 0x92, 0x34, 0x52, 0x3c, 0x84, - 0xa3, 0xc0, 0xb7, 0x97, 0x02, 0xd2, 0xf1, 0x21, 0x64, 0xcf, 0x73, 0xa3, 0x3b, 0xa4, 0xbf, 0xb3, - 0x2a, 0xcb, 0xcf, 0x39, 0x93, 0xd2, 0xb8, 0xd0, 0xdb, 0x05, 0x94, 0xcb, 0x14, 0xeb, 0xa1, 0x21, - 0x1a, 0x77, 0x66, 0x5f, 0xc8, 0xfe, 0x57, 0xec, 0xba, 0x49, 0x1c, 0x78, 0x85, 0x20, 0x49, 0xe1, - 0x26, 0xd9, 0x94, 0xfc, 0xb7, 0xaf, 0xc0, 0x51, 0xff, 0x40, 0x31, 0xd3, 0x58, 0xac, 0x07, 0x5a, - 0xbe, 0x1e, 0xe8, 0x7b, 0xcd, 0xda, 0xb5, 0x1a, 0x9f, 0xf5, 0x96, 0xcf, 0x22, 0x77, 0x0e, 0x49, - 0xaf, 0x31, 0x44, 0xe3, 0xb7, 0xe6, 0x87, 0xda, 0xde, 0xfa, 0x55, 0xc9, 0xd6, 0x76, 0x3f, 0xba, - 0x47, 0x7a, 0xf7, 0x90, 0xee, 0x2f, 0x97, 0xca, 0x38, 0x3f, 0x22, 0x24, 0xaf, 0x23, 0x2c, 0xd2, - 0x25, 0x5f, 0xb7, 0x3e, 0xd8, 0xde, 0x2a, 0x07, 0x74, 0x7f, 0xf4, 0x26, 0x57, 0x10, 0xca, 0x5e, - 0x63, 0xf8, 0x66, 0xdc, 0x99, 
0x7d, 0x22, 0x27, 0xde, 0x01, 0x39, 0xe4, 0x32, 0xdf, 0xd7, 0x8d, - 0xcd, 0xdf, 0x45, 0xd6, 0xaa, 0x2a, 0xcc, 0xc9, 0x62, 0x83, 0xb5, 0xe5, 0x06, 0x6b, 0xab, 0x0d, - 0xd6, 0x6e, 0x72, 0x8c, 0x16, 0x39, 0x46, 0xcb, 0x1c, 0xa3, 0x55, 0x8e, 0xd1, 0x43, 0x8e, 0xd1, - 0xed, 0x23, 0xd6, 0xce, 0x5a, 0x75, 0xe3, 0x53, 0x00, 0x00, 0x00, 0xff, 0xff, 0x93, 0x68, 0xe5, - 0x0d, 0xb5, 0x02, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/node/v1beta1/generated.proto b/vendor/k8s.io/api/node/v1beta1/generated.proto index 9082fbd33..49166798d 100644 --- a/vendor/k8s.io/api/node/v1beta1/generated.proto +++ b/vendor/k8s.io/api/node/v1beta1/generated.proto @@ -21,6 +21,8 @@ syntax = 'proto2'; package k8s.io.api.node.v1beta1; +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -28,6 +30,13 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; +// Overhead structure represents the resource overhead associated with running a pod. +message Overhead { + // PodFixed represents the fixed resource overhead associated with running a pod. + // +optional + map podFixed = 1; +} + // RuntimeClass defines a class of container runtime supported in the cluster. // The RuntimeClass is used to determine which container runtime is used to run // all containers in a pod. RuntimeClasses are (currently) manually defined by a @@ -36,7 +45,7 @@ option go_package = "v1beta1"; // pod. For more details, see // https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md message RuntimeClass { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -51,12 +60,26 @@ message RuntimeClass { // The Handler must conform to the DNS Label (RFC 1123) requirements, and is // immutable. optional string handler = 2; + + // Overhead represents the resource overhead associated with running a pod for a + // given RuntimeClass. For more details, see + // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md + // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature. + // +optional + optional Overhead overhead = 3; + + // Scheduling holds the scheduling constraints to ensure that pods running + // with this RuntimeClass are scheduled to nodes that support it. + // If scheduling is nil, this RuntimeClass is assumed to be supported by all + // nodes. + // +optional + optional Scheduling scheduling = 4; } // RuntimeClassList is a list of RuntimeClass objects. message RuntimeClassList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -64,3 +87,22 @@ message RuntimeClassList { repeated RuntimeClass items = 2; } +// Scheduling specifies the scheduling constraints for nodes supporting a +// RuntimeClass. 
+message Scheduling { + // nodeSelector lists labels that must be present on nodes that support this + // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a + // node matched by this selector. The RuntimeClass nodeSelector is merged + // with a pod's existing nodeSelector. Any conflicts will cause the pod to + // be rejected in admission. + // +optional + map nodeSelector = 1; + + // tolerations are appended (excluding duplicates) to pods running with this + // RuntimeClass during admission, effectively unioning the set of nodes + // tolerated by the pod and the RuntimeClass. + // +optional + // +listType=atomic + repeated k8s.io.api.core.v1.Toleration tolerations = 2; +} + diff --git a/vendor/k8s.io/api/node/v1beta1/types.go b/vendor/k8s.io/api/node/v1beta1/types.go index 993c6e506..793a48f62 100644 --- a/vendor/k8s.io/api/node/v1beta1/types.go +++ b/vendor/k8s.io/api/node/v1beta1/types.go @@ -17,6 +17,7 @@ limitations under the License. package v1beta1 import ( + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -33,7 +34,7 @@ import ( // https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md type RuntimeClass struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -48,6 +49,46 @@ type RuntimeClass struct { // The Handler must conform to the DNS Label (RFC 1123) requirements, and is // immutable. Handler string `json:"handler" protobuf:"bytes,2,opt,name=handler"` + + // Overhead represents the resource overhead associated with running a pod for a + // given RuntimeClass. For more details, see + // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md + // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature. + // +optional + Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,3,opt,name=overhead"` + + // Scheduling holds the scheduling constraints to ensure that pods running + // with this RuntimeClass are scheduled to nodes that support it. + // If scheduling is nil, this RuntimeClass is assumed to be supported by all + // nodes. + // +optional + Scheduling *Scheduling `json:"scheduling,omitempty" protobuf:"bytes,4,opt,name=scheduling"` +} + +// Overhead structure represents the resource overhead associated with running a pod. +type Overhead struct { + // PodFixed represents the fixed resource overhead associated with running a pod. + // +optional + PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` +} + +// Scheduling specifies the scheduling constraints for nodes supporting a +// RuntimeClass. +type Scheduling struct { + // nodeSelector lists labels that must be present on nodes that support this + // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a + // node matched by this selector. The RuntimeClass nodeSelector is merged + // with a pod's existing nodeSelector. Any conflicts will cause the pod to + // be rejected in admission. 
+ // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` + + // tolerations are appended (excluding duplicates) to pods running with this + // RuntimeClass during admission, effectively unioning the set of nodes + // tolerated by the pod and the RuntimeClass. + // +optional + // +listType=atomic + Tolerations []corev1.Toleration `json:"tolerations,omitempty" protobuf:"bytes,2,rep,name=tolerations"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -56,7 +97,7 @@ type RuntimeClass struct { type RuntimeClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go index 8bfa304e7..681f73f23 100644 --- a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go @@ -27,10 +27,21 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Overhead = map[string]string{ + "": "Overhead structure represents the resource overhead associated with running a pod.", + "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", +} + +func (Overhead) SwaggerDoc() map[string]string { + return map_Overhead +} + var map_RuntimeClass = map[string]string{ - "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. 
For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.", + "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } func (RuntimeClass) SwaggerDoc() map[string]string { @@ -39,7 +50,7 @@ func (RuntimeClass) SwaggerDoc() map[string]string { var map_RuntimeClassList = map[string]string{ "": "RuntimeClassList is a list of RuntimeClass objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } @@ -47,4 +58,14 @@ func (RuntimeClassList) SwaggerDoc() map[string]string { return map_RuntimeClassList } +var map_Scheduling = map[string]string{ + "": "Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass.", + "nodeSelector": "nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.", + "tolerations": "tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.", +} + +func (Scheduling) SwaggerDoc() map[string]string { + return map_Scheduling +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go index 98b5d480a..c3989528b 100644 --- a/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go @@ -21,14 +21,48 @@ limitations under the License. package v1beta1 import ( + v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Overhead) DeepCopyInto(out *Overhead) { + *out = *in + if in.PodFixed != nil { + in, out := &in.PodFixed, &out.PodFixed + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Overhead. +func (in *Overhead) DeepCopy() *Overhead { + if in == nil { + return nil + } + out := new(Overhead) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RuntimeClass) DeepCopyInto(out *RuntimeClass) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Overhead != nil { + in, out := &in.Overhead, &out.Overhead + *out = new(Overhead) + (*in).DeepCopyInto(*out) + } + if in.Scheduling != nil { + in, out := &in.Scheduling, &out.Scheduling + *out = new(Scheduling) + (*in).DeepCopyInto(*out) + } return } @@ -54,7 +88,7 @@ func (in *RuntimeClass) DeepCopyObject() runtime.Object { func (in *RuntimeClassList) DeepCopyInto(out *RuntimeClassList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]RuntimeClass, len(*in)) @@ -82,3 +116,33 @@ func (in *RuntimeClassList) DeepCopyObject() runtime.Object { } return nil } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Scheduling) DeepCopyInto(out *Scheduling) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scheduling. +func (in *Scheduling) DeepCopy() *Scheduling { + if in == nil { + return nil + } + out := new(Scheduling) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go index deeeac696..5b57f699c 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go @@ -17,49 +17,26 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto - - It has these top-level messages: - AllowedCSIDriver - AllowedFlexVolume - AllowedHostPath - Eviction - FSGroupStrategyOptions - HostPortRange - IDRange - PodDisruptionBudget - PodDisruptionBudgetList - PodDisruptionBudgetSpec - PodDisruptionBudgetStatus - PodSecurityPolicy - PodSecurityPolicyList - PodSecurityPolicySpec - RunAsGroupStrategyOptions - RunAsUserStrategyOptions - SELinuxStrategyOptions - SupplementalGroupsStrategyOptions -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import strings "strings" -import reflect "reflect" - -import io "io" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -72,85 +49,537 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } -func (*AllowedCSIDriver) ProtoMessage() {} -func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } +func (*AllowedCSIDriver) ProtoMessage() {} +func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{0} +} +func (m *AllowedCSIDriver) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedCSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedCSIDriver) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedCSIDriver.Merge(m, src) +} +func (m *AllowedCSIDriver) XXX_Size() int { + return m.Size() +} +func (m *AllowedCSIDriver) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedCSIDriver.DiscardUnknown(m) +} + +var xxx_messageInfo_AllowedCSIDriver proto.InternalMessageInfo + +func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } +func (*AllowedFlexVolume) ProtoMessage() {} +func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{1} +} +func (m *AllowedFlexVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedFlexVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedFlexVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedFlexVolume.Merge(m, src) +} +func (m *AllowedFlexVolume) XXX_Size() int { + return m.Size() +} +func (m *AllowedFlexVolume) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedFlexVolume.DiscardUnknown(m) +} + +var 
xxx_messageInfo_AllowedFlexVolume proto.InternalMessageInfo + +func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } +func (*AllowedHostPath) ProtoMessage() {} +func (*AllowedHostPath) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{2} +} +func (m *AllowedHostPath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedHostPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedHostPath) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedHostPath.Merge(m, src) +} +func (m *AllowedHostPath) XXX_Size() int { + return m.Size() +} +func (m *AllowedHostPath) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedHostPath.DiscardUnknown(m) +} + +var xxx_messageInfo_AllowedHostPath proto.InternalMessageInfo + +func (m *Eviction) Reset() { *m = Eviction{} } +func (*Eviction) ProtoMessage() {} +func (*Eviction) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{3} +} +func (m *Eviction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Eviction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Eviction) XXX_Merge(src proto.Message) { + xxx_messageInfo_Eviction.Merge(m, src) +} +func (m *Eviction) XXX_Size() int { + return m.Size() +} +func (m *Eviction) XXX_DiscardUnknown() { + xxx_messageInfo_Eviction.DiscardUnknown(m) +} + +var xxx_messageInfo_Eviction proto.InternalMessageInfo + +func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } +func (*FSGroupStrategyOptions) ProtoMessage() {} +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{4} +} +func (m *FSGroupStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FSGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FSGroupStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FSGroupStrategyOptions.Merge(m, src) +} +func (m *FSGroupStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *FSGroupStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FSGroupStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FSGroupStrategyOptions proto.InternalMessageInfo + +func (m *HostPortRange) Reset() { *m = HostPortRange{} } +func (*HostPortRange) ProtoMessage() {} +func (*HostPortRange) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{5} +} +func (m *HostPortRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostPortRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostPortRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostPortRange.Merge(m, src) +} +func (m *HostPortRange) XXX_Size() int { + return m.Size() +} +func (m *HostPortRange) XXX_DiscardUnknown() { + xxx_messageInfo_HostPortRange.DiscardUnknown(m) +} -func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } -func (*AllowedFlexVolume) ProtoMessage() {} -func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { 
return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_HostPortRange proto.InternalMessageInfo -func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } -func (*AllowedHostPath) ProtoMessage() {} -func (*AllowedHostPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *IDRange) Reset() { *m = IDRange{} } +func (*IDRange) ProtoMessage() {} +func (*IDRange) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{6} +} +func (m *IDRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IDRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IDRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_IDRange.Merge(m, src) +} +func (m *IDRange) XXX_Size() int { + return m.Size() +} +func (m *IDRange) XXX_DiscardUnknown() { + xxx_messageInfo_IDRange.DiscardUnknown(m) +} -func (m *Eviction) Reset() { *m = Eviction{} } -func (*Eviction) ProtoMessage() {} -func (*Eviction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_IDRange proto.InternalMessageInfo -func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } -func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } +func (*PodDisruptionBudget) ProtoMessage() {} +func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{7} +} +func (m *PodDisruptionBudget) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDisruptionBudget) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodDisruptionBudget) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDisruptionBudget.Merge(m, src) +} +func (m *PodDisruptionBudget) XXX_Size() int { + return m.Size() +} +func (m *PodDisruptionBudget) XXX_DiscardUnknown() { + xxx_messageInfo_PodDisruptionBudget.DiscardUnknown(m) +} -func (m *HostPortRange) Reset() { *m = HostPortRange{} } -func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_PodDisruptionBudget proto.InternalMessageInfo -func (m *IDRange) Reset() { *m = IDRange{} } -func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } +func (*PodDisruptionBudgetList) ProtoMessage() {} +func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{8} +} +func (m *PodDisruptionBudgetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDisruptionBudgetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodDisruptionBudgetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDisruptionBudgetList.Merge(m, src) +} +func (m *PodDisruptionBudgetList) XXX_Size() int { + return m.Size() +} +func (m *PodDisruptionBudgetList) XXX_DiscardUnknown() { + 
xxx_messageInfo_PodDisruptionBudgetList.DiscardUnknown(m) +} -func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } -func (*PodDisruptionBudget) ProtoMessage() {} -func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_PodDisruptionBudgetList proto.InternalMessageInfo -func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } -func (*PodDisruptionBudgetList) ProtoMessage() {} -func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } +func (*PodDisruptionBudgetSpec) ProtoMessage() {} +func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{9} +} +func (m *PodDisruptionBudgetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDisruptionBudgetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodDisruptionBudgetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDisruptionBudgetSpec.Merge(m, src) +} +func (m *PodDisruptionBudgetSpec) XXX_Size() int { + return m.Size() +} +func (m *PodDisruptionBudgetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodDisruptionBudgetSpec.DiscardUnknown(m) +} -func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } -func (*PodDisruptionBudgetSpec) ProtoMessage() {} -func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_PodDisruptionBudgetSpec proto.InternalMessageInfo func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} } func (*PodDisruptionBudgetStatus) ProtoMessage() {} func (*PodDisruptionBudgetStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{10} + return fileDescriptor_014060e454a820dc, []int{10} +} +func (m *PodDisruptionBudgetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDisruptionBudgetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodDisruptionBudgetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDisruptionBudgetStatus.Merge(m, src) +} +func (m *PodDisruptionBudgetStatus) XXX_Size() int { + return m.Size() +} +func (m *PodDisruptionBudgetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodDisruptionBudgetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodDisruptionBudgetStatus proto.InternalMessageInfo + +func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } +func (*PodSecurityPolicy) ProtoMessage() {} +func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{11} +} +func (m *PodSecurityPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicy.Merge(m, src) +} +func (m *PodSecurityPolicy) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicy) XXX_DiscardUnknown() { + 
xxx_messageInfo_PodSecurityPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicy proto.InternalMessageInfo + +func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } +func (*PodSecurityPolicyList) ProtoMessage() {} +func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{12} +} +func (m *PodSecurityPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicyList.Merge(m, src) +} +func (m *PodSecurityPolicyList) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicyList.DiscardUnknown(m) } -func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } -func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_PodSecurityPolicyList proto.InternalMessageInfo -func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } -func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } +func (*PodSecurityPolicySpec) ProtoMessage() {} +func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{13} +} +func (m *PodSecurityPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicySpec.Merge(m, src) +} +func (m *PodSecurityPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicySpec.DiscardUnknown(m) +} -func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } -func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +var xxx_messageInfo_PodSecurityPolicySpec proto.InternalMessageInfo func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } func (*RunAsGroupStrategyOptions) ProtoMessage() {} func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{14} + return fileDescriptor_014060e454a820dc, []int{14} +} +func (m *RunAsGroupStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RunAsGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RunAsGroupStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunAsGroupStrategyOptions.Merge(m, src) +} +func (m *RunAsGroupStrategyOptions) XXX_Size() int { + return m.Size() } +func (m *RunAsGroupStrategyOptions) XXX_DiscardUnknown() { + 
xxx_messageInfo_RunAsGroupStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RunAsGroupStrategyOptions proto.InternalMessageInfo func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } func (*RunAsUserStrategyOptions) ProtoMessage() {} func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{15} + return fileDescriptor_014060e454a820dc, []int{15} +} +func (m *RunAsUserStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RunAsUserStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunAsUserStrategyOptions.Merge(m, src) +} +func (m *RunAsUserStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RunAsUserStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RunAsUserStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RunAsUserStrategyOptions proto.InternalMessageInfo + +func (m *RuntimeClassStrategyOptions) Reset() { *m = RuntimeClassStrategyOptions{} } +func (*RuntimeClassStrategyOptions) ProtoMessage() {} +func (*RuntimeClassStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{16} +} +func (m *RuntimeClassStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClassStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassStrategyOptions.Merge(m, src) +} +func (m *RuntimeClassStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClassStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassStrategyOptions.DiscardUnknown(m) } -func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } -func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +var xxx_messageInfo_RuntimeClassStrategyOptions proto.InternalMessageInfo + +func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } +func (*SELinuxStrategyOptions) ProtoMessage() {} +func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{17} +} +func (m *SELinuxStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SELinuxStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SELinuxStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SELinuxStrategyOptions.Merge(m, src) +} +func (m *SELinuxStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *SELinuxStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SELinuxStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_SELinuxStrategyOptions proto.InternalMessageInfo func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return 
fileDescriptorGenerated, []int{17} + return fileDescriptor_014060e454a820dc, []int{18} +} +func (m *SupplementalGroupsStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SupplementalGroupsStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SupplementalGroupsStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SupplementalGroupsStrategyOptions.Merge(m, src) +} +func (m *SupplementalGroupsStrategyOptions) XXX_Size() int { + return m.Size() } +func (m *SupplementalGroupsStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SupplementalGroupsStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_SupplementalGroupsStrategyOptions proto.InternalMessageInfo func init() { proto.RegisterType((*AllowedCSIDriver)(nil), "k8s.io.api.policy.v1beta1.AllowedCSIDriver") @@ -164,18 +593,147 @@ func init() { proto.RegisterType((*PodDisruptionBudgetList)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetList") proto.RegisterType((*PodDisruptionBudgetSpec)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetSpec") proto.RegisterType((*PodDisruptionBudgetStatus)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetStatus") + proto.RegisterMapType((map[string]v1.Time)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetStatus.DisruptedPodsEntry") proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicy") proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicyList") proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicySpec") proto.RegisterType((*RunAsGroupStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RunAsGroupStrategyOptions") proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RunAsUserStrategyOptions") + proto.RegisterType((*RuntimeClassStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RuntimeClassStrategyOptions") proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SELinuxStrategyOptions") proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SupplementalGroupsStrategyOptions") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto", fileDescriptor_014060e454a820dc) +} + +var fileDescriptor_014060e454a820dc = []byte{ + // 1883 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xdd, 0x6e, 0x1b, 0xc7, + 0x15, 0xd6, 0x9a, 0xfa, 0xa1, 0x46, 0x3f, 0x16, 0x47, 0x3f, 0x5e, 0x2b, 0x35, 0xd7, 0xd9, 0x00, + 0x85, 0x9b, 0x26, 0xcb, 0x58, 0x76, 0x5c, 0xa3, 0x69, 0x8b, 0x68, 0x45, 0xc9, 0x56, 0x60, 0x59, + 0xec, 0xd0, 0x0e, 0xda, 0xc2, 0x2d, 0x3a, 0xe4, 0x8e, 0xa8, 0x8d, 0x96, 0xbb, 0xdb, 0x99, 0x59, + 0x46, 0xbc, 0xeb, 0x45, 0x2f, 0x7a, 0xd9, 0x17, 0x08, 0xfa, 0x00, 0x45, 0xaf, 0xfa, 0x12, 0x0e, + 0x50, 0x04, 0xb9, 0x0c, 0x7a, 0x41, 0xd4, 0x2c, 0xfa, 0x12, 0xbe, 0x0a, 0x76, 0x38, 0xbb, 0xe4, + 0xfe, 0x91, 0x76, 0x00, 0xfb, 0x8e, 0x3b, 0xe7, 0xfb, 0xbe, 0x33, 0x73, 0xe6, 0xcc, 0x99, 0xc3, + 0x01, 0xe6, 0xc5, 0x7d, 0x66, 0xd8, 0x5e, 0xed, 0x22, 0x68, 0x11, 0xea, 0x12, 0x4e, 0x58, 0xad, + 0x47, 0x5c, 0xcb, 0xa3, 0x35, 0x69, 0xc0, 0xbe, 0x5d, 0xf3, 0x3d, 0xc7, 0x6e, 0xf7, 0x6b, 0xbd, + 0xdb, 0x2d, 0xc2, 0xf1, 0xed, 0x5a, 0x87, 0xb8, 0x84, 0x62, 0x4e, 0x2c, 0xc3, 0xa7, 0x1e, 0xf7, + 0xe0, 0xf5, 0x11, 
0xd4, 0xc0, 0xbe, 0x6d, 0x8c, 0xa0, 0x86, 0x84, 0xee, 0x7e, 0xd8, 0xb1, 0xf9, + 0x79, 0xd0, 0x32, 0xda, 0x5e, 0xb7, 0xd6, 0xf1, 0x3a, 0x5e, 0x4d, 0x30, 0x5a, 0xc1, 0x99, 0xf8, + 0x12, 0x1f, 0xe2, 0xd7, 0x48, 0x69, 0x57, 0x9f, 0x70, 0xda, 0xf6, 0x28, 0xa9, 0xf5, 0x32, 0xde, + 0x76, 0xef, 0x8e, 0x31, 0x5d, 0xdc, 0x3e, 0xb7, 0x5d, 0x42, 0xfb, 0x35, 0xff, 0xa2, 0x13, 0x0e, + 0xb0, 0x5a, 0x97, 0x70, 0x9c, 0xc7, 0xaa, 0x15, 0xb1, 0x68, 0xe0, 0x72, 0xbb, 0x4b, 0x32, 0x84, + 0x7b, 0xb3, 0x08, 0xac, 0x7d, 0x4e, 0xba, 0x38, 0xc3, 0xbb, 0x53, 0xc4, 0x0b, 0xb8, 0xed, 0xd4, + 0x6c, 0x97, 0x33, 0x4e, 0xd3, 0x24, 0xfd, 0x2e, 0xd8, 0xd8, 0x77, 0x1c, 0xef, 0x4b, 0x62, 0x1d, + 0x34, 0x8f, 0xeb, 0xd4, 0xee, 0x11, 0x0a, 0x6f, 0x82, 0x79, 0x17, 0x77, 0x89, 0xaa, 0xdc, 0x54, + 0x6e, 0x2d, 0x9b, 0xab, 0xcf, 0x07, 0xda, 0xdc, 0x70, 0xa0, 0xcd, 0x3f, 0xc6, 0x5d, 0x82, 0x84, + 0x45, 0xff, 0x04, 0x54, 0x24, 0xeb, 0xc8, 0x21, 0x97, 0x9f, 0x7b, 0x4e, 0xd0, 0x25, 0xf0, 0xc7, + 0x60, 0xd1, 0x12, 0x02, 0x92, 0xb8, 0x2e, 0x89, 0x8b, 0x23, 0x59, 0x24, 0xad, 0x3a, 0x03, 0x57, + 0x25, 0xf9, 0xa1, 0xc7, 0x78, 0x03, 0xf3, 0x73, 0xb8, 0x07, 0x80, 0x8f, 0xf9, 0x79, 0x83, 0x92, + 0x33, 0xfb, 0x52, 0xd2, 0xa1, 0xa4, 0x83, 0x46, 0x6c, 0x41, 0x13, 0x28, 0xf8, 0x01, 0x28, 0x53, + 0x82, 0xad, 0x53, 0xd7, 0xe9, 0xab, 0x57, 0x6e, 0x2a, 0xb7, 0xca, 0xe6, 0x86, 0x64, 0x94, 0x91, + 0x1c, 0x47, 0x31, 0x42, 0xff, 0x8f, 0x02, 0xca, 0x87, 0x3d, 0xbb, 0xcd, 0x6d, 0xcf, 0x85, 0x7f, + 0x04, 0xe5, 0x70, 0xb7, 0x2c, 0xcc, 0xb1, 0x70, 0xb6, 0xb2, 0xf7, 0x91, 0x31, 0xce, 0xa4, 0x38, + 0x78, 0x86, 0x7f, 0xd1, 0x09, 0x07, 0x98, 0x11, 0xa2, 0x8d, 0xde, 0x6d, 0xe3, 0xb4, 0xf5, 0x05, + 0x69, 0xf3, 0x13, 0xc2, 0xf1, 0x78, 0x7a, 0xe3, 0x31, 0x14, 0xab, 0x42, 0x07, 0xac, 0x59, 0xc4, + 0x21, 0x9c, 0x9c, 0xfa, 0xa1, 0x47, 0x26, 0x66, 0xb8, 0xb2, 0x77, 0xe7, 0xd5, 0xdc, 0xd4, 0x27, + 0xa9, 0x66, 0x65, 0x38, 0xd0, 0xd6, 0x12, 0x43, 0x28, 0x29, 0xae, 0x7f, 0xa5, 0x80, 0x9d, 0xa3, + 0xe6, 0x03, 0xea, 0x05, 0x7e, 0x93, 0x87, 0xbb, 0xdb, 0xe9, 0x4b, 0x13, 0xfc, 0x19, 0x98, 0xa7, + 0x81, 0x13, 0xed, 0xe5, 0x7b, 0xd1, 0x5e, 0xa2, 0xc0, 0x21, 0x2f, 0x07, 0xda, 0x66, 0x8a, 0xf5, + 0xa4, 0xef, 0x13, 0x24, 0x08, 0xf0, 0x33, 0xb0, 0x48, 0xb1, 0xdb, 0x21, 0xe1, 0xd4, 0x4b, 0xb7, + 0x56, 0xf6, 0x74, 0xa3, 0xf0, 0xac, 0x19, 0xc7, 0x75, 0x14, 0x42, 0xc7, 0x3b, 0x2e, 0x3e, 0x19, + 0x92, 0x0a, 0xfa, 0x09, 0x58, 0x13, 0x5b, 0xed, 0x51, 0x2e, 0x2c, 0xf0, 0x06, 0x28, 0x75, 0x6d, + 0x57, 0x4c, 0x6a, 0xc1, 0x5c, 0x91, 0xac, 0xd2, 0x89, 0xed, 0xa2, 0x70, 0x5c, 0x98, 0xf1, 0xa5, + 0x88, 0xd9, 0xa4, 0x19, 0x5f, 0xa2, 0x70, 0x5c, 0x7f, 0x00, 0x96, 0xa4, 0xc7, 0x49, 0xa1, 0xd2, + 0x74, 0xa1, 0x52, 0x8e, 0xd0, 0x3f, 0xae, 0x80, 0xcd, 0x86, 0x67, 0xd5, 0x6d, 0x46, 0x03, 0x11, + 0x2f, 0x33, 0xb0, 0x3a, 0x84, 0xbf, 0x85, 0xfc, 0x78, 0x02, 0xe6, 0x99, 0x4f, 0xda, 0x32, 0x2d, + 0xf6, 0xa6, 0xc4, 0x36, 0x67, 0x7e, 0x4d, 0x9f, 0xb4, 0xc7, 0xc7, 0x32, 0xfc, 0x42, 0x42, 0x0d, + 0x3e, 0x03, 0x8b, 0x8c, 0x63, 0x1e, 0x30, 0xb5, 0x24, 0x74, 0xef, 0xbe, 0xa6, 0xae, 0xe0, 0x8e, + 0x77, 0x71, 0xf4, 0x8d, 0xa4, 0xa6, 0xfe, 0x6f, 0x05, 0x5c, 0xcb, 0x61, 0x3d, 0xb2, 0x19, 0x87, + 0xcf, 0x32, 0x11, 0x33, 0x5e, 0x2d, 0x62, 0x21, 0x5b, 0xc4, 0x2b, 0x3e, 0xbc, 0xd1, 0xc8, 0x44, + 0xb4, 0x9a, 0x60, 0xc1, 0xe6, 0xa4, 0x1b, 0xa5, 0xa2, 0xf1, 0x7a, 0xcb, 0x32, 0xd7, 0xa4, 0xf4, + 0xc2, 0x71, 0x28, 0x82, 0x46, 0x5a, 0xfa, 0x37, 0x57, 0x72, 0x97, 0x13, 0x86, 0x13, 0x9e, 0x81, + 0xd5, 0xae, 0xed, 0xee, 0xf7, 0xb0, 0xed, 0xe0, 0x96, 0x3c, 0x3d, 0xd3, 0x92, 0x20, 0xac, 0xb0, + 0xc6, 0xa8, 0xc2, 0x1a, 0xc7, 0x2e, 0x3f, 
0xa5, 0x4d, 0x4e, 0x6d, 0xb7, 0x63, 0x6e, 0x0c, 0x07, + 0xda, 0xea, 0xc9, 0x84, 0x12, 0x4a, 0xe8, 0xc2, 0xdf, 0x83, 0x32, 0x23, 0x0e, 0x69, 0x73, 0x8f, + 0xbe, 0x5e, 0x85, 0x78, 0x84, 0x5b, 0xc4, 0x69, 0x4a, 0xaa, 0xb9, 0x1a, 0xc6, 0x2d, 0xfa, 0x42, + 0xb1, 0x24, 0x74, 0xc0, 0x7a, 0x17, 0x5f, 0x3e, 0x75, 0x71, 0xbc, 0x90, 0xd2, 0x0f, 0x5c, 0x08, + 0x1c, 0x0e, 0xb4, 0xf5, 0x93, 0x84, 0x16, 0x4a, 0x69, 0xeb, 0xff, 0x9f, 0x07, 0xd7, 0x0b, 0xb3, + 0x0a, 0x7e, 0x06, 0xa0, 0xd7, 0x62, 0x84, 0xf6, 0x88, 0xf5, 0x60, 0x74, 0x07, 0xd9, 0x5e, 0x74, + 0x70, 0x77, 0xe5, 0x06, 0xc1, 0xd3, 0x0c, 0x02, 0xe5, 0xb0, 0xe0, 0x5f, 0x14, 0xb0, 0x66, 0x8d, + 0xdc, 0x10, 0xab, 0xe1, 0x59, 0x51, 0x62, 0x3c, 0xf8, 0x21, 0xf9, 0x6e, 0xd4, 0x27, 0x95, 0x0e, + 0x5d, 0x4e, 0xfb, 0xe6, 0xb6, 0x9c, 0xd0, 0x5a, 0xc2, 0x86, 0x92, 0x4e, 0xe1, 0x09, 0x80, 0x56, + 0x2c, 0xc9, 0xe4, 0x9d, 0x26, 0x42, 0xbc, 0x60, 0xde, 0x90, 0x0a, 0xdb, 0x09, 0xbf, 0x11, 0x08, + 0xe5, 0x10, 0xe1, 0xaf, 0xc0, 0x7a, 0x3b, 0xa0, 0x94, 0xb8, 0xfc, 0x21, 0xc1, 0x0e, 0x3f, 0xef, + 0xab, 0xf3, 0x42, 0x6a, 0x47, 0x4a, 0xad, 0x1f, 0x24, 0xac, 0x28, 0x85, 0x0e, 0xf9, 0x16, 0x61, + 0x36, 0x25, 0x56, 0xc4, 0x5f, 0x48, 0xf2, 0xeb, 0x09, 0x2b, 0x4a, 0xa1, 0xe1, 0x7d, 0xb0, 0x4a, + 0x2e, 0x7d, 0xd2, 0x8e, 0x62, 0xba, 0x28, 0xd8, 0x5b, 0x92, 0xbd, 0x7a, 0x38, 0x61, 0x43, 0x09, + 0xe4, 0xae, 0x03, 0x60, 0x36, 0x88, 0x70, 0x03, 0x94, 0x2e, 0x48, 0x7f, 0x74, 0xf3, 0xa0, 0xf0, + 0x27, 0xfc, 0x14, 0x2c, 0xf4, 0xb0, 0x13, 0x10, 0x99, 0xeb, 0xef, 0xbf, 0x5a, 0xae, 0x3f, 0xb1, + 0xbb, 0x04, 0x8d, 0x88, 0x3f, 0xbf, 0x72, 0x5f, 0xd1, 0xbf, 0x56, 0x40, 0xa5, 0xe1, 0x59, 0x4d, + 0xd2, 0x0e, 0xa8, 0xcd, 0xfb, 0x0d, 0xb1, 0xcf, 0x6f, 0xa1, 0x66, 0xa3, 0x44, 0xcd, 0xfe, 0x68, + 0x7a, 0xae, 0x25, 0x67, 0x57, 0x54, 0xb1, 0xf5, 0xe7, 0x0a, 0xd8, 0xce, 0xa0, 0xdf, 0x42, 0x45, + 0xfd, 0x75, 0xb2, 0xa2, 0x7e, 0xf0, 0x3a, 0x8b, 0x29, 0xa8, 0xa7, 0x5f, 0x57, 0x72, 0x96, 0x22, + 0xaa, 0x69, 0xd8, 0xdd, 0x51, 0xbb, 0x67, 0x3b, 0xa4, 0x43, 0x2c, 0xb1, 0x98, 0xf2, 0x44, 0x77, + 0x17, 0x5b, 0xd0, 0x04, 0x0a, 0x32, 0xb0, 0x63, 0x91, 0x33, 0x1c, 0x38, 0x7c, 0xdf, 0xb2, 0x0e, + 0xb0, 0x8f, 0x5b, 0xb6, 0x63, 0x73, 0x5b, 0xb6, 0x23, 0xcb, 0xe6, 0x27, 0xc3, 0x81, 0xb6, 0x53, + 0xcf, 0x45, 0xbc, 0x1c, 0x68, 0x37, 0xb2, 0xdd, 0xbc, 0x11, 0x43, 0xfa, 0xa8, 0x40, 0x1a, 0xf6, + 0x81, 0x4a, 0xc9, 0x9f, 0x82, 0xf0, 0x50, 0xd4, 0xa9, 0xe7, 0x27, 0xdc, 0x96, 0x84, 0xdb, 0x5f, + 0x0e, 0x07, 0x9a, 0x8a, 0x0a, 0x30, 0xb3, 0x1d, 0x17, 0xca, 0xc3, 0x2f, 0xc0, 0x26, 0x96, 0x7d, + 0xf8, 0xa4, 0xd7, 0x79, 0xe1, 0xf5, 0xfe, 0x70, 0xa0, 0x6d, 0xee, 0x67, 0xcd, 0xb3, 0x1d, 0xe6, + 0x89, 0xc2, 0x1a, 0x58, 0xea, 0x89, 0x96, 0x9d, 0xa9, 0x0b, 0x42, 0x7f, 0x7b, 0x38, 0xd0, 0x96, + 0x46, 0x5d, 0x7c, 0xa8, 0xb9, 0x78, 0xd4, 0x14, 0x8d, 0x60, 0x84, 0x82, 0x1f, 0x83, 0x95, 0x73, + 0x8f, 0xf1, 0xc7, 0x84, 0x7f, 0xe9, 0xd1, 0x0b, 0x51, 0x18, 0xca, 0xe6, 0xa6, 0xdc, 0xc1, 0x95, + 0x87, 0x63, 0x13, 0x9a, 0xc4, 0xc1, 0xdf, 0x82, 0xe5, 0x73, 0xd9, 0xf6, 0x31, 0x75, 0x49, 0x24, + 0xda, 0xad, 0x29, 0x89, 0x96, 0x68, 0x11, 0xcd, 0x8a, 0x94, 0x5f, 0x8e, 0x86, 0x19, 0x1a, 0xab, + 0xc1, 0x9f, 0x80, 0x25, 0xf1, 0x71, 0x5c, 0x57, 0xcb, 0x62, 0x36, 0x57, 0x25, 0x7c, 0xe9, 0xe1, + 0x68, 0x18, 0x45, 0xf6, 0x08, 0x7a, 0xdc, 0x38, 0x50, 0x97, 0xb3, 0xd0, 0xe3, 0xc6, 0x01, 0x8a, + 0xec, 0xf0, 0x19, 0x58, 0x62, 0xe4, 0x91, 0xed, 0x06, 0x97, 0x2a, 0x10, 0x47, 0xee, 0xf6, 0x94, + 0xe9, 0x36, 0x0f, 0x05, 0x32, 0xd5, 0x70, 0x8f, 0xd5, 0xa5, 0x1d, 0x45, 0x92, 0xd0, 0x02, 0xcb, + 0x34, 0x70, 0xf7, 0xd9, 0x53, 0x46, 0xa8, 0xba, 0x92, 0xb9, 0xed, 
0xd3, 0xfa, 0x28, 0xc2, 0xa6, + 0x3d, 0xc4, 0x91, 0x89, 0x11, 0x68, 0x2c, 0x0c, 0x2d, 0x00, 0xc4, 0x87, 0xe8, 0xeb, 0xd5, 0x9d, + 0x99, 0x7d, 0x20, 0x8a, 0xc1, 0x69, 0x3f, 0xeb, 0xe1, 0xf1, 0x1c, 0x9b, 0xd1, 0x84, 0x2e, 0xfc, + 0xab, 0x02, 0x20, 0x0b, 0x7c, 0xdf, 0x21, 0x5d, 0xe2, 0x72, 0xec, 0x88, 0x51, 0xa6, 0xae, 0x0a, + 0x77, 0xbf, 0x98, 0x16, 0xb5, 0x0c, 0x29, 0xed, 0x36, 0x6e, 0x06, 0xb2, 0x50, 0x94, 0xe3, 0x33, + 0xdc, 0xb4, 0x33, 0xb9, 0xda, 0xb5, 0x99, 0x9b, 0x96, 0xff, 0x2f, 0x69, 0xbc, 0x69, 0xd2, 0x8e, + 0x22, 0x49, 0xf8, 0x39, 0xd8, 0x89, 0xfe, 0x43, 0x22, 0xcf, 0xe3, 0x47, 0xb6, 0x43, 0x58, 0x9f, + 0x71, 0xd2, 0x55, 0xd7, 0x45, 0x32, 0x55, 0x25, 0x73, 0x07, 0xe5, 0xa2, 0x50, 0x01, 0x1b, 0x76, + 0x81, 0x16, 0x15, 0xa1, 0xf0, 0x84, 0xc6, 0x55, 0xf0, 0x90, 0xb5, 0xb1, 0x33, 0xea, 0x8d, 0xae, + 0x0a, 0x07, 0xef, 0x0d, 0x07, 0x9a, 0x56, 0x9f, 0x0e, 0x45, 0xb3, 0xb4, 0xe0, 0x6f, 0x80, 0x8a, + 0x8b, 0xfc, 0x6c, 0x08, 0x3f, 0x3f, 0x0a, 0x2b, 0x5b, 0xa1, 0x83, 0x42, 0x36, 0xf4, 0xc1, 0x06, + 0x4e, 0xfe, 0x9b, 0x67, 0x6a, 0x45, 0x9c, 0xf5, 0xf7, 0xa7, 0xec, 0x43, 0xea, 0x01, 0xc0, 0x54, + 0x65, 0x18, 0x37, 0x52, 0x06, 0x86, 0x32, 0xea, 0xf0, 0x12, 0x40, 0x9c, 0x7e, 0x7c, 0x60, 0x2a, + 0x9c, 0x79, 0x91, 0x65, 0x5e, 0x2c, 0xc6, 0xa9, 0x96, 0x31, 0x31, 0x94, 0xe3, 0x03, 0x72, 0x50, + 0xc1, 0xa9, 0xc7, 0x12, 0xa6, 0x5e, 0x13, 0x8e, 0x7f, 0x3a, 0xdb, 0x71, 0xcc, 0x31, 0xaf, 0x4b, + 0xbf, 0x95, 0xb4, 0x85, 0xa1, 0xac, 0x03, 0xf8, 0x08, 0x6c, 0xc9, 0xc1, 0xa7, 0x2e, 0xc3, 0x67, + 0xa4, 0xd9, 0x67, 0x6d, 0xee, 0x30, 0x75, 0x53, 0xd4, 0x6e, 0x75, 0x38, 0xd0, 0xb6, 0xf6, 0x73, + 0xec, 0x28, 0x97, 0x05, 0x3f, 0x05, 0x1b, 0x67, 0x1e, 0x6d, 0xd9, 0x96, 0x45, 0xdc, 0x48, 0x69, + 0x4b, 0x28, 0x6d, 0x85, 0xf1, 0x3f, 0x4a, 0xd9, 0x50, 0x06, 0x0d, 0x19, 0xd8, 0x96, 0xca, 0x0d, + 0xea, 0xb5, 0x4f, 0xbc, 0xc0, 0xe5, 0xe1, 0x75, 0xc1, 0xd4, 0xed, 0xf8, 0x8a, 0xdc, 0xde, 0xcf, + 0x03, 0xbc, 0x1c, 0x68, 0x37, 0x73, 0xae, 0xab, 0x04, 0x08, 0xe5, 0x6b, 0x43, 0x07, 0xac, 0xca, + 0xe7, 0xaf, 0x03, 0x07, 0x33, 0xa6, 0xaa, 0xe2, 0xa8, 0xdf, 0x9b, 0x5e, 0xd8, 0x62, 0x78, 0xfa, + 0xbc, 0x8b, 0xff, 0x65, 0x93, 0x00, 0x94, 0x50, 0xd7, 0xff, 0xae, 0x80, 0xeb, 0x85, 0x85, 0x11, + 0xde, 0x4b, 0xbc, 0xa9, 0xe8, 0xa9, 0x37, 0x15, 0x98, 0x25, 0xbe, 0x81, 0x27, 0x95, 0xaf, 0x14, + 0xa0, 0x16, 0xdd, 0x10, 0xf0, 0xe3, 0xc4, 0x04, 0xdf, 0x4d, 0x4d, 0xb0, 0x92, 0xe1, 0xbd, 0x81, + 0xf9, 0x7d, 0xa3, 0x80, 0x77, 0xa6, 0xec, 0x40, 0x5c, 0x90, 0x88, 0x35, 0x89, 0x7a, 0x8c, 0xc3, + 0xa3, 0xac, 0x88, 0x3c, 0x1a, 0x17, 0xa4, 0x1c, 0x0c, 0x2a, 0x64, 0xc3, 0xa7, 0xe0, 0x9a, 0xac, + 0x86, 0x69, 0x9b, 0xe8, 0xdc, 0x97, 0xcd, 0x77, 0x86, 0x03, 0xed, 0x5a, 0x3d, 0x1f, 0x82, 0x8a, + 0xb8, 0xfa, 0x3f, 0x15, 0xb0, 0x93, 0x7f, 0xe5, 0xc3, 0x3b, 0x89, 0x70, 0x6b, 0xa9, 0x70, 0x5f, + 0x4d, 0xb1, 0x64, 0xb0, 0xff, 0x00, 0xd6, 0x65, 0x63, 0x90, 0x7c, 0x22, 0x4c, 0x04, 0x3d, 0x3c, + 0x22, 0x61, 0x4f, 0x2f, 0x25, 0xa2, 0xf4, 0x15, 0xff, 0xc6, 0x93, 0x63, 0x28, 0xa5, 0xa6, 0xff, + 0x4b, 0x01, 0xef, 0xce, 0xbc, 0x6c, 0xa1, 0x99, 0x98, 0xba, 0x91, 0x9a, 0x7a, 0xb5, 0x58, 0xe0, + 0xcd, 0xbc, 0x14, 0x9a, 0x1f, 0x3e, 0x7f, 0x51, 0x9d, 0xfb, 0xf6, 0x45, 0x75, 0xee, 0xbb, 0x17, + 0xd5, 0xb9, 0x3f, 0x0f, 0xab, 0xca, 0xf3, 0x61, 0x55, 0xf9, 0x76, 0x58, 0x55, 0xbe, 0x1b, 0x56, + 0x95, 0xff, 0x0e, 0xab, 0xca, 0xdf, 0xfe, 0x57, 0x9d, 0xfb, 0xdd, 0x92, 0x94, 0xfb, 0x3e, 0x00, + 0x00, 0xff, 0xff, 0x48, 0x23, 0x7b, 0x0e, 0x44, 0x18, 0x00, 0x00, +} + func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := 
m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -183,21 +741,27 @@ func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { } func (m *AllowedCSIDriver) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedCSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + i -= len(m.Name) + copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -205,21 +769,27 @@ func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { } func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedFlexVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i += copy(dAtA[i:], m.Driver) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -227,29 +797,35 @@ func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { } func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedHostPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) - i += copy(dAtA[i:], m.PathPrefix) - dAtA[i] = 0x10 - i++ + i-- if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x10 + i -= len(m.PathPrefix) + copy(dAtA[i:], m.PathPrefix) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Eviction) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -257,35 +833,44 @@ func (m *Eviction) Marshal() (dAtA []byte, err error) { } func (m *Eviction) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Eviction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 if m.DeleteOptions != nil { + { + size, err := m.DeleteOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeleteOptions.Size())) - n2, err := m.DeleteOptions.MarshalTo(dAtA[i:]) + } + { + size, err := 
m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n2 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -293,33 +878,41 @@ func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *FSGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FSGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HostPortRange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -327,23 +920,28 @@ func (m *HostPortRange) Marshal() (dAtA []byte, err error) { } func (m *HostPortRange) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostPortRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - return i, nil + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *IDRange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -351,23 +949,28 @@ func (m *IDRange) Marshal() (dAtA []byte, err error) { } func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IDRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - return i, nil + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *PodDisruptionBudget) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -375,41 +978,52 @@ func (m *PodDisruptionBudget) Marshal() (dAtA []byte, err error) { } func (m 
*PodDisruptionBudget) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDisruptionBudget) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n4, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n5, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodDisruptionBudgetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -417,37 +1031,46 @@ func (m *PodDisruptionBudgetList) Marshal() (dAtA []byte, err error) { } func (m *PodDisruptionBudgetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDisruptionBudgetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodDisruptionBudgetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -455,47 +1078,58 @@ func (m *PodDisruptionBudgetSpec) Marshal() (dAtA []byte, err error) { } func (m *PodDisruptionBudgetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDisruptionBudgetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.MinAvailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinAvailable.Size())) 
- n7, err := m.MinAvailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- + dAtA[i] = 0x1a } if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n8, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0x12 } - if m.MaxUnavailable != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n9, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.MinAvailable != nil { + { + size, err := m.MinAvailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *PodDisruptionBudgetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -503,63 +1137,66 @@ func (m *PodDisruptionBudgetStatus) Marshal() (dAtA []byte, err error) { } func (m *PodDisruptionBudgetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDisruptionBudgetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i = encodeVarintGenerated(dAtA, i, uint64(m.ExpectedPods)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredHealthy)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentHealthy)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.PodDisruptionsAllowed)) + i-- + dAtA[i] = 0x18 if len(m.DisruptedPods) > 0 { keysForDisruptedPods := make([]string, 0, len(m.DisruptedPods)) for k := range m.DisruptedPods { keysForDisruptedPods = append(keysForDisruptedPods, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods) - for _, k := range keysForDisruptedPods { + for iNdEx := len(keysForDisruptedPods) - 1; iNdEx >= 0; iNdEx-- { + v := m.DisruptedPods[string(keysForDisruptedPods[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - v := m.DisruptedPods[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForDisruptedPods[iNdEx]) + copy(dAtA[i:], keysForDisruptedPods[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDisruptedPods[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n10, err := 
(&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 } } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodDisruptionsAllowed)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentHealthy)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredHealthy)) - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ExpectedPods)) - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -567,33 +1204,42 @@ func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { } func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n11, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n12, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n12 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -601,37 +1247,46 @@ func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { } func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n13, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil 
{ return nil, err } @@ -639,288 +1294,283 @@ func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { } func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Privileged { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.DefaultAddCapabilities) > 0 { - for _, s := range m.DefaultAddCapabilities { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.RuntimeClass != nil { + { + size, err := m.RuntimeClass.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 } - if len(m.RequiredDropCapabilities) > 0 { - for _, s := range m.RequiredDropCapabilities { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if len(m.AllowedCSIDrivers) > 0 { + for iNdEx := len(m.AllowedCSIDrivers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedCSIDrivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba } } - if len(m.AllowedCapabilities) > 0 { - for _, s := range m.AllowedCapabilities { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.RunAsGroup != nil { + { + size, err := m.RunAsGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 } - if len(m.Volumes) > 0 { - for _, s := range m.Volumes { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.AllowedProcMountTypes) > 0 { + for iNdEx := len(m.AllowedProcMountTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedProcMountTypes[iNdEx]) + copy(dAtA[i:], m.AllowedProcMountTypes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedProcMountTypes[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa } } - dAtA[i] = 0x30 - i++ - if m.HostNetwork { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.ForbiddenSysctls) > 0 { + for iNdEx := len(m.ForbiddenSysctls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ForbiddenSysctls[iNdEx]) + copy(dAtA[i:], m.ForbiddenSysctls[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ForbiddenSysctls[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } } - i++ - if len(m.HostPorts) > 0 { - for _, msg := range m.HostPorts { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.AllowedUnsafeSysctls) > 0 { + for iNdEx := len(m.AllowedUnsafeSysctls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedUnsafeSysctls[iNdEx]) + copy(dAtA[i:], m.AllowedUnsafeSysctls[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedUnsafeSysctls[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + } + if 
len(m.AllowedFlexVolumes) > 0 { + for iNdEx := len(m.AllowedFlexVolumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedFlexVolumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 } } - dAtA[i] = 0x40 - i++ - if m.HostPID { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.AllowedHostPaths) > 0 { + for iNdEx := len(m.AllowedHostPaths) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedHostPaths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } } - i++ - dAtA[i] = 0x48 - i++ - if m.HostIPC { + if m.AllowPrivilegeEscalation != nil { + i-- + if *m.AllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if m.DefaultAllowPrivilegeEscalation != nil { + i-- + if *m.DefaultAllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x78 + } + i-- + if m.ReadOnlyRootFilesystem { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SELinux.Size())) - n14, err := m.SELinux.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x70 + { + size, err := m.FSGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n14 - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsUser.Size())) - n15, err := m.RunAsUser.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x6a + { + size, err := m.SupplementalGroups.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n15 + i-- dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups.Size())) - n16, err := m.SupplementalGroups.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RunAsUser.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n16 - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FSGroup.Size())) - n17, err := m.FSGroup.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x5a + { + size, err := m.SELinux.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n17 - dAtA[i] = 0x70 - i++ - if m.ReadOnlyRootFilesystem { + i-- + dAtA[i] = 0x52 + i-- + if m.HostIPC { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if m.DefaultAllowPrivilegeEscalation != nil { - dAtA[i] = 0x78 - i++ - if *m.DefaultAllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.AllowPrivilegeEscalation != nil { - dAtA[i] = 0x80 - i++ - dAtA[i] = 0x1 - i++ - if *m.AllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + i-- + dAtA[i] = 0x48 + i-- + if m.HostPID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if len(m.AllowedHostPaths) > 0 { - for _, msg := range m.AllowedHostPaths { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x40 + if len(m.HostPorts) > 0 { + for 
iNdEx := len(m.HostPorts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.HostPorts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x3a } } - if len(m.AllowedFlexVolumes) > 0 { - for _, msg := range m.AllowedFlexVolumes { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + i-- + if m.HostNetwork { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if len(m.AllowedUnsafeSysctls) > 0 { - for _, s := range m.AllowedUnsafeSysctls { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x30 + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Volumes[iNdEx]) + copy(dAtA[i:], m.Volumes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx]))) + i-- + dAtA[i] = 0x2a } } - if len(m.ForbiddenSysctls) > 0 { - for _, s := range m.ForbiddenSysctls { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.AllowedCapabilities) > 0 { + for iNdEx := len(m.AllowedCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedCapabilities[iNdEx]) + copy(dAtA[i:], m.AllowedCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x22 } } - if len(m.AllowedProcMountTypes) > 0 { - for _, s := range m.AllowedProcMountTypes { - dAtA[i] = 0xaa - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.RequiredDropCapabilities) > 0 { + for iNdEx := len(m.RequiredDropCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RequiredDropCapabilities[iNdEx]) + copy(dAtA[i:], m.RequiredDropCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequiredDropCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x1a } } - if m.RunAsGroup != nil { - dAtA[i] = 0xb2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsGroup.Size())) - n18, err := m.RunAsGroup.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.DefaultAddCapabilities) > 0 { + for iNdEx := len(m.DefaultAddCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DefaultAddCapabilities[iNdEx]) + copy(dAtA[i:], m.DefaultAddCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DefaultAddCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x12 } - i += n18 } - if len(m.AllowedCSIDrivers) > 0 { - for _, msg := range m.AllowedCSIDrivers { - dAtA[i] = 0xba - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + i-- + if m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -928,33 +1578,41 @@ func (m *RunAsGroupStrategyOptions) Marshal() 
(dAtA []byte, err error) { } func (m *RunAsGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunAsGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -962,33 +1620,80 @@ func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunAsUserStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RuntimeClassStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClassStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DefaultRuntimeClassName != nil { + i -= len(*m.DefaultRuntimeClassName) + copy(dAtA[i:], *m.DefaultRuntimeClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DefaultRuntimeClassName))) + i-- + dAtA[i] = 0x12 + } + if len(m.AllowedRuntimeClassNames) > 0 { + for iNdEx := len(m.AllowedRuntimeClassNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedRuntimeClassNames[iNdEx]) + copy(dAtA[i:], m.AllowedRuntimeClassNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedRuntimeClassNames[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *SELinuxStrategyOptions) 
Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -996,31 +1701,39 @@ func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SELinuxStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if m.SELinuxOptions != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n19, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n19 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1028,39 +1741,52 @@ func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SupplementalGroupsStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *AllowedCSIDriver) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1069,6 +1795,9 @@ func (m *AllowedCSIDriver) Size() (n int) { } func (m *AllowedFlexVolume) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Driver) @@ -1077,6 +1806,9 @@ func (m *AllowedFlexVolume) Size() (n int) { } func (m *AllowedHostPath) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.PathPrefix) @@ -1086,6 +1818,9 @@ func (m *AllowedHostPath) Size() (n int) { } func (m *Eviction) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1098,6 +1833,9 @@ func (m 
*Eviction) Size() (n int) { } func (m *FSGroupStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -1112,6 +1850,9 @@ func (m *FSGroupStrategyOptions) Size() (n int) { } func (m *HostPortRange) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Min)) @@ -1120,6 +1861,9 @@ func (m *HostPortRange) Size() (n int) { } func (m *IDRange) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Min)) @@ -1128,6 +1872,9 @@ func (m *IDRange) Size() (n int) { } func (m *PodDisruptionBudget) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1140,6 +1887,9 @@ func (m *PodDisruptionBudget) Size() (n int) { } func (m *PodDisruptionBudgetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1154,6 +1904,9 @@ func (m *PodDisruptionBudgetList) Size() (n int) { } func (m *PodDisruptionBudgetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MinAvailable != nil { @@ -1172,6 +1925,9 @@ func (m *PodDisruptionBudgetSpec) Size() (n int) { } func (m *PodDisruptionBudgetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -1192,6 +1948,9 @@ func (m *PodDisruptionBudgetStatus) Size() (n int) { } func (m *PodSecurityPolicy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1202,6 +1961,9 @@ func (m *PodSecurityPolicy) Size() (n int) { } func (m *PodSecurityPolicyList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1216,6 +1978,9 @@ func (m *PodSecurityPolicyList) Size() (n int) { } func (m *PodSecurityPolicySpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -1307,10 +2072,17 @@ func (m *PodSecurityPolicySpec) Size() (n int) { n += 2 + l + sovGenerated(uint64(l)) } } + if m.RuntimeClass != nil { + l = m.RuntimeClass.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } func (m *RunAsGroupStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -1325,6 +2097,9 @@ func (m *RunAsGroupStrategyOptions) Size() (n int) { } func (m *RunAsUserStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -1338,7 +2113,29 @@ func (m *RunAsUserStrategyOptions) Size() (n int) { return n } +func (m *RuntimeClassStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.AllowedRuntimeClassNames) > 0 { + for _, s := range m.AllowedRuntimeClassNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultRuntimeClassName != nil { + l = len(*m.DefaultRuntimeClassName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *SELinuxStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -1351,6 +2148,9 @@ func (m *SELinuxStrategyOptions) Size() (n int) { } func (m *SupplementalGroupsStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -1365,14 +2165,7 @@ func (m *SupplementalGroupsStrategyOptions) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1413,8 
+2206,8 @@ func (this *Eviction) String() string { return "nil" } s := strings.Join([]string{`&Eviction{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `DeleteOptions:` + strings.Replace(fmt.Sprintf("%v", this.DeleteOptions), "DeleteOptions", "k8s_io_apimachinery_pkg_apis_meta_v1.DeleteOptions", 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DeleteOptions:` + strings.Replace(fmt.Sprintf("%v", this.DeleteOptions), "DeleteOptions", "v1.DeleteOptions", 1) + `,`, `}`, }, "") return s @@ -1423,9 +2216,14 @@ func (this *FSGroupStrategyOptions) String() string { if this == nil { return "nil" } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" s := strings.Join([]string{`&FSGroupStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, `}`, }, "") return s @@ -1457,7 +2255,7 @@ func (this *PodDisruptionBudget) String() string { return "nil" } s := strings.Join([]string{`&PodDisruptionBudget{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodDisruptionBudgetSpec", "PodDisruptionBudgetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodDisruptionBudgetStatus", "PodDisruptionBudgetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1468,9 +2266,14 @@ func (this *PodDisruptionBudgetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]PodDisruptionBudget{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodDisruptionBudget", "PodDisruptionBudget", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PodDisruptionBudgetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodDisruptionBudget", "PodDisruptionBudget", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1480,9 +2283,9 @@ func (this *PodDisruptionBudgetSpec) String() string { return "nil" } s := strings.Join([]string{`&PodDisruptionBudgetSpec{`, - `MinAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MinAvailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", 
"k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MinAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MinAvailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -1496,7 +2299,7 @@ func (this *PodDisruptionBudgetStatus) String() string { keysForDisruptedPods = append(keysForDisruptedPods, k) } github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods) - mapStringForDisruptedPods := "map[string]k8s_io_apimachinery_pkg_apis_meta_v1.Time{" + mapStringForDisruptedPods := "map[string]v1.Time{" for _, k := range keysForDisruptedPods { mapStringForDisruptedPods += fmt.Sprintf("%v: %v,", k, this.DisruptedPods[k]) } @@ -1517,7 +2320,7 @@ func (this *PodSecurityPolicy) String() string { return "nil" } s := strings.Join([]string{`&PodSecurityPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySpec", "PodSecurityPolicySpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -1527,9 +2330,14 @@ func (this *PodSecurityPolicyList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]PodSecurityPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PodSecurityPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1538,6 +2346,26 @@ func (this *PodSecurityPolicySpec) String() string { if this == nil { return "nil" } + repeatedStringForHostPorts := "[]HostPortRange{" + for _, f := range this.HostPorts { + repeatedStringForHostPorts += strings.Replace(strings.Replace(f.String(), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + "," + } + repeatedStringForHostPorts += "}" + repeatedStringForAllowedHostPaths := "[]AllowedHostPath{" + for _, f := range this.AllowedHostPaths { + repeatedStringForAllowedHostPaths += strings.Replace(strings.Replace(f.String(), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedHostPaths += "}" + repeatedStringForAllowedFlexVolumes := "[]AllowedFlexVolume{" + for _, f := range this.AllowedFlexVolumes { + repeatedStringForAllowedFlexVolumes += strings.Replace(strings.Replace(f.String(), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedFlexVolumes += "}" + repeatedStringForAllowedCSIDrivers := "[]AllowedCSIDriver{" + for _, f := range this.AllowedCSIDrivers { + repeatedStringForAllowedCSIDrivers += 
strings.Replace(strings.Replace(f.String(), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedCSIDrivers += "}" s := strings.Join([]string{`&PodSecurityPolicySpec{`, `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, @@ -1545,7 +2373,7 @@ func (this *PodSecurityPolicySpec) String() string { `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, - `HostPorts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HostPorts), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + `,`, + `HostPorts:` + repeatedStringForHostPorts + `,`, `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, `SELinux:` + strings.Replace(strings.Replace(this.SELinux.String(), "SELinuxStrategyOptions", "SELinuxStrategyOptions", 1), `&`, ``, 1) + `,`, @@ -1555,13 +2383,14 @@ func (this *PodSecurityPolicySpec) String() string { `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, `DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, - `AllowedHostPaths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedHostPaths), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + `,`, - `AllowedFlexVolumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedFlexVolumes), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + `,`, + `AllowedHostPaths:` + repeatedStringForAllowedHostPaths + `,`, + `AllowedFlexVolumes:` + repeatedStringForAllowedFlexVolumes + `,`, `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, - `RunAsGroup:` + strings.Replace(fmt.Sprintf("%v", this.RunAsGroup), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, - `AllowedCSIDrivers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedCSIDrivers), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + `,`, + `RunAsGroup:` + strings.Replace(this.RunAsGroup.String(), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, + `AllowedCSIDrivers:` + repeatedStringForAllowedCSIDrivers + `,`, + `RuntimeClass:` + strings.Replace(this.RuntimeClass.String(), "RuntimeClassStrategyOptions", "RuntimeClassStrategyOptions", 1) + `,`, `}`, }, "") return s @@ -1570,9 +2399,14 @@ func (this *RunAsGroupStrategyOptions) String() string { if this == nil { return "nil" } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" s := strings.Join([]string{`&RunAsGroupStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, `}`, }, "") return s @@ -1581,9 +2415,25 @@ func (this *RunAsUserStrategyOptions) String() string { if this == nil { return "nil" } + repeatedStringForRanges := "[]IDRange{" + for _, f := range 
this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" s := strings.Join([]string{`&RunAsUserStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClassStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeClassStrategyOptions{`, + `AllowedRuntimeClassNames:` + fmt.Sprintf("%v", this.AllowedRuntimeClassNames) + `,`, + `DefaultRuntimeClassName:` + valueToStringGenerated(this.DefaultRuntimeClassName) + `,`, `}`, }, "") return s @@ -1594,7 +2444,7 @@ func (this *SELinuxStrategyOptions) String() string { } s := strings.Join([]string{`&SELinuxStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "k8s_io_api_core_v1.SELinuxOptions", 1) + `,`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "v11.SELinuxOptions", 1) + `,`, `}`, }, "") return s @@ -1603,9 +2453,14 @@ func (this *SupplementalGroupsStrategyOptions) String() string { if this == nil { return "nil" } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, `}`, }, "") return s @@ -1633,7 +2488,7 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1661,7 +2516,7 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1671,6 +2526,9 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1685,6 +2543,9 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1712,7 +2573,7 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1740,7 +2601,7 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1750,6 +2611,9 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ 
-1764,6 +2628,9 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1791,7 +2658,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1819,7 +2686,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1829,6 +2696,9 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1848,7 +2718,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1863,6 +2733,9 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1890,7 +2763,7 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1918,7 +2791,7 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1927,6 +2800,9 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1948,7 +2824,7 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1957,11 +2833,14 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.DeleteOptions == nil { - m.DeleteOptions = &k8s_io_apimachinery_pkg_apis_meta_v1.DeleteOptions{} + m.DeleteOptions = &v1.DeleteOptions{} } if err := m.DeleteOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1976,6 +2855,9 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2003,7 +2885,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2031,7 +2913,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2041,6 +2923,9 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { 
return io.ErrUnexpectedEOF } @@ -2060,7 +2945,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2069,6 +2954,9 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2086,6 +2974,9 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2113,7 +3004,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2141,7 +3032,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= (int32(b) & 0x7F) << shift + m.Min |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2160,7 +3051,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= (int32(b) & 0x7F) << shift + m.Max |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2174,6 +3065,9 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2201,7 +3095,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2229,7 +3123,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= (int64(b) & 0x7F) << shift + m.Min |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2248,7 +3142,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= (int64(b) & 0x7F) << shift + m.Max |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2262,6 +3156,9 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2289,7 +3186,7 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2317,7 +3214,7 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2326,6 +3223,9 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2347,7 +3247,7 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2356,6 +3256,9 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2377,7 +3280,7 @@ 
func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2386,6 +3289,9 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2402,6 +3308,9 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2429,7 +3338,7 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2457,7 +3366,7 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2466,6 +3375,9 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2487,7 +3399,7 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2496,6 +3408,9 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2513,6 +3428,9 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2540,7 +3458,7 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2568,7 +3486,7 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2577,11 +3495,14 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MinAvailable == nil { - m.MinAvailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MinAvailable = &intstr.IntOrString{} } if err := m.MinAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2601,7 +3522,7 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2610,11 +3531,14 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector 
= &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2634,7 +3558,7 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2643,11 +3567,14 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2662,6 +3589,9 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2689,7 +3619,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2717,7 +3647,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2736,7 +3666,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2745,14 +3675,17 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.DisruptedPods == nil { - m.DisruptedPods = make(map[string]k8s_io_apimachinery_pkg_apis_meta_v1.Time) + m.DisruptedPods = make(map[string]v1.Time) } var mapkey string - mapvalue := &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + mapvalue := &v1.Time{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -2765,7 +3698,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2782,7 +3715,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2792,6 +3725,9 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -2808,7 +3744,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2817,13 +3753,13 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } 
- mapvalue = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + mapvalue = &v1.Time{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -2859,7 +3795,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.PodDisruptionsAllowed |= (int32(b) & 0x7F) << shift + m.PodDisruptionsAllowed |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2878,7 +3814,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentHealthy |= (int32(b) & 0x7F) << shift + m.CurrentHealthy |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2897,7 +3833,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredHealthy |= (int32(b) & 0x7F) << shift + m.DesiredHealthy |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2916,7 +3852,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ExpectedPods |= (int32(b) & 0x7F) << shift + m.ExpectedPods |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2930,6 +3866,9 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2957,7 +3896,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2985,7 +3924,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2994,6 +3933,9 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3015,7 +3957,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3024,6 +3966,9 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3040,6 +3985,9 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3067,7 +4015,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3095,7 +4043,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3104,6 +4052,9 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3125,7 +4076,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= 
int(b&0x7F) << shift if b < 0x80 { break } @@ -3134,6 +4085,9 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3151,6 +4105,9 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3178,7 +4135,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3206,7 +4163,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3226,7 +4183,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3236,6 +4193,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3255,7 +4215,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3265,6 +4225,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3284,7 +4247,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3294,6 +4257,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3313,7 +4279,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3323,6 +4289,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3342,7 +4311,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3362,7 +4331,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3371,6 +4340,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3393,7 +4365,7 @@ func (m 
*PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3413,7 +4385,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3433,7 +4405,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3442,6 +4414,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3463,7 +4438,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3472,6 +4447,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3493,7 +4471,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3502,6 +4480,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3523,7 +4504,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3532,6 +4513,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3553,7 +4537,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3573,7 +4557,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3594,7 +4578,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3615,7 +4599,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3624,6 +4608,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3646,7 +4633,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3655,6 +4642,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3677,7 +4667,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3687,6 +4677,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3706,7 +4699,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3716,6 +4709,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3735,7 +4731,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3745,6 +4741,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3764,7 +4763,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3773,6 +4772,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3797,7 +4799,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3806,6 +4808,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3814,6 +4819,42 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClass", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RuntimeClass == nil { + m.RuntimeClass = &RuntimeClassStrategyOptions{} + } + if err := m.RuntimeClass.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3823,6 +4864,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) 
error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3850,7 +4894,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3878,7 +4922,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3888,6 +4932,9 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3907,7 +4954,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3916,6 +4963,9 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3933,6 +4983,9 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3960,7 +5013,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3988,7 +5041,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3998,6 +5051,9 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4017,7 +5073,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4026,6 +5082,9 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4043,6 +5102,127 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 
3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClassStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClassStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedRuntimeClassNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedRuntimeClassNames = append(m.AllowedRuntimeClassNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultRuntimeClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.DefaultRuntimeClassName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4070,7 +5250,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4098,7 +5278,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4108,6 +5288,9 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4127,7 +5310,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4136,11 +5319,14 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.SELinuxOptions == nil { - m.SELinuxOptions = &k8s_io_api_core_v1.SELinuxOptions{} + m.SELinuxOptions = &v11.SELinuxOptions{} } if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4155,6 +5341,9 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { 
return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4182,7 +5371,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4210,7 +5399,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4220,6 +5409,9 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4239,7 +5431,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4248,6 +5440,9 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4265,6 +5460,9 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4331,10 +5529,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -4363,6 +5564,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -4381,125 +5585,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1809 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xdd, 0x8e, 0xdb, 0xc6, - 0x15, 0x5e, 0x7a, 0xff, 0xb4, 0xb3, 0x3f, 0xd6, 0xce, 0xfe, 0x84, 0x5e, 0xd4, 0xa2, 0xc3, 0x00, - 0x85, 0x9b, 0x26, 0x54, 0xbc, 0x76, 0x52, 0xa3, 0x69, 0x8b, 0x2c, 0x57, 0xbb, 0xf6, 0x06, 0xde, - 0xac, 0x3a, 0xb2, 0x83, 0xb6, 0x70, 0x8b, 0x8e, 0xc4, 0x59, 0xed, 0x64, 0x29, 0x92, 0x9d, 0x19, - 0x2a, 0xab, 0xbb, 0x5e, 0xf4, 0xa2, 0x97, 0x7d, 0x81, 0xa0, 0x0f, 0x50, 0xf4, 0xaa, 0x2f, 0xe1, - 0x02, 0x45, 0x91, 0xcb, 0xa0, 0x05, 0x84, 0x5a, 0x45, 0x5f, 0xc2, 0x57, 0x05, 0x47, 0x43, 0x4a, - 0xfc, 0x91, 0x64, 0x1b, 0xb0, 0xef, 0xc8, 0x39, 0xdf, 0xf7, 0x9d, 0xc3, 0x33, 0x67, 0xce, 0x0c, - 0x07, 0xd8, 0x97, 0xf7, 0xb9, 0x45, 0xfd, 0xea, 0x65, 0xd8, 0x24, 0xcc, 0x23, 0x82, 0xf0, 0x6a, - 0x97, 0x78, 0x8e, 0xcf, 0xaa, 0xca, 0x80, 0x03, 0x5a, 0x0d, 0x7c, 0x97, 0xb6, 0x7a, 0xd5, 0xee, - 0x9d, 0x26, 0x11, 0xf8, 0x4e, 0xb5, 0x4d, 0x3c, 0xc2, 0xb0, 0x20, 0x8e, 0x15, 0x30, 0x5f, 0xf8, - 0xf0, 0xc6, 0x10, 0x6a, 0xe1, 0x80, 0x5a, 0x43, 0xa8, 0xa5, 
0xa0, 0x7b, 0x1f, 0xb6, 0xa9, 0xb8, - 0x08, 0x9b, 0x56, 0xcb, 0xef, 0x54, 0xdb, 0x7e, 0xdb, 0xaf, 0x4a, 0x46, 0x33, 0x3c, 0x97, 0x6f, - 0xf2, 0x45, 0x3e, 0x0d, 0x95, 0xf6, 0xcc, 0x31, 0xa7, 0x2d, 0x9f, 0x91, 0x6a, 0x37, 0xe7, 0x6d, - 0xef, 0xde, 0x08, 0xd3, 0xc1, 0xad, 0x0b, 0xea, 0x11, 0xd6, 0xab, 0x06, 0x97, 0xed, 0x68, 0x80, - 0x57, 0x3b, 0x44, 0xe0, 0x22, 0x56, 0x75, 0x12, 0x8b, 0x85, 0x9e, 0xa0, 0x1d, 0x92, 0x23, 0x7c, - 0x32, 0x8b, 0xc0, 0x5b, 0x17, 0xa4, 0x83, 0x73, 0xbc, 0xbb, 0x93, 0x78, 0xa1, 0xa0, 0x6e, 0x95, - 0x7a, 0x82, 0x0b, 0x96, 0x25, 0x99, 0xf7, 0x40, 0xf9, 0xc0, 0x75, 0xfd, 0xaf, 0x89, 0x73, 0xd8, - 0x38, 0xa9, 0x31, 0xda, 0x25, 0x0c, 0xde, 0x02, 0x0b, 0x1e, 0xee, 0x10, 0x5d, 0xbb, 0xa5, 0xdd, - 0x5e, 0xb1, 0xd7, 0x9e, 0xf5, 0x8d, 0xb9, 0x41, 0xdf, 0x58, 0xf8, 0x02, 0x77, 0x08, 0x92, 0x16, - 0xf3, 0x53, 0xb0, 0xa9, 0x58, 0xc7, 0x2e, 0xb9, 0xfa, 0xd2, 0x77, 0xc3, 0x0e, 0x81, 0xdf, 0x07, - 0x4b, 0x8e, 0x14, 0x50, 0xc4, 0x0d, 0x45, 0x5c, 0x1a, 0xca, 0x22, 0x65, 0x35, 0x39, 0xb8, 0xae, - 0xc8, 0x0f, 0x7d, 0x2e, 0xea, 0x58, 0x5c, 0xc0, 0x7d, 0x00, 0x02, 0x2c, 0x2e, 0xea, 0x8c, 0x9c, - 0xd3, 0x2b, 0x45, 0x87, 0x8a, 0x0e, 0xea, 0x89, 0x05, 0x8d, 0xa1, 0xe0, 0x07, 0xa0, 0xc4, 0x08, - 0x76, 0xce, 0x3c, 0xb7, 0xa7, 0x5f, 0xbb, 0xa5, 0xdd, 0x2e, 0xd9, 0x65, 0xc5, 0x28, 0x21, 0x35, - 0x8e, 0x12, 0x84, 0xf9, 0x2f, 0x0d, 0x94, 0x8e, 0xba, 0xb4, 0x25, 0xa8, 0xef, 0xc1, 0xdf, 0x82, - 0x52, 0x34, 0x5b, 0x0e, 0x16, 0x58, 0x3a, 0x5b, 0xdd, 0xff, 0xc8, 0x1a, 0x55, 0x52, 0x92, 0x3c, - 0x2b, 0xb8, 0x6c, 0x47, 0x03, 0xdc, 0x8a, 0xd0, 0x56, 0xf7, 0x8e, 0x75, 0xd6, 0xfc, 0x8a, 0xb4, - 0xc4, 0x29, 0x11, 0x78, 0x14, 0xde, 0x68, 0x0c, 0x25, 0xaa, 0xd0, 0x05, 0xeb, 0x0e, 0x71, 0x89, - 0x20, 0x67, 0x41, 0xe4, 0x91, 0xcb, 0x08, 0x57, 0xf7, 0xef, 0xbe, 0x9c, 0x9b, 0xda, 0x38, 0xd5, - 0xde, 0x1c, 0xf4, 0x8d, 0xf5, 0xd4, 0x10, 0x4a, 0x8b, 0x9b, 0xdf, 0x68, 0x60, 0xf7, 0xb8, 0xf1, - 0x80, 0xf9, 0x61, 0xd0, 0x10, 0xd1, 0xec, 0xb6, 0x7b, 0xca, 0x04, 0x7f, 0x04, 0x16, 0x58, 0xe8, - 0xc6, 0x73, 0xf9, 0x5e, 0x3c, 0x97, 0x28, 0x74, 0xc9, 0x8b, 0xbe, 0xb1, 0x95, 0x61, 0x3d, 0xee, - 0x05, 0x04, 0x49, 0x02, 0xfc, 0x1c, 0x2c, 0x31, 0xec, 0xb5, 0x49, 0x14, 0xfa, 0xfc, 0xed, 0xd5, - 0x7d, 0xd3, 0x9a, 0xb8, 0xd6, 0xac, 0x93, 0x1a, 0x8a, 0xa0, 0xa3, 0x19, 0x97, 0xaf, 0x1c, 0x29, - 0x05, 0xf3, 0x14, 0xac, 0xcb, 0xa9, 0xf6, 0x99, 0x90, 0x16, 0x78, 0x13, 0xcc, 0x77, 0xa8, 0x27, - 0x83, 0x5a, 0xb4, 0x57, 0x15, 0x6b, 0xfe, 0x94, 0x7a, 0x28, 0x1a, 0x97, 0x66, 0x7c, 0x25, 0x73, - 0x36, 0x6e, 0xc6, 0x57, 0x28, 0x1a, 0x37, 0x1f, 0x80, 0x65, 0xe5, 0x71, 0x5c, 0x68, 0x7e, 0xba, - 0xd0, 0x7c, 0x81, 0xd0, 0x5f, 0xae, 0x81, 0xad, 0xba, 0xef, 0xd4, 0x28, 0x67, 0xa1, 0xcc, 0x97, - 0x1d, 0x3a, 0x6d, 0x22, 0xde, 0x42, 0x7d, 0x3c, 0x06, 0x0b, 0x3c, 0x20, 0x2d, 0x55, 0x16, 0xfb, - 0x53, 0x72, 0x5b, 0x10, 0x5f, 0x23, 0x20, 0xad, 0xd1, 0xb2, 0x8c, 0xde, 0x90, 0x54, 0x83, 0x4f, - 0xc1, 0x12, 0x17, 0x58, 0x84, 0x5c, 0x9f, 0x97, 0xba, 0xf7, 0x5e, 0x51, 0x57, 0x72, 0x47, 0xb3, - 0x38, 0x7c, 0x47, 0x4a, 0xd3, 0xfc, 0x87, 0x06, 0xde, 0x29, 0x60, 0x3d, 0xa2, 0x5c, 0xc0, 0xa7, - 0xb9, 0x8c, 0x59, 0x2f, 0x97, 0xb1, 0x88, 0x2d, 0xf3, 0x95, 0x2c, 0xde, 0x78, 0x64, 0x2c, 0x5b, - 0x0d, 0xb0, 0x48, 0x05, 0xe9, 0xc4, 0xa5, 0x68, 0xbd, 0xda, 0x67, 0xd9, 0xeb, 0x4a, 0x7a, 0xf1, - 0x24, 0x12, 0x41, 0x43, 0x2d, 0xf3, 0x9f, 0xd7, 0x0a, 0x3f, 0x27, 0x4a, 0x27, 0x3c, 0x07, 0x6b, - 0x1d, 0xea, 0x1d, 0x74, 0x31, 0x75, 0x71, 0x53, 0xad, 0x9e, 0x69, 0x45, 0x10, 0x75, 0x58, 0x6b, - 0xd8, 0x61, 0xad, 0x13, 0x4f, 0x9c, 0xb1, 0x86, 0x60, 0xd4, 0x6b, 0xdb, 0xe5, 0x41, 
0xdf, 0x58, - 0x3b, 0x1d, 0x53, 0x42, 0x29, 0x5d, 0xf8, 0x6b, 0x50, 0xe2, 0xc4, 0x25, 0x2d, 0xe1, 0xb3, 0x57, - 0xeb, 0x10, 0x8f, 0x70, 0x93, 0xb8, 0x0d, 0x45, 0xb5, 0xd7, 0xa2, 0xbc, 0xc5, 0x6f, 0x28, 0x91, - 0x84, 0x2e, 0xd8, 0xe8, 0xe0, 0xab, 0x27, 0x1e, 0x4e, 0x3e, 0x64, 0xfe, 0x35, 0x3f, 0x04, 0x0e, - 0xfa, 0xc6, 0xc6, 0x69, 0x4a, 0x0b, 0x65, 0xb4, 0xcd, 0xff, 0x2d, 0x80, 0x1b, 0x13, 0xab, 0x0a, - 0x7e, 0x0e, 0xa0, 0xdf, 0xe4, 0x84, 0x75, 0x89, 0xf3, 0x60, 0xb8, 0x07, 0x51, 0x3f, 0x5e, 0xb8, - 0x7b, 0x6a, 0x82, 0xe0, 0x59, 0x0e, 0x81, 0x0a, 0x58, 0xf0, 0x0f, 0x1a, 0x58, 0x77, 0x86, 0x6e, - 0x88, 0x53, 0xf7, 0x9d, 0xb8, 0x30, 0x1e, 0xbc, 0x4e, 0xbd, 0x5b, 0xb5, 0x71, 0xa5, 0x23, 0x4f, - 0xb0, 0x9e, 0xbd, 0xa3, 0x02, 0x5a, 0x4f, 0xd9, 0x50, 0xda, 0x29, 0x3c, 0x05, 0xd0, 0x49, 0x24, - 0xb9, 0xda, 0xd3, 0x64, 0x8a, 0x17, 0xed, 0x9b, 0x4a, 0x61, 0x27, 0xe5, 0x37, 0x06, 0xa1, 0x02, - 0x22, 0xfc, 0x19, 0xd8, 0x68, 0x85, 0x8c, 0x11, 0x4f, 0x3c, 0x24, 0xd8, 0x15, 0x17, 0x3d, 0x7d, - 0x41, 0x4a, 0xed, 0x2a, 0xa9, 0x8d, 0xc3, 0x94, 0x15, 0x65, 0xd0, 0x11, 0xdf, 0x21, 0x9c, 0x32, - 0xe2, 0xc4, 0xfc, 0xc5, 0x34, 0xbf, 0x96, 0xb2, 0xa2, 0x0c, 0x1a, 0xde, 0x07, 0x6b, 0xe4, 0x2a, - 0x20, 0xad, 0x38, 0xa7, 0x4b, 0x92, 0xbd, 0xad, 0xd8, 0x6b, 0x47, 0x63, 0x36, 0x94, 0x42, 0xee, - 0xb9, 0x00, 0xe6, 0x93, 0x08, 0xcb, 0x60, 0xfe, 0x92, 0xf4, 0x86, 0x3b, 0x0f, 0x8a, 0x1e, 0xe1, - 0x67, 0x60, 0xb1, 0x8b, 0xdd, 0x90, 0xa8, 0x5a, 0x7f, 0xff, 0xe5, 0x6a, 0xfd, 0x31, 0xed, 0x10, - 0x34, 0x24, 0xfe, 0xf8, 0xda, 0x7d, 0xcd, 0xfc, 0xbb, 0x06, 0x36, 0xeb, 0xbe, 0xd3, 0x20, 0xad, - 0x90, 0x51, 0xd1, 0xab, 0xcb, 0x79, 0x7e, 0x0b, 0x3d, 0x1b, 0xa5, 0x7a, 0xf6, 0x47, 0xd3, 0x6b, - 0x2d, 0x1d, 0xdd, 0xa4, 0x8e, 0x6d, 0x3e, 0xd3, 0xc0, 0x4e, 0x0e, 0xfd, 0x16, 0x3a, 0xea, 0xcf, - 0xd3, 0x1d, 0xf5, 0x83, 0x57, 0xf9, 0x98, 0x09, 0xfd, 0xf4, 0xdf, 0xe5, 0x82, 0x4f, 0x91, 0xdd, - 0x34, 0x3a, 0xdd, 0x31, 0xda, 0xa5, 0x2e, 0x69, 0x13, 0x47, 0x7e, 0x4c, 0x69, 0xec, 0x74, 0x97, - 0x58, 0xd0, 0x18, 0x0a, 0x72, 0xb0, 0xeb, 0x90, 0x73, 0x1c, 0xba, 0xe2, 0xc0, 0x71, 0x0e, 0x71, - 0x80, 0x9b, 0xd4, 0xa5, 0x82, 0xaa, 0xe3, 0xc8, 0x8a, 0xfd, 0xe9, 0xa0, 0x6f, 0xec, 0xd6, 0x0a, - 0x11, 0x2f, 0xfa, 0xc6, 0xcd, 0xfc, 0x69, 0xde, 0x4a, 0x20, 0x3d, 0x34, 0x41, 0x1a, 0xf6, 0x80, - 0xce, 0xc8, 0xef, 0xc2, 0x68, 0x51, 0xd4, 0x98, 0x1f, 0xa4, 0xdc, 0xce, 0x4b, 0xb7, 0x3f, 0x1d, - 0xf4, 0x0d, 0x1d, 0x4d, 0xc0, 0xcc, 0x76, 0x3c, 0x51, 0x1e, 0x7e, 0x05, 0xb6, 0xb0, 0x3a, 0x87, - 0x8f, 0x7b, 0x5d, 0x90, 0x5e, 0xef, 0x0f, 0xfa, 0xc6, 0xd6, 0x41, 0xde, 0x3c, 0xdb, 0x61, 0x91, - 0x28, 0xac, 0x82, 0xe5, 0xae, 0x3c, 0xb2, 0x73, 0x7d, 0x51, 0xea, 0xef, 0x0c, 0xfa, 0xc6, 0xf2, - 0xf0, 0x14, 0x1f, 0x69, 0x2e, 0x1d, 0x37, 0xe4, 0x41, 0x30, 0x46, 0xc1, 0x8f, 0xc1, 0xea, 0x85, - 0xcf, 0xc5, 0x17, 0x44, 0x7c, 0xed, 0xb3, 0x4b, 0xd9, 0x18, 0x4a, 0xf6, 0x96, 0x9a, 0xc1, 0xd5, - 0x87, 0x23, 0x13, 0x1a, 0xc7, 0xc1, 0x5f, 0x82, 0x95, 0x0b, 0x75, 0xec, 0xe3, 0xfa, 0xb2, 0x2c, - 0xb4, 0xdb, 0x53, 0x0a, 0x2d, 0x75, 0x44, 0xb4, 0x37, 0x95, 0xfc, 0x4a, 0x3c, 0xcc, 0xd1, 0x48, - 0x0d, 0xfe, 0x00, 0x2c, 0xcb, 0x97, 0x93, 0x9a, 0x5e, 0x92, 0xd1, 0x5c, 0x57, 0xf0, 0xe5, 0x87, - 0xc3, 0x61, 0x14, 0xdb, 0x63, 0xe8, 0x49, 0xfd, 0x50, 0x5f, 0xc9, 0x43, 0x4f, 0xea, 0x87, 0x28, - 0xb6, 0xc3, 0xa7, 0x60, 0x99, 0x93, 0x47, 0xd4, 0x0b, 0xaf, 0x74, 0x20, 0x97, 0xdc, 0x9d, 0x29, - 0xe1, 0x36, 0x8e, 0x24, 0x32, 0x73, 0xe0, 0x1e, 0xa9, 0x2b, 0x3b, 0x8a, 0x25, 0xa1, 0x03, 0x56, - 0x58, 0xe8, 0x1d, 0xf0, 0x27, 0x9c, 0x30, 0x7d, 0x35, 0xb7, 0xdb, 0x67, 0xf5, 0x51, 0x8c, 0xcd, - 0x7a, 0x48, 
0x32, 0x93, 0x20, 0xd0, 0x48, 0x18, 0xfe, 0x51, 0x03, 0x90, 0x87, 0x41, 0xe0, 0x92, - 0x0e, 0xf1, 0x04, 0x76, 0xe5, 0xf9, 0x9e, 0xeb, 0x6b, 0xd2, 0xdf, 0x4f, 0xa6, 0x7d, 0x4f, 0x8e, - 0x94, 0x75, 0x9c, 0x6c, 0xd3, 0x79, 0x28, 0x2a, 0xf0, 0x19, 0xa5, 0xf3, 0x9c, 0xcb, 0x67, 0x7d, - 0x7d, 0x66, 0x3a, 0x8b, 0xff, 0x5f, 0x46, 0xe9, 0x54, 0x76, 0x14, 0x4b, 0xc2, 0x2f, 0xc1, 0x6e, - 0xfc, 0x77, 0x87, 0x7c, 0x5f, 0x1c, 0x53, 0x97, 0xf0, 0x1e, 0x17, 0xa4, 0xa3, 0x6f, 0xc8, 0x69, - 0xae, 0x28, 0xe6, 0x2e, 0x2a, 0x44, 0xa1, 0x09, 0x6c, 0xd8, 0x01, 0x46, 0xdc, 0x1e, 0xa2, 0xb5, - 0x93, 0xf4, 0xa7, 0x23, 0xde, 0xc2, 0xee, 0xf0, 0xd4, 0x72, 0x5d, 0x3a, 0x78, 0x6f, 0xd0, 0x37, - 0x8c, 0xda, 0x74, 0x28, 0x9a, 0xa5, 0x05, 0x7f, 0x01, 0x74, 0x3c, 0xc9, 0x4f, 0x59, 0xfa, 0xf9, - 0x5e, 0xd4, 0x73, 0x26, 0x3a, 0x98, 0xc8, 0x86, 0x01, 0x28, 0xe3, 0xf4, 0x7f, 0x36, 0xd7, 0x37, - 0xe5, 0x2a, 0x7c, 0x7f, 0xca, 0x3c, 0x64, 0x7e, 0xcd, 0x6d, 0x5d, 0xa5, 0xb1, 0x9c, 0x31, 0x70, - 0x94, 0x53, 0x87, 0x57, 0x00, 0xe2, 0xec, 0xb5, 0x00, 0xd7, 0xe1, 0xcc, 0x2d, 0x26, 0x77, 0x97, - 0x30, 0x2a, 0xb5, 0x9c, 0x89, 0xa3, 0x02, 0x1f, 0xf0, 0x11, 0xd8, 0x56, 0xa3, 0x4f, 0x3c, 0x8e, - 0xcf, 0x49, 0xa3, 0xc7, 0x5b, 0xc2, 0xe5, 0xfa, 0x96, 0xec, 0x6f, 0xfa, 0xa0, 0x6f, 0x6c, 0x1f, - 0x14, 0xd8, 0x51, 0x21, 0x0b, 0x7e, 0x06, 0xca, 0xe7, 0x3e, 0x6b, 0x52, 0xc7, 0x21, 0x5e, 0xac, - 0xb4, 0x2d, 0x95, 0xb6, 0xa3, 0x4c, 0x1c, 0x67, 0x6c, 0x28, 0x87, 0x86, 0x1c, 0xec, 0x28, 0xe5, - 0x3a, 0xf3, 0x5b, 0xa7, 0x7e, 0xe8, 0x89, 0xa8, 0xa5, 0x72, 0x7d, 0x27, 0xd9, 0x46, 0x76, 0x0e, - 0x8a, 0x00, 0x2f, 0xfa, 0xc6, 0xad, 0x82, 0x96, 0x9e, 0x02, 0xa1, 0x62, 0x6d, 0xe8, 0x00, 0x20, - 0xfb, 0xc0, 0x70, 0xc9, 0xed, 0xce, 0xfc, 0x05, 0x44, 0x09, 0x38, 0xbb, 0xea, 0x36, 0xa2, 0x9d, - 0x79, 0x64, 0x46, 0x63, 0xba, 0x50, 0x80, 0x4d, 0x9c, 0xb9, 0x31, 0xe2, 0xfa, 0x3b, 0x72, 0x8e, - 0x7f, 0x38, 0x7b, 0x8e, 0x13, 0x8e, 0x7d, 0x43, 0x4d, 0xf1, 0x66, 0xd6, 0xc2, 0x51, 0xde, 0x81, - 0xf9, 0x67, 0x0d, 0xdc, 0x98, 0x18, 0x2f, 0xfc, 0x24, 0x75, 0xcb, 0x61, 0x66, 0x6e, 0x39, 0x60, - 0x9e, 0xf8, 0x06, 0x2e, 0x39, 0xbe, 0xd1, 0x80, 0x3e, 0xa9, 0x67, 0xc3, 0x8f, 0x53, 0x01, 0xbe, - 0x9b, 0x09, 0x70, 0x33, 0xc7, 0x7b, 0x03, 0xf1, 0xfd, 0x55, 0x03, 0xbb, 0xc5, 0x7b, 0x16, 0xbc, - 0x9b, 0x8a, 0xce, 0xc8, 0x44, 0x77, 0x3d, 0xc3, 0x52, 0xb1, 0xfd, 0x06, 0x6c, 0xa8, 0x9d, 0x2d, - 0x7d, 0xc7, 0x95, 0x8a, 0x31, 0xaa, 0xdf, 0xe8, 0x50, 0xaa, 0x24, 0xe2, 0xfa, 0x92, 0xbf, 0x93, - 0xe9, 0x31, 0x94, 0x51, 0x33, 0xff, 0xa6, 0x81, 0x77, 0x67, 0xee, 0x49, 0xd0, 0x4e, 0x85, 0x6e, - 0x65, 0x42, 0xaf, 0x4c, 0x16, 0x78, 0x33, 0x57, 0x5d, 0xf6, 0x87, 0xcf, 0x9e, 0x57, 0xe6, 0xbe, - 0x7d, 0x5e, 0x99, 0xfb, 0xee, 0x79, 0x65, 0xee, 0xf7, 0x83, 0x8a, 0xf6, 0x6c, 0x50, 0xd1, 0xbe, - 0x1d, 0x54, 0xb4, 0xef, 0x06, 0x15, 0xed, 0x3f, 0x83, 0x8a, 0xf6, 0xa7, 0xff, 0x56, 0xe6, 0x7e, - 0xb5, 0xac, 0xe4, 0xfe, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x6f, 0xf9, 0x85, 0x0c, 0x05, 0x17, 0x00, - 0x00, -} diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto index 738c82c67..7be0a7f7a 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -186,7 +186,7 @@ message PodDisruptionBudgetStatus { // that will be applied to a pod and container. message PodSecurityPolicy { // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -198,7 +198,7 @@ message PodSecurityPolicy { // PodSecurityPolicyList is a list of PodSecurityPolicy objects. message PodSecurityPolicyList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -299,7 +299,8 @@ message PodSecurityPolicySpec { repeated AllowedFlexVolume allowedFlexVolumes = 18; // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value means no CSI drivers can run inline within a pod spec. + // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. + // This is an alpha field, and is only honored if the API server enables the CSIInlineVolume feature gate. // +optional repeated AllowedCSIDriver allowedCSIDrivers = 23; @@ -329,6 +330,12 @@ message PodSecurityPolicySpec { // This requires the ProcMountType feature flag to be enabled. // +optional repeated string allowedProcMountTypes = 21; + + // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. + // If this field is omitted, the pod's runtimeClassName field is unrestricted. + // Enforcement of this field depends on the RuntimeClass feature gate being enabled. + // +optional + optional RuntimeClassStrategyOptions runtimeClass = 24; } // RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. @@ -353,6 +360,21 @@ message RunAsUserStrategyOptions { repeated IDRange ranges = 2; } +// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses +// for a pod. +message RuntimeClassStrategyOptions { + // allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. + // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the + // list. An empty list requires the RuntimeClassName field to be unset. + repeated string allowedRuntimeClassNames = 1; + + // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. + // The default MUST be allowed by the allowedRuntimeClassNames list. + // A value of nil does not mutate the Pod. + // +optional + optional string defaultRuntimeClassName = 2; +} + // SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. message SELinuxStrategyOptions { // rule is the strategy that will dictate the allowable labels that may be set. diff --git a/vendor/k8s.io/api/policy/v1beta1/types.go b/vendor/k8s.io/api/policy/v1beta1/types.go index 74ddd0693..b3001bb5c 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types.go +++ b/vendor/k8s.io/api/policy/v1beta1/types.go @@ -17,7 +17,7 @@ limitations under the License. package v1beta1 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" ) @@ -134,7 +134,7 @@ type Eviction struct { type PodSecurityPolicy struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -217,7 +217,8 @@ type PodSecurityPolicySpec struct { // +optional AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value means no CSI drivers can run inline within a pod spec. + // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. + // This is an alpha field, and is only honored if the API server enables the CSIInlineVolume feature gate. // +optional AllowedCSIDrivers []AllowedCSIDriver `json:"allowedCSIDrivers,omitempty" protobuf:"bytes,23,rep,name=allowedCSIDrivers"` // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. @@ -244,6 +245,11 @@ type PodSecurityPolicySpec struct { // This requires the ProcMountType feature flag to be enabled. // +optional AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty" protobuf:"bytes,21,opt,name=allowedProcMountTypes"` + // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. + // If this field is omitted, the pod's runtimeClassName field is unrestricted. + // Enforcement of this field depends on the RuntimeClass feature gate being enabled. + // +optional + RuntimeClass *RuntimeClassStrategyOptions `json:"runtimeClass,omitempty" protobuf:"bytes,24,opt,name=runtimeClass"` } // AllowedHostPath defines the host volume conditions that will be enabled by a policy @@ -449,13 +455,32 @@ const ( SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" ) +// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses +// for a pod. +type RuntimeClassStrategyOptions struct { + // allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. + // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the + // list. An empty list requires the RuntimeClassName field to be unset. + AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames" protobuf:"bytes,1,rep,name=allowedRuntimeClassNames"` + // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. + // The default MUST be allowed by the allowedRuntimeClassNames list. + // A value of nil does not mutate the Pod. + // +optional + DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty" protobuf:"bytes,2,opt,name=defaultRuntimeClassName"` +} + +// AllowAllRuntimeClassNames can be used as a value for the +// RuntimeClassStrategyOptions.AllowedRuntimeClassNames field and means that any RuntimeClassName is +// allowed. +const AllowAllRuntimeClassNames = "*" + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodSecurityPolicyList is a list of PodSecurityPolicy objects. type PodSecurityPolicyList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go index 324116d5d..0bbc48f9f 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go @@ -140,7 +140,7 @@ func (PodDisruptionBudgetStatus) SwaggerDoc() map[string]string { var map_PodSecurityPolicy = map[string]string{ "": "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "spec defines the policy enforced.", } @@ -150,7 +150,7 @@ func (PodSecurityPolicy) SwaggerDoc() map[string]string { var map_PodSecurityPolicyList = map[string]string{ "": "PodSecurityPolicyList is a list of PodSecurityPolicy objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is a list of schema objects.", } @@ -179,10 +179,11 @@ var map_PodSecurityPolicySpec = map[string]string{ "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", "allowedHostPaths": "allowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used.", "allowedFlexVolumes": "allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", - "allowedCSIDrivers": "AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value means no CSI drivers can run inline within a pod spec.", + "allowedCSIDrivers": "AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes. This is an alpha field, and is only honored if the API server enables the CSIInlineVolume feature gate.", "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. 
Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", "allowedProcMountTypes": "AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled.", + "runtimeClass": "runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. If this field is omitted, the pod's runtimeClassName field is unrestricted. Enforcement of this field depends on the RuntimeClass feature gate being enabled.", } func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { @@ -209,6 +210,16 @@ func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { return map_RunAsUserStrategyOptions } +var map_RuntimeClassStrategyOptions = map[string]string{ + "": "RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod.", + "allowedRuntimeClassNames": "allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. A value of \"*\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset.", + "defaultRuntimeClassName": "defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. A value of nil does not mutate the Pod.", +} + +func (RuntimeClassStrategyOptions) SwaggerDoc() map[string]string { + return map_RuntimeClassStrategyOptions +} + var map_SELinuxStrategyOptions = map[string]string{ "": "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy.", "rule": "rule is the strategy that will dictate the allowable labels that may be set.", diff --git a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go index 24b078eda..75851e124 100644 --- a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go @@ -191,7 +191,7 @@ func (in *PodDisruptionBudget) DeepCopyObject() runtime.Object { func (in *PodDisruptionBudgetList) DeepCopyInto(out *PodDisruptionBudgetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodDisruptionBudget, len(*in)) @@ -305,7 +305,7 @@ func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object { func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodSecurityPolicy, len(*in)) @@ -411,6 +411,11 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { *out = make([]corev1.ProcMountType, len(*in)) copy(*out, *in) } + if in.RuntimeClass != nil { + in, out := &in.RuntimeClass, &out.RuntimeClass + *out = new(RuntimeClassStrategyOptions) + (*in).DeepCopyInto(*out) + } return } @@ -466,6 +471,32 @@ func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing 
into out. in must be non-nil. +func (in *RuntimeClassStrategyOptions) DeepCopyInto(out *RuntimeClassStrategyOptions) { + *out = *in + if in.AllowedRuntimeClassNames != nil { + in, out := &in.AllowedRuntimeClassNames, &out.AllowedRuntimeClassNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DefaultRuntimeClassName != nil { + in, out := &in.DefaultRuntimeClassName, &out.DefaultRuntimeClassName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassStrategyOptions. +func (in *RuntimeClassStrategyOptions) DeepCopy() *RuntimeClassStrategyOptions { + if in == nil { + return nil + } + out := new(RuntimeClassStrategyOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { *out = *in diff --git a/vendor/k8s.io/api/rbac/v1/generated.pb.go b/vendor/k8s.io/api/rbac/v1/generated.pb.go index 708db3276..9bb48fc31 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1/generated.pb.go @@ -17,38 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto - - It has these top-level messages: - AggregationRule - ClusterRole - ClusterRoleBinding - ClusterRoleBindingList - ClusterRoleList - PolicyRule - Role - RoleBinding - RoleBindingList - RoleList - RoleRef - Subject -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -61,53 +44,341 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AggregationRule) Reset() { *m = AggregationRule{} } -func (*AggregationRule) ProtoMessage() {} -func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{0} +} +func (m *AggregationRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AggregationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AggregationRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_AggregationRule.Merge(m, src) +} +func (m *AggregationRule) XXX_Size() int { + return m.Size() +} +func (m *AggregationRule) XXX_DiscardUnknown() { + xxx_messageInfo_AggregationRule.DiscardUnknown(m) +} + +var xxx_messageInfo_AggregationRule proto.InternalMessageInfo -func (m *ClusterRole) Reset() { *m = ClusterRole{} } -func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (*ClusterRole) ProtoMessage() {} +func (*ClusterRole) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{1} +} +func (m *ClusterRole) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRole) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRole.Merge(m, src) +} +func (m *ClusterRole) XXX_Size() int { + return m.Size() +} +func (m *ClusterRole) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRole.DiscardUnknown(m) +} -func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } -func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_ClusterRole proto.InternalMessageInfo + +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (*ClusterRoleBinding) ProtoMessage() {} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{2} +} +func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBinding.Merge(m, src) +} +func (m *ClusterRoleBinding) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo + +func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } +func (*ClusterRoleBindingList) ProtoMessage() {} +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{3} +} +func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *ClusterRoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBindingList.Merge(m, src) +} +func (m *ClusterRoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBindingList.DiscardUnknown(m) +} -func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } -func (*ClusterRoleBindingList) ProtoMessage() {} -func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo -func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } -func (*ClusterRoleList) ProtoMessage() {} -func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (*ClusterRoleList) ProtoMessage() {} +func (*ClusterRoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{4} +} +func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleList.Merge(m, src) +} +func (m *ClusterRoleList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleList.DiscardUnknown(m) +} -func (m *PolicyRule) Reset() { *m = PolicyRule{} } -func (*PolicyRule) ProtoMessage() {} -func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo -func (m *Role) Reset() { *m = Role{} } -func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{5} +} +func (m *PolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyRule.Merge(m, src) +} +func (m *PolicyRule) XXX_Size() int { + return m.Size() +} +func (m *PolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRule.DiscardUnknown(m) +} -func (m *RoleBinding) Reset() { *m = RoleBinding{} } -func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_PolicyRule proto.InternalMessageInfo -func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } -func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *Role) Reset() { *m = Role{} } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { + return 
fileDescriptor_979ffd7b30c07419, []int{6} +} +func (m *Role) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Role) XXX_Merge(src proto.Message) { + xxx_messageInfo_Role.Merge(m, src) +} +func (m *Role) XXX_Size() int { + return m.Size() +} +func (m *Role) XXX_DiscardUnknown() { + xxx_messageInfo_Role.DiscardUnknown(m) +} -func (m *RoleList) Reset() { *m = RoleList{} } -func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_Role proto.InternalMessageInfo -func (m *RoleRef) Reset() { *m = RoleRef{} } -func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *RoleBinding) Reset() { *m = RoleBinding{} } +func (*RoleBinding) ProtoMessage() {} +func (*RoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{7} +} +func (m *RoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBinding.Merge(m, src) +} +func (m *RoleBinding) XXX_Size() int { + return m.Size() +} +func (m *RoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBinding.DiscardUnknown(m) +} -func (m *Subject) Reset() { *m = Subject{} } -func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_RoleBinding proto.InternalMessageInfo + +func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } +func (*RoleBindingList) ProtoMessage() {} +func (*RoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{8} +} +func (m *RoleBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBindingList.Merge(m, src) +} +func (m *RoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *RoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo + +func (m *RoleList) Reset() { *m = RoleList{} } +func (*RoleList) ProtoMessage() {} +func (*RoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{9} +} +func (m *RoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleList.Merge(m, src) +} +func (m *RoleList) XXX_Size() int { + return m.Size() +} +func (m *RoleList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleList proto.InternalMessageInfo + +func (m *RoleRef) 
Reset() { *m = RoleRef{} } +func (*RoleRef) ProtoMessage() {} +func (*RoleRef) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{10} +} +func (m *RoleRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleRef.Merge(m, src) +} +func (m *RoleRef) XXX_Size() int { + return m.Size() +} +func (m *RoleRef) XXX_DiscardUnknown() { + xxx_messageInfo_RoleRef.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleRef proto.InternalMessageInfo + +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{11} +} +func (m *Subject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Subject) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subject.Merge(m, src) +} +func (m *Subject) XXX_Size() int { + return m.Size() +} +func (m *Subject) XXX_DiscardUnknown() { + xxx_messageInfo_Subject.DiscardUnknown(m) +} + +var xxx_messageInfo_Subject proto.InternalMessageInfo func init() { proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1.AggregationRule") @@ -123,10 +394,70 @@ func init() { proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1.RoleRef") proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1.Subject") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto", fileDescriptor_979ffd7b30c07419) +} + +var fileDescriptor_979ffd7b30c07419 = []byte{ + // 807 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x55, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0xce, 0xa4, 0x89, 0x1a, 0x4f, 0x88, 0x42, 0x87, 0x0a, 0x59, 0x05, 0x39, 0x95, 0x91, 0x50, + 0x25, 0xc0, 0x26, 0x05, 0x01, 0x12, 0xea, 0xa1, 0x2e, 0x02, 0x55, 0x2d, 0xa5, 0x9a, 0x0a, 0x0e, + 0x88, 0x03, 0x63, 0x67, 0xea, 0x0e, 0xf1, 0x2f, 0xcd, 0xd8, 0x91, 0x2a, 0x2e, 0x08, 0x89, 0x03, + 0xb7, 0x3d, 0xee, 0xfe, 0x05, 0x7b, 0xd9, 0x3d, 0xee, 0x5f, 0xb0, 0x97, 0x1e, 0x7b, 0xec, 0x29, + 0xda, 0x7a, 0xff, 0x90, 0x5d, 0xf9, 0x57, 0x9c, 0x1f, 0xee, 0x36, 0xa7, 0x48, 0xab, 0x3d, 0xb5, + 0xf3, 0xde, 0xf7, 0xbe, 0xf7, 0xcd, 0xe7, 0x79, 0x2f, 0xf0, 0xfb, 0xe1, 0x77, 0x42, 0x63, 0xbe, + 0x3e, 0x8c, 0x4c, 0xca, 0x3d, 0x1a, 0x52, 0xa1, 0x8f, 0xa8, 0x37, 0xf0, 0xb9, 0x9e, 0x27, 0x48, + 0xc0, 0x74, 0x6e, 0x12, 0x4b, 0x1f, 0xf5, 0x75, 0x9b, 0x7a, 0x94, 0x93, 0x90, 0x0e, 0xb4, 0x80, + 0xfb, 0xa1, 0x8f, 0x50, 0x86, 0xd1, 0x48, 0xc0, 0xb4, 0x04, 0xa3, 0x8d, 0xfa, 0x5b, 0x5f, 0xd8, + 0x2c, 0xbc, 0x88, 0x4c, 0xcd, 0xf2, 0x5d, 0xdd, 0xf6, 0x6d, 0x5f, 0x4f, 0xa1, 0x66, 0x74, 0x9e, + 0x9e, 0xd2, 0x43, 0xfa, 0x5f, 0x46, 0xb1, 0xf5, 0x75, 0xd9, 0xc6, 0x25, 0xd6, 0x05, 0xf3, 0x28, + 0xbf, 0xd4, 0x83, 0xa1, 0x9d, 0x04, 0x84, 0xee, 0xd2, 0x90, 0x54, 0x34, 0xde, 0xd2, 0xef, 0xaa, + 0xe2, 0x91, 0x17, 0x32, 0x97, 0x2e, 0x14, 0x7c, 0x73, 0x5f, 0x81, 0xb0, 0x2e, 0xa8, 0x4b, 0xe6, + 0xeb, 0xd4, 0x47, 0x00, 0x76, 0xf7, 0x6d, 0x9b, 0x53, 0x9b, 0x84, 0xcc, 0xf7, 0x70, 0xe4, 0x50, + 0xf4, 0x1f, 0x80, 0x9b, 0x96, 0x13, 0x89, 0x90, 0x72, 0xec, 0x3b, 0xf4, 0x8c, 0x3a, 0xd4, 
0x0a, + 0x7d, 0x2e, 0x64, 0xb0, 0xbd, 0xb6, 0xd3, 0xde, 0xfd, 0x4a, 0x2b, 0x5d, 0x99, 0xf4, 0xd2, 0x82, + 0xa1, 0x9d, 0x04, 0x84, 0x96, 0x5c, 0x49, 0x1b, 0xf5, 0xb5, 0x63, 0x62, 0x52, 0xa7, 0xa8, 0x35, + 0x3e, 0xbe, 0x1a, 0xf7, 0x6a, 0xf1, 0xb8, 0xb7, 0x79, 0x50, 0x41, 0x8c, 0x2b, 0xdb, 0xa9, 0x0f, + 0xeb, 0xb0, 0x3d, 0x05, 0x47, 0x7f, 0xc2, 0x56, 0x42, 0x3e, 0x20, 0x21, 0x91, 0xc1, 0x36, 0xd8, + 0x69, 0xef, 0x7e, 0xb9, 0x9c, 0x94, 0x5f, 0xcc, 0xbf, 0xa8, 0x15, 0xfe, 0x4c, 0x43, 0x62, 0xa0, + 0x5c, 0x07, 0x2c, 0x63, 0x78, 0xc2, 0x8a, 0x0e, 0x60, 0x93, 0x47, 0x0e, 0x15, 0x72, 0x3d, 0xbd, + 0xa9, 0xa2, 0x2d, 0x7e, 0x7f, 0xed, 0xd4, 0x77, 0x98, 0x75, 0x99, 0x18, 0x65, 0x74, 0x72, 0xb2, + 0x66, 0x72, 0x12, 0x38, 0xab, 0x45, 0x26, 0xec, 0x92, 0x59, 0x47, 0xe5, 0xb5, 0x54, 0xed, 0x27, + 0x55, 0x74, 0x73, 0xe6, 0x1b, 0x1f, 0xc4, 0xe3, 0xde, 0xfc, 0x17, 0xc1, 0xf3, 0x84, 0xea, 0xff, + 0x75, 0x88, 0xa6, 0xac, 0x31, 0x98, 0x37, 0x60, 0x9e, 0xbd, 0x02, 0x87, 0x0e, 0x61, 0x4b, 0x44, + 0x69, 0xa2, 0x30, 0xe9, 0xa3, 0xaa, 0x5b, 0x9d, 0x65, 0x18, 0xe3, 0xfd, 0x9c, 0xac, 0x95, 0x07, + 0x04, 0x9e, 0x94, 0xa3, 0x1f, 0xe1, 0x3a, 0xf7, 0x1d, 0x8a, 0xe9, 0x79, 0xee, 0x4f, 0x25, 0x13, + 0xce, 0x20, 0x46, 0x37, 0x67, 0x5a, 0xcf, 0x03, 0xb8, 0x28, 0x56, 0x9f, 0x03, 0xf8, 0xe1, 0xa2, + 0x17, 0xc7, 0x4c, 0x84, 0xe8, 0x8f, 0x05, 0x3f, 0xb4, 0x25, 0x1f, 0x2f, 0x13, 0x99, 0x1b, 0x93, + 0x0b, 0x14, 0x91, 0x29, 0x2f, 0x8e, 0x60, 0x93, 0x85, 0xd4, 0x2d, 0x8c, 0xf8, 0xb4, 0x4a, 0xfe, + 0xa2, 0xb0, 0xf2, 0xd5, 0x1c, 0x26, 0xc5, 0x38, 0xe3, 0x50, 0x9f, 0x01, 0xd8, 0x9d, 0x02, 0xaf, + 0x40, 0xfe, 0x0f, 0xb3, 0xf2, 0x7b, 0xf7, 0xc9, 0xaf, 0xd6, 0xfd, 0x0a, 0x40, 0x58, 0x8e, 0x04, + 0xea, 0xc1, 0xe6, 0x88, 0x72, 0x33, 0xdb, 0x15, 0x92, 0x21, 0x25, 0xf8, 0xdf, 0x92, 0x00, 0xce, + 0xe2, 0xe8, 0x33, 0x28, 0x91, 0x80, 0xfd, 0xc4, 0xfd, 0x28, 0xc8, 0x3a, 0x4b, 0x46, 0x27, 0x1e, + 0xf7, 0xa4, 0xfd, 0xd3, 0xc3, 0x2c, 0x88, 0xcb, 0x7c, 0x02, 0xe6, 0x54, 0xf8, 0x11, 0xb7, 0xa8, + 0x90, 0xd7, 0x4a, 0x30, 0x2e, 0x82, 0xb8, 0xcc, 0xa3, 0x6f, 0x61, 0xa7, 0x38, 0x9c, 0x10, 0x97, + 0x0a, 0xb9, 0x91, 0x16, 0x6c, 0xc4, 0xe3, 0x5e, 0x07, 0x4f, 0x27, 0xf0, 0x2c, 0x0e, 0xed, 0xc1, + 0xae, 0xe7, 0x7b, 0x05, 0xe4, 0x57, 0x7c, 0x2c, 0xe4, 0x66, 0x5a, 0x9a, 0xce, 0xe2, 0xc9, 0x6c, + 0x0a, 0xcf, 0x63, 0xd5, 0xa7, 0x00, 0x36, 0xde, 0xa2, 0xfd, 0xa4, 0xfe, 0x5b, 0x87, 0xed, 0x77, + 0x7e, 0x69, 0x24, 0xe3, 0xb6, 0xda, 0x6d, 0xb1, 0xcc, 0xb8, 0xdd, 0xbf, 0x26, 0x1e, 0x03, 0xd8, + 0x5a, 0xd1, 0x7e, 0xd8, 0x9b, 0x15, 0x2c, 0xdf, 0x29, 0xb8, 0x5a, 0xe9, 0xdf, 0xb0, 0x70, 0x1d, + 0x7d, 0x0e, 0x5b, 0xc5, 0x4c, 0xa7, 0x3a, 0xa5, 0xb2, 0x6f, 0x31, 0xf6, 0x78, 0x82, 0x40, 0xdb, + 0xb0, 0x31, 0x64, 0xde, 0x40, 0xae, 0xa7, 0xc8, 0xf7, 0x72, 0x64, 0xe3, 0x88, 0x79, 0x03, 0x9c, + 0x66, 0x12, 0x84, 0x47, 0xdc, 0xec, 0x67, 0x75, 0x0a, 0x91, 0x4c, 0x33, 0x4e, 0x33, 0xea, 0x13, + 0x00, 0xd7, 0xf3, 0xd7, 0x33, 0xe1, 0x03, 0x77, 0xf2, 0x4d, 0xeb, 0xab, 0x2f, 0xa3, 0xef, 0xcd, + 0xdd, 0x91, 0x0e, 0xa5, 0xe4, 0xaf, 0x08, 0x88, 0x45, 0xe5, 0x46, 0x0a, 0xdb, 0xc8, 0x61, 0xd2, + 0x49, 0x91, 0xc0, 0x25, 0xc6, 0xd8, 0xb9, 0xba, 0x55, 0x6a, 0xd7, 0xb7, 0x4a, 0xed, 0xe6, 0x56, + 0xa9, 0xfd, 0x13, 0x2b, 0xe0, 0x2a, 0x56, 0xc0, 0x75, 0xac, 0x80, 0x9b, 0x58, 0x01, 0x2f, 0x62, + 0x05, 0x3c, 0x78, 0xa9, 0xd4, 0x7e, 0xaf, 0x8f, 0xfa, 0xaf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x24, + 0xa1, 0x47, 0x98, 0xcf, 0x0a, 0x00, 0x00, +} + func (m *AggregationRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -134,29 +465,36 @@ func (m *AggregationRule) Marshal() (dAtA []byte, err error) { } func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AggregationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.ClusterRoleSelectors) > 0 { - for _, msg := range m.ClusterRoleSelectors { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.ClusterRoleSelectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ClusterRoleSelectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *ClusterRole) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -164,47 +502,58 @@ func (m *ClusterRole) Marshal() (dAtA []byte, err error) { } func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRole) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.AggregationRule != nil { + { + size, err := m.AggregationRule.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if m.AggregationRule != nil { + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) - n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n2 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -212,45 +561,56 @@ func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + 
size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- + dAtA[i] = 0x1a if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n4, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -258,37 +618,46 @@ func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -296,37 +665,46 @@ func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, 
err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PolicyRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -334,92 +712,67 @@ func (m *PolicyRule) Marshal() (dAtA []byte, err error) { } func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x2a } } - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x22 } } if len(m.Resources) > 0 { - for _, s := range m.Resources { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - if len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *Role) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -427,37 +780,46 @@ func (m *Role) Marshal() (dAtA []byte, err error) { } func (m *Role) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -465,45 +827,56 @@ func (m *RoleBinding) Marshal() (dAtA []byte, err error) { } func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n8, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0x1a if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n9, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -511,37 +884,46 @@ func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBindingList) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n10, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -549,37 +931,46 @@ func (m *RoleList) Marshal() (dAtA []byte, err error) { } func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleRef) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -587,29 +978,37 @@ func (m *RoleRef) Marshal() (dAtA []byte, err error) { } func (m *RoleRef) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i += copy(dAtA[i:], m.APIGroup) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x1a - i++ + i -= len(m.Name) + copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Subject) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -617,39 +1016,53 @@ func (m *Subject) Marshal() (dAtA []byte, err error) { } func (m *Subject) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Subject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i += copy(dAtA[i:], m.APIGroup) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *AggregationRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.ClusterRoleSelectors) > 0 { @@ -662,6 +1075,9 @@ func (m *AggregationRule) Size() (n int) { } func (m *ClusterRole) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -680,6 +1096,9 @@ func (m *ClusterRole) Size() (n int) { } func (m *ClusterRoleBinding) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -696,6 +1115,9 @@ func (m *ClusterRoleBinding) Size() (n int) { } func (m *ClusterRoleBindingList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -710,6 +1132,9 @@ func (m *ClusterRoleBindingList) Size() (n int) { } func (m *ClusterRoleList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -724,6 +1149,9 @@ func (m *ClusterRoleList) Size() (n int) { } func (m *PolicyRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Verbs) > 0 { @@ -760,6 +1188,9 @@ func (m *PolicyRule) Size() (n int) { } func (m *Role) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -774,6 +1205,9 @@ func (m *Role) Size() (n int) { } func (m *RoleBinding) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -790,6 +1224,9 @@ func (m *RoleBinding) Size() (n int) { } func (m *RoleBindingList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -804,6 +1241,9 @@ func (m *RoleBindingList) Size() (n int) { } func (m *RoleList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -818,6 +1258,9 @@ func (m 
*RoleList) Size() (n int) { } func (m *RoleRef) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.APIGroup) @@ -830,6 +1273,9 @@ func (m *RoleRef) Size() (n int) { } func (m *Subject) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -844,14 +1290,7 @@ func (m *Subject) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -860,8 +1299,13 @@ func (this *AggregationRule) String() string { if this == nil { return "nil" } + repeatedStringForClusterRoleSelectors := "[]LabelSelector{" + for _, f := range this.ClusterRoleSelectors { + repeatedStringForClusterRoleSelectors += fmt.Sprintf("%v", f) + "," + } + repeatedStringForClusterRoleSelectors += "}" s := strings.Join([]string{`&AggregationRule{`, - `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `ClusterRoleSelectors:` + repeatedStringForClusterRoleSelectors + `,`, `}`, }, "") return s @@ -870,10 +1314,15 @@ func (this *ClusterRole) String() string { if this == nil { return "nil" } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&ClusterRole{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, - `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `AggregationRule:` + strings.Replace(this.AggregationRule.String(), "AggregationRule", "AggregationRule", 1) + `,`, `}`, }, "") return s @@ -882,9 +1331,14 @@ func (this *ClusterRoleBinding) String() string { if this == nil { return "nil" } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" s := strings.Join([]string{`&ClusterRoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -894,9 +1348,14 @@ func (this *ClusterRoleBindingList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ClusterRoleBinding{" + for _, 
f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -905,9 +1364,14 @@ func (this *ClusterRoleList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ClusterRole{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -930,9 +1394,14 @@ func (this *Role) String() string { if this == nil { return "nil" } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&Role{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, `}`, }, "") return s @@ -941,9 +1410,14 @@ func (this *RoleBinding) String() string { if this == nil { return "nil" } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" s := strings.Join([]string{`&RoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -953,9 +1427,14 @@ func (this *RoleBindingList) String() string { if this == nil { return 
"nil" } + repeatedStringForItems := "[]RoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -964,9 +1443,14 @@ func (this *RoleList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Role{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Role", "Role", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Role", "Role", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1019,7 +1503,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1047,7 +1531,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1056,10 +1540,13 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, v1.LabelSelector{}) if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1073,6 +1560,9 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1100,7 +1590,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1128,7 +1618,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1137,6 +1627,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1158,7 +1651,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1167,6 +1660,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1189,7 +1685,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1198,6 +1694,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1217,6 +1716,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1244,7 +1746,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1272,7 +1774,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1281,6 +1783,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1302,7 +1807,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1311,6 +1816,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1333,7 +1841,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1342,6 +1850,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1358,6 +1869,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1385,7 +1899,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1413,7 +1927,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1422,6 +1936,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1443,7 +1960,7 @@ func (m 
*ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1452,6 +1969,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1469,6 +1989,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1496,7 +2019,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1524,7 +2047,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1533,6 +2056,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1554,7 +2080,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1563,6 +2089,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1580,6 +2109,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1607,7 +2139,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1635,7 +2167,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1645,6 +2177,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1664,7 +2199,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1674,6 +2209,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1693,7 +2231,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1703,6 +2241,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } 
if postIndex > l { return io.ErrUnexpectedEOF } @@ -1722,7 +2263,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1732,6 +2273,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1751,7 +2295,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1761,6 +2305,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1775,6 +2322,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1802,7 +2352,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1830,7 +2380,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1839,6 +2389,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1860,7 +2413,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1869,6 +2422,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1886,6 +2442,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1913,7 +2472,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1941,7 +2500,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1950,6 +2509,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1971,7 +2533,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1980,6 +2542,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -2002,7 +2567,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2011,6 +2576,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2027,6 +2595,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2054,7 +2625,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2082,7 +2653,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2091,6 +2662,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2112,7 +2686,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2121,6 +2695,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2138,6 +2715,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2165,7 +2745,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2193,7 +2773,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2202,6 +2782,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2223,7 +2806,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2232,6 +2815,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2249,6 +2835,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2276,7 +2865,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break 
} @@ -2304,7 +2893,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2314,6 +2903,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2333,7 +2925,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2343,6 +2935,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2362,7 +2957,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2372,6 +2967,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2386,6 +2984,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2413,7 +3014,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2441,7 +3042,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2451,6 +3052,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2470,7 +3074,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2480,6 +3084,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2499,7 +3106,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2509,6 +3116,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2528,7 +3138,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2538,6 +3148,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > 
l { return io.ErrUnexpectedEOF } @@ -2552,6 +3165,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2618,10 +3234,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -2650,6 +3269,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -2668,62 +3290,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 807 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x55, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0xce, 0xa4, 0x89, 0x1a, 0x4f, 0x88, 0x42, 0x87, 0x0a, 0x59, 0x05, 0x39, 0x95, 0x91, 0x50, - 0x25, 0xc0, 0x26, 0x05, 0x01, 0x12, 0xea, 0xa1, 0x2e, 0x02, 0x55, 0x2d, 0xa5, 0x9a, 0x0a, 0x0e, - 0x88, 0x03, 0x63, 0x67, 0xea, 0x0e, 0xf1, 0x2f, 0xcd, 0xd8, 0x91, 0x2a, 0x2e, 0x08, 0x89, 0x03, - 0xb7, 0x3d, 0xee, 0xfe, 0x05, 0x7b, 0xd9, 0x3d, 0xee, 0x5f, 0xb0, 0x97, 0x1e, 0x7b, 0xec, 0x29, - 0xda, 0x7a, 0xff, 0x90, 0x5d, 0xf9, 0x57, 0x9c, 0x1f, 0xee, 0x36, 0xa7, 0x48, 0xab, 0x3d, 0xb5, - 0xf3, 0xde, 0xf7, 0xbe, 0xf7, 0xcd, 0xe7, 0x79, 0x2f, 0xf0, 0xfb, 0xe1, 0x77, 0x42, 0x63, 0xbe, - 0x3e, 0x8c, 0x4c, 0xca, 0x3d, 0x1a, 0x52, 0xa1, 0x8f, 0xa8, 0x37, 0xf0, 0xb9, 0x9e, 0x27, 0x48, - 0xc0, 0x74, 0x6e, 0x12, 0x4b, 0x1f, 0xf5, 0x75, 0x9b, 0x7a, 0x94, 0x93, 0x90, 0x0e, 0xb4, 0x80, - 0xfb, 0xa1, 0x8f, 0x50, 0x86, 0xd1, 0x48, 0xc0, 0xb4, 0x04, 0xa3, 0x8d, 0xfa, 0x5b, 0x5f, 0xd8, - 0x2c, 0xbc, 0x88, 0x4c, 0xcd, 0xf2, 0x5d, 0xdd, 0xf6, 0x6d, 0x5f, 0x4f, 0xa1, 0x66, 0x74, 0x9e, - 0x9e, 0xd2, 0x43, 0xfa, 0x5f, 0x46, 0xb1, 0xf5, 0x75, 0xd9, 0xc6, 0x25, 0xd6, 0x05, 0xf3, 0x28, - 0xbf, 0xd4, 0x83, 0xa1, 0x9d, 0x04, 0x84, 0xee, 0xd2, 0x90, 0x54, 0x34, 0xde, 0xd2, 0xef, 0xaa, - 0xe2, 0x91, 0x17, 0x32, 0x97, 0x2e, 0x14, 0x7c, 0x73, 0x5f, 0x81, 0xb0, 0x2e, 0xa8, 0x4b, 0xe6, - 0xeb, 0xd4, 0x47, 0x00, 0x76, 0xf7, 0x6d, 0x9b, 0x53, 0x9b, 0x84, 0xcc, 0xf7, 0x70, 0xe4, 0x50, - 0xf4, 0x1f, 0x80, 0x9b, 0x96, 0x13, 0x89, 0x90, 0x72, 0xec, 0x3b, 0xf4, 0x8c, 0x3a, 0xd4, 0x0a, - 0x7d, 0x2e, 0x64, 0xb0, 0xbd, 0xb6, 0xd3, 0xde, 0xfd, 0x4a, 0x2b, 0x5d, 0x99, 0xf4, 0xd2, 0x82, - 0xa1, 0x9d, 0x04, 0x84, 0x96, 0x5c, 0x49, 0x1b, 0xf5, 0xb5, 0x63, 0x62, 0x52, 0xa7, 0xa8, 0x35, - 0x3e, 0xbe, 0x1a, 0xf7, 0x6a, 0xf1, 0xb8, 0xb7, 0x79, 0x50, 0x41, 0x8c, 0x2b, 0xdb, 0xa9, 0x0f, - 0xeb, 0xb0, 0x3d, 0x05, 0x47, 0x7f, 0xc2, 0x56, 0x42, 0x3e, 0x20, 0x21, 0x91, 0xc1, 0x36, 0xd8, - 0x69, 0xef, 0x7e, 0xb9, 0x9c, 0x94, 0x5f, 0xcc, 0xbf, 0xa8, 0x15, 0xfe, 0x4c, 0x43, 0x62, 0xa0, - 0x5c, 0x07, 0x2c, 0x63, 0x78, 0xc2, 0x8a, 0x0e, 0x60, 0x93, 0x47, 0x0e, 0x15, 0x72, 0x3d, 0xbd, - 0xa9, 0xa2, 0x2d, 0x7e, 0x7f, 0xed, 0xd4, 0x77, 0x98, 0x75, 0x99, 0x18, 0x65, 0x74, 0x72, 0xb2, - 0x66, 0x72, 0x12, 0x38, 0xab, 0x45, 0x26, 0xec, 0x92, 0x59, 0x47, 0xe5, 0xb5, 0x54, 0xed, 0x27, - 0x55, 0x74, 0x73, 0xe6, 0x1b, 0x1f, 0xc4, 0xe3, 
0xde, 0xfc, 0x17, 0xc1, 0xf3, 0x84, 0xea, 0xff, - 0x75, 0x88, 0xa6, 0xac, 0x31, 0x98, 0x37, 0x60, 0x9e, 0xbd, 0x02, 0x87, 0x0e, 0x61, 0x4b, 0x44, - 0x69, 0xa2, 0x30, 0xe9, 0xa3, 0xaa, 0x5b, 0x9d, 0x65, 0x18, 0xe3, 0xfd, 0x9c, 0xac, 0x95, 0x07, - 0x04, 0x9e, 0x94, 0xa3, 0x1f, 0xe1, 0x3a, 0xf7, 0x1d, 0x8a, 0xe9, 0x79, 0xee, 0x4f, 0x25, 0x13, - 0xce, 0x20, 0x46, 0x37, 0x67, 0x5a, 0xcf, 0x03, 0xb8, 0x28, 0x56, 0x9f, 0x03, 0xf8, 0xe1, 0xa2, - 0x17, 0xc7, 0x4c, 0x84, 0xe8, 0x8f, 0x05, 0x3f, 0xb4, 0x25, 0x1f, 0x2f, 0x13, 0x99, 0x1b, 0x93, - 0x0b, 0x14, 0x91, 0x29, 0x2f, 0x8e, 0x60, 0x93, 0x85, 0xd4, 0x2d, 0x8c, 0xf8, 0xb4, 0x4a, 0xfe, - 0xa2, 0xb0, 0xf2, 0xd5, 0x1c, 0x26, 0xc5, 0x38, 0xe3, 0x50, 0x9f, 0x01, 0xd8, 0x9d, 0x02, 0xaf, - 0x40, 0xfe, 0x0f, 0xb3, 0xf2, 0x7b, 0xf7, 0xc9, 0xaf, 0xd6, 0xfd, 0x0a, 0x40, 0x58, 0x8e, 0x04, - 0xea, 0xc1, 0xe6, 0x88, 0x72, 0x33, 0xdb, 0x15, 0x92, 0x21, 0x25, 0xf8, 0xdf, 0x92, 0x00, 0xce, - 0xe2, 0xe8, 0x33, 0x28, 0x91, 0x80, 0xfd, 0xc4, 0xfd, 0x28, 0xc8, 0x3a, 0x4b, 0x46, 0x27, 0x1e, - 0xf7, 0xa4, 0xfd, 0xd3, 0xc3, 0x2c, 0x88, 0xcb, 0x7c, 0x02, 0xe6, 0x54, 0xf8, 0x11, 0xb7, 0xa8, - 0x90, 0xd7, 0x4a, 0x30, 0x2e, 0x82, 0xb8, 0xcc, 0xa3, 0x6f, 0x61, 0xa7, 0x38, 0x9c, 0x10, 0x97, - 0x0a, 0xb9, 0x91, 0x16, 0x6c, 0xc4, 0xe3, 0x5e, 0x07, 0x4f, 0x27, 0xf0, 0x2c, 0x0e, 0xed, 0xc1, - 0xae, 0xe7, 0x7b, 0x05, 0xe4, 0x57, 0x7c, 0x2c, 0xe4, 0x66, 0x5a, 0x9a, 0xce, 0xe2, 0xc9, 0x6c, - 0x0a, 0xcf, 0x63, 0xd5, 0xa7, 0x00, 0x36, 0xde, 0xa2, 0xfd, 0xa4, 0xfe, 0x5b, 0x87, 0xed, 0x77, - 0x7e, 0x69, 0x24, 0xe3, 0xb6, 0xda, 0x6d, 0xb1, 0xcc, 0xb8, 0xdd, 0xbf, 0x26, 0x1e, 0x03, 0xd8, - 0x5a, 0xd1, 0x7e, 0xd8, 0x9b, 0x15, 0x2c, 0xdf, 0x29, 0xb8, 0x5a, 0xe9, 0xdf, 0xb0, 0x70, 0x1d, - 0x7d, 0x0e, 0x5b, 0xc5, 0x4c, 0xa7, 0x3a, 0xa5, 0xb2, 0x6f, 0x31, 0xf6, 0x78, 0x82, 0x40, 0xdb, - 0xb0, 0x31, 0x64, 0xde, 0x40, 0xae, 0xa7, 0xc8, 0xf7, 0x72, 0x64, 0xe3, 0x88, 0x79, 0x03, 0x9c, - 0x66, 0x12, 0x84, 0x47, 0xdc, 0xec, 0x67, 0x75, 0x0a, 0x91, 0x4c, 0x33, 0x4e, 0x33, 0xea, 0x13, - 0x00, 0xd7, 0xf3, 0xd7, 0x33, 0xe1, 0x03, 0x77, 0xf2, 0x4d, 0xeb, 0xab, 0x2f, 0xa3, 0xef, 0xcd, - 0xdd, 0x91, 0x0e, 0xa5, 0xe4, 0xaf, 0x08, 0x88, 0x45, 0xe5, 0x46, 0x0a, 0xdb, 0xc8, 0x61, 0xd2, - 0x49, 0x91, 0xc0, 0x25, 0xc6, 0xd8, 0xb9, 0xba, 0x55, 0x6a, 0xd7, 0xb7, 0x4a, 0xed, 0xe6, 0x56, - 0xa9, 0xfd, 0x13, 0x2b, 0xe0, 0x2a, 0x56, 0xc0, 0x75, 0xac, 0x80, 0x9b, 0x58, 0x01, 0x2f, 0x62, - 0x05, 0x3c, 0x78, 0xa9, 0xd4, 0x7e, 0xaf, 0x8f, 0xfa, 0xaf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x24, - 0xa1, 0x47, 0x98, 0xcf, 0x0a, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go index 07eb321ea..095a5e9c2 100644 --- a/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go @@ -122,7 +122,7 @@ func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object { func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRoleBinding, len(*in)) @@ -155,7 +155,7 @@ func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object { func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRole, len(*in)) @@ -294,7 +294,7 @@ 
func (in *RoleBinding) DeepCopyObject() runtime.Object { func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]RoleBinding, len(*in)) @@ -327,7 +327,7 @@ func (in *RoleBindingList) DeepCopyObject() runtime.Object { func (in *RoleList) DeepCopyInto(out *RoleList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Role, len(*in)) diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go index e035b331f..193362477 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go @@ -17,38 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto - - It has these top-level messages: - AggregationRule - ClusterRole - ClusterRoleBinding - ClusterRoleBindingList - ClusterRoleList - PolicyRule - Role - RoleBinding - RoleBindingList - RoleList - RoleRef - Subject -*/ package v1alpha1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -61,53 +44,341 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AggregationRule) Reset() { *m = AggregationRule{} } -func (*AggregationRule) ProtoMessage() {} -func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{0} +} +func (m *AggregationRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AggregationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AggregationRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_AggregationRule.Merge(m, src) +} +func (m *AggregationRule) XXX_Size() int { + return m.Size() +} +func (m *AggregationRule) XXX_DiscardUnknown() { + xxx_messageInfo_AggregationRule.DiscardUnknown(m) +} + +var xxx_messageInfo_AggregationRule proto.InternalMessageInfo -func (m *ClusterRole) Reset() { *m = ClusterRole{} } -func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (*ClusterRole) ProtoMessage() {} +func (*ClusterRole) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{1} +} +func (m *ClusterRole) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRole) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRole.Merge(m, src) +} +func (m *ClusterRole) XXX_Size() int { + return m.Size() +} +func (m *ClusterRole) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRole.DiscardUnknown(m) +} -func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } -func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_ClusterRole proto.InternalMessageInfo + +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (*ClusterRoleBinding) ProtoMessage() {} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{2} +} +func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBinding.Merge(m, src) +} +func (m *ClusterRoleBinding) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo + +func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } +func (*ClusterRoleBindingList) ProtoMessage() {} +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{3} +} +func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *ClusterRoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBindingList.Merge(m, src) +} +func (m *ClusterRoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBindingList.DiscardUnknown(m) +} -func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } -func (*ClusterRoleBindingList) ProtoMessage() {} -func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo -func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } -func (*ClusterRoleList) ProtoMessage() {} -func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (*ClusterRoleList) ProtoMessage() {} +func (*ClusterRoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{4} +} +func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleList.Merge(m, src) +} +func (m *ClusterRoleList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleList.DiscardUnknown(m) +} -func (m *PolicyRule) Reset() { *m = PolicyRule{} } -func (*PolicyRule) ProtoMessage() {} -func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo -func (m *Role) Reset() { *m = Role{} } -func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{5} +} +func (m *PolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyRule.Merge(m, src) +} +func (m *PolicyRule) XXX_Size() int { + return m.Size() +} +func (m *PolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRule.DiscardUnknown(m) +} -func (m *RoleBinding) Reset() { *m = RoleBinding{} } -func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_PolicyRule proto.InternalMessageInfo -func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } -func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *Role) Reset() { *m = Role{} } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { + return 
fileDescriptor_b59b0bd5e7cb9590, []int{6} +} +func (m *Role) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Role) XXX_Merge(src proto.Message) { + xxx_messageInfo_Role.Merge(m, src) +} +func (m *Role) XXX_Size() int { + return m.Size() +} +func (m *Role) XXX_DiscardUnknown() { + xxx_messageInfo_Role.DiscardUnknown(m) +} -func (m *RoleList) Reset() { *m = RoleList{} } -func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_Role proto.InternalMessageInfo -func (m *RoleRef) Reset() { *m = RoleRef{} } -func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *RoleBinding) Reset() { *m = RoleBinding{} } +func (*RoleBinding) ProtoMessage() {} +func (*RoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{7} +} +func (m *RoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBinding.Merge(m, src) +} +func (m *RoleBinding) XXX_Size() int { + return m.Size() +} +func (m *RoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBinding.DiscardUnknown(m) +} -func (m *Subject) Reset() { *m = Subject{} } -func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_RoleBinding proto.InternalMessageInfo + +func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } +func (*RoleBindingList) ProtoMessage() {} +func (*RoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{8} +} +func (m *RoleBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBindingList.Merge(m, src) +} +func (m *RoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *RoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo + +func (m *RoleList) Reset() { *m = RoleList{} } +func (*RoleList) ProtoMessage() {} +func (*RoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{9} +} +func (m *RoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleList.Merge(m, src) +} +func (m *RoleList) XXX_Size() int { + return m.Size() +} +func (m *RoleList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleList proto.InternalMessageInfo + +func (m *RoleRef) 
Reset() { *m = RoleRef{} } +func (*RoleRef) ProtoMessage() {} +func (*RoleRef) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{10} +} +func (m *RoleRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleRef.Merge(m, src) +} +func (m *RoleRef) XXX_Size() int { + return m.Size() +} +func (m *RoleRef) XXX_DiscardUnknown() { + xxx_messageInfo_RoleRef.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleRef proto.InternalMessageInfo + +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{11} +} +func (m *Subject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Subject) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subject.Merge(m, src) +} +func (m *Subject) XXX_Size() int { + return m.Size() +} +func (m *Subject) XXX_DiscardUnknown() { + xxx_messageInfo_Subject.DiscardUnknown(m) +} + +var xxx_messageInfo_Subject proto.InternalMessageInfo func init() { proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1alpha1.AggregationRule") @@ -123,10 +394,71 @@ func init() { proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1alpha1.RoleRef") proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1alpha1.Subject") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto", fileDescriptor_b59b0bd5e7cb9590) +} + +var fileDescriptor_b59b0bd5e7cb9590 = []byte{ + // 830 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbf, 0x8f, 0xe3, 0x44, + 0x14, 0xce, 0x64, 0x13, 0x36, 0x99, 0x25, 0x0a, 0x37, 0x9c, 0x90, 0xb5, 0x42, 0xce, 0x62, 0x81, + 0x74, 0x88, 0xc3, 0x66, 0x17, 0x04, 0x34, 0x14, 0xf1, 0x15, 0x28, 0x10, 0xf6, 0x96, 0x39, 0x71, + 0x05, 0xa2, 0x60, 0xe2, 0xcc, 0x39, 0x43, 0x6c, 0x8f, 0x35, 0x63, 0x47, 0x3a, 0xd1, 0xd0, 0xd0, + 0x22, 0x1a, 0x0a, 0x7a, 0x5a, 0x1a, 0x28, 0xf9, 0x07, 0x96, 0xee, 0xca, 0xad, 0x22, 0xd6, 0xfc, + 0x21, 0x20, 0x8f, 0xed, 0xd8, 0xf9, 0x45, 0x52, 0x45, 0x42, 0xba, 0x2a, 0x99, 0xf7, 0xbe, 0xf7, + 0xbd, 0xf7, 0xbe, 0x99, 0xf7, 0x0c, 0xfb, 0xd3, 0x0f, 0xa5, 0xc9, 0xb8, 0x35, 0x8d, 0x47, 0x54, + 0x04, 0x34, 0xa2, 0xd2, 0x9a, 0xd1, 0x60, 0xcc, 0x85, 0x95, 0x3b, 0x48, 0xc8, 0x2c, 0x31, 0x22, + 0x8e, 0x35, 0x3b, 0x27, 0x5e, 0x38, 0x21, 0xe7, 0x96, 0x4b, 0x03, 0x2a, 0x48, 0x44, 0xc7, 0x66, + 0x28, 0x78, 0xc4, 0x91, 0x96, 0x21, 0x4d, 0x12, 0x32, 0x33, 0x45, 0x9a, 0x05, 0xf2, 0xf4, 0x6d, + 0x97, 0x45, 0x93, 0x78, 0x64, 0x3a, 0xdc, 0xb7, 0x5c, 0xee, 0x72, 0x4b, 0x05, 0x8c, 0xe2, 0x27, + 0xea, 0xa4, 0x0e, 0xea, 0x5f, 0x46, 0x74, 0xfa, 0x5e, 0x99, 0xd2, 0x27, 0xce, 0x84, 0x05, 0x54, + 0x3c, 0xb5, 0xc2, 0xa9, 0x9b, 0x1a, 0xa4, 0xe5, 0xd3, 0x88, 0x58, 0xb3, 0xb5, 0xf4, 0xa7, 0xd6, + 0xb6, 0x28, 0x11, 0x07, 0x11, 0xf3, 0xe9, 0x5a, 0xc0, 0xfb, 0xbb, 0x02, 0xa4, 0x33, 0xa1, 0x3e, + 0x59, 0x8d, 0x33, 0x7e, 0x06, 0xb0, 0xdb, 0x77, 0x5d, 0x41, 0x5d, 0x12, 0x31, 0x1e, 0xe0, 0xd8, + 0xa3, 0xe8, 0x7b, 0x00, 0xef, 0x3a, 0x5e, 0x2c, 0x23, 0x2a, 0x30, 
0xf7, 0xe8, 0x23, 0xea, 0x51, + 0x27, 0xe2, 0x42, 0x6a, 0xe0, 0xec, 0xe8, 0xde, 0xc9, 0xc5, 0xbb, 0x66, 0xa9, 0xcd, 0x22, 0x97, + 0x19, 0x4e, 0xdd, 0xd4, 0x20, 0xcd, 0xb4, 0x25, 0x73, 0x76, 0x6e, 0x0e, 0xc9, 0x88, 0x7a, 0x45, + 0xac, 0xfd, 0xea, 0xf5, 0xbc, 0x57, 0x4b, 0xe6, 0xbd, 0xbb, 0x0f, 0x36, 0x10, 0xe3, 0x8d, 0xe9, + 0x8c, 0x5f, 0xea, 0xf0, 0xa4, 0x02, 0x47, 0x5f, 0xc3, 0x56, 0x4a, 0x3e, 0x26, 0x11, 0xd1, 0xc0, + 0x19, 0xb8, 0x77, 0x72, 0xf1, 0xce, 0x7e, 0xa5, 0x3c, 0x1c, 0x7d, 0x43, 0x9d, 0xe8, 0x33, 0x1a, + 0x11, 0x1b, 0xe5, 0x75, 0xc0, 0xd2, 0x86, 0x17, 0xac, 0x68, 0x00, 0x9b, 0x22, 0xf6, 0xa8, 0xd4, + 0xea, 0xaa, 0xd3, 0xd7, 0xcd, 0x6d, 0xaf, 0xc0, 0xbc, 0xe2, 0x1e, 0x73, 0x9e, 0xa6, 0x72, 0xd9, + 0x9d, 0x9c, 0xb2, 0x99, 0x9e, 0x24, 0xce, 0x18, 0xd0, 0x04, 0x76, 0xc9, 0xb2, 0xae, 0xda, 0x91, + 0xaa, 0xf9, 0xcd, 0xed, 0xa4, 0x2b, 0x17, 0x61, 0xbf, 0x9c, 0xcc, 0x7b, 0xab, 0xb7, 0x83, 0x57, + 0x69, 0x8d, 0x9f, 0xea, 0x10, 0x55, 0x64, 0xb2, 0x59, 0x30, 0x66, 0x81, 0x7b, 0x00, 0xb5, 0x1e, + 0xc2, 0x96, 0x8c, 0x95, 0xa3, 0x10, 0xec, 0xb5, 0xed, 0xbd, 0x3d, 0xca, 0x90, 0xf6, 0x4b, 0x39, + 0x65, 0x2b, 0x37, 0x48, 0xbc, 0x20, 0x41, 0x43, 0x78, 0x2c, 0xb8, 0x47, 0x31, 0x7d, 0x92, 0x6b, + 0xf5, 0x1f, 0x7c, 0x38, 0x03, 0xda, 0xdd, 0x9c, 0xef, 0x38, 0x37, 0xe0, 0x82, 0xc2, 0xf8, 0x13, + 0xc0, 0x57, 0xd6, 0x75, 0x19, 0x32, 0x19, 0xa1, 0xaf, 0xd6, 0xb4, 0x31, 0xf7, 0x7c, 0xd4, 0x4c, + 0x66, 0xca, 0x2c, 0xda, 0x28, 0x2c, 0x15, 0x5d, 0x3e, 0x87, 0x4d, 0x16, 0x51, 0xbf, 0x10, 0xe5, + 0xfe, 0xf6, 0x26, 0xd6, 0xcb, 0x2b, 0x5f, 0xd3, 0x20, 0xa5, 0xc0, 0x19, 0x93, 0xf1, 0x07, 0x80, + 0xdd, 0x0a, 0xf8, 0x00, 0x4d, 0x7c, 0xb2, 0xdc, 0xc4, 0x1b, 0xfb, 0x35, 0xb1, 0xb9, 0xfa, 0x7f, + 0x00, 0x84, 0xe5, 0xc0, 0xa0, 0x1e, 0x6c, 0xce, 0xa8, 0x18, 0x65, 0xfb, 0xa4, 0x6d, 0xb7, 0x53, + 0xfc, 0xe3, 0xd4, 0x80, 0x33, 0x3b, 0x7a, 0x0b, 0xb6, 0x49, 0xc8, 0x3e, 0x16, 0x3c, 0x0e, 0xa5, + 0x76, 0xa4, 0x40, 0x9d, 0x64, 0xde, 0x6b, 0xf7, 0xaf, 0x06, 0x99, 0x11, 0x97, 0xfe, 0x14, 0x2c, + 0xa8, 0xe4, 0xb1, 0x70, 0xa8, 0xd4, 0x1a, 0x25, 0x18, 0x17, 0x46, 0x5c, 0xfa, 0xd1, 0x07, 0xb0, + 0x53, 0x1c, 0x2e, 0x89, 0x4f, 0xa5, 0xd6, 0x54, 0x01, 0x77, 0x92, 0x79, 0xaf, 0x83, 0xab, 0x0e, + 0xbc, 0x8c, 0x43, 0x1f, 0xc1, 0x6e, 0xc0, 0x83, 0x02, 0xf2, 0x05, 0x1e, 0x4a, 0xed, 0x05, 0x15, + 0xaa, 0x66, 0xf4, 0x72, 0xd9, 0x85, 0x57, 0xb1, 0xc6, 0xef, 0x00, 0x36, 0xfe, 0x77, 0x3b, 0xcc, + 0xf8, 0xa1, 0x0e, 0x4f, 0x9e, 0xaf, 0x94, 0xca, 0x4a, 0x49, 0xc7, 0xf0, 0xb0, 0xbb, 0x64, 0xff, + 0x31, 0xdc, 0xbd, 0x44, 0x7e, 0x05, 0xb0, 0x75, 0xa0, 0xed, 0xf1, 0x60, 0xb9, 0x6c, 0x7d, 0x47, + 0xd9, 0x9b, 0xeb, 0xfd, 0x16, 0x16, 0x37, 0x80, 0xee, 0xc3, 0x56, 0x31, 0xf1, 0xaa, 0xda, 0x76, + 0x99, 0xbd, 0x58, 0x0a, 0x78, 0x81, 0x40, 0x67, 0xb0, 0x31, 0x65, 0xc1, 0x58, 0xab, 0x2b, 0xe4, + 0x8b, 0x39, 0xb2, 0xf1, 0x29, 0x0b, 0xc6, 0x58, 0x79, 0x52, 0x44, 0x40, 0xfc, 0xec, 0x93, 0x5c, + 0x41, 0xa4, 0xb3, 0x8e, 0x95, 0xc7, 0xf8, 0x0d, 0xc0, 0xe3, 0xfc, 0x3d, 0x2d, 0xf8, 0xc0, 0x56, + 0xbe, 0x0b, 0x08, 0x49, 0xc8, 0x1e, 0x53, 0x21, 0x19, 0x0f, 0xf2, 0xbc, 0x8b, 0x97, 0xde, 0xbf, + 0x1a, 0xe4, 0x1e, 0x5c, 0x41, 0xed, 0xae, 0x01, 0x59, 0xb0, 0x9d, 0xfe, 0xca, 0x90, 0x38, 0x54, + 0x6b, 0x28, 0xd8, 0x9d, 0x1c, 0xd6, 0xbe, 0x2c, 0x1c, 0xb8, 0xc4, 0xd8, 0xe6, 0xf5, 0xad, 0x5e, + 0x7b, 0x76, 0xab, 0xd7, 0x6e, 0x6e, 0xf5, 0xda, 0x77, 0x89, 0x0e, 0xae, 0x13, 0x1d, 0x3c, 0x4b, + 0x74, 0x70, 0x93, 0xe8, 0xe0, 0xaf, 0x44, 0x07, 0x3f, 0xfe, 0xad, 0xd7, 0xbe, 0x6c, 0x15, 0xe2, + 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x73, 0x15, 0x10, 0x29, 0x0b, 0x00, 0x00, +} + 
func (m *AggregationRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -134,29 +466,36 @@ func (m *AggregationRule) Marshal() (dAtA []byte, err error) { } func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AggregationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.ClusterRoleSelectors) > 0 { - for _, msg := range m.ClusterRoleSelectors { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.ClusterRoleSelectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ClusterRoleSelectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *ClusterRole) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -164,47 +503,58 @@ func (m *ClusterRole) Marshal() (dAtA []byte, err error) { } func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRole) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.AggregationRule != nil { + { + size, err := m.AggregationRule.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if m.AggregationRule != nil { + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) - n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n2 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -212,45 +562,56 @@ func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- + dAtA[i] = 0x1a if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n4, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -258,37 +619,46 @@ func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -296,37 +666,46 @@ func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PolicyRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -334,92 +713,67 @@ func (m *PolicyRule) Marshal() (dAtA []byte, err error) { } func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x32 } } - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x2a } } if len(m.Resources) > 0 { - for _, s := range m.Resources { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x1a } } - if len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { - dAtA[i] = 0x32 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return 
len(dAtA) - i, nil } func (m *Role) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -427,37 +781,46 @@ func (m *Role) Marshal() (dAtA []byte, err error) { } func (m *Role) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -465,45 +828,56 @@ func (m *RoleBinding) Marshal() (dAtA []byte, err error) { } func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n8, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0x1a if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n9, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -511,37 +885,46 @@ func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *RoleBindingList) 
MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n10, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -549,37 +932,46 @@ func (m *RoleList) Marshal() (dAtA []byte, err error) { } func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleRef) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -587,29 +979,37 @@ func (m *RoleRef) Marshal() (dAtA []byte, err error) { } func (m *RoleRef) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i += copy(dAtA[i:], m.APIGroup) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x1a - i++ + i -= len(m.Name) + copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Subject) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -617,39 +1017,53 @@ func (m *Subject) Marshal() (dAtA []byte, err error) { } func (m *Subject) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Subject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *AggregationRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.ClusterRoleSelectors) > 0 { @@ -662,6 +1076,9 @@ func (m *AggregationRule) Size() (n int) { } func (m *ClusterRole) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -680,6 +1097,9 @@ func (m *ClusterRole) Size() (n int) { } func (m *ClusterRoleBinding) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -696,6 +1116,9 @@ func (m *ClusterRoleBinding) Size() (n int) { } func (m *ClusterRoleBindingList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -710,6 +1133,9 @@ func (m *ClusterRoleBindingList) Size() (n int) { } func (m *ClusterRoleList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -724,6 +1150,9 @@ func (m *ClusterRoleList) Size() (n int) { } func (m *PolicyRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Verbs) > 0 { @@ -760,6 +1189,9 @@ func (m *PolicyRule) Size() (n int) { } func (m *Role) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -774,6 +1206,9 @@ func (m *Role) Size() (n int) { } func (m *RoleBinding) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -790,6 +1225,9 @@ func (m *RoleBinding) Size() (n int) { } func (m *RoleBindingList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -804,6 +1242,9 @@ func 
(m *RoleBindingList) Size() (n int) { } func (m *RoleList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -818,6 +1259,9 @@ func (m *RoleList) Size() (n int) { } func (m *RoleRef) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.APIGroup) @@ -830,6 +1274,9 @@ func (m *RoleRef) Size() (n int) { } func (m *Subject) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -844,14 +1291,7 @@ func (m *Subject) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -860,8 +1300,13 @@ func (this *AggregationRule) String() string { if this == nil { return "nil" } + repeatedStringForClusterRoleSelectors := "[]LabelSelector{" + for _, f := range this.ClusterRoleSelectors { + repeatedStringForClusterRoleSelectors += fmt.Sprintf("%v", f) + "," + } + repeatedStringForClusterRoleSelectors += "}" s := strings.Join([]string{`&AggregationRule{`, - `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `ClusterRoleSelectors:` + repeatedStringForClusterRoleSelectors + `,`, `}`, }, "") return s @@ -870,10 +1315,15 @@ func (this *ClusterRole) String() string { if this == nil { return "nil" } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&ClusterRole{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, - `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `AggregationRule:` + strings.Replace(this.AggregationRule.String(), "AggregationRule", "AggregationRule", 1) + `,`, `}`, }, "") return s @@ -882,9 +1332,14 @@ func (this *ClusterRoleBinding) String() string { if this == nil { return "nil" } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" s := strings.Join([]string{`&ClusterRoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, 
"") @@ -894,9 +1349,14 @@ func (this *ClusterRoleBindingList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ClusterRoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -905,9 +1365,14 @@ func (this *ClusterRoleList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ClusterRole{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -930,9 +1395,14 @@ func (this *Role) String() string { if this == nil { return "nil" } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&Role{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, `}`, }, "") return s @@ -941,9 +1411,14 @@ func (this *RoleBinding) String() string { if this == nil { return "nil" } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" s := strings.Join([]string{`&RoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, `RoleRef:` + 
strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -953,9 +1428,14 @@ func (this *RoleBindingList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]RoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -964,9 +1444,14 @@ func (this *RoleList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Role{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Role", "Role", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Role", "Role", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1019,7 +1504,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1047,7 +1532,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1056,10 +1541,13 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, v1.LabelSelector{}) if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1073,6 +1561,9 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1100,7 +1591,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1128,7 +1619,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1137,6 +1628,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1158,7 +1652,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1167,6 +1661,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1189,7 +1686,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1198,6 +1695,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1217,6 +1717,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1244,7 +1747,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1272,7 +1775,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1281,6 +1784,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1302,7 +1808,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1311,6 +1817,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1333,7 +1842,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1342,6 +1851,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1358,6 +1870,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1385,7 +1900,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1413,7 +1928,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1422,6 +1937,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { 
return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1443,7 +1961,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1452,6 +1970,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1469,6 +1990,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1496,7 +2020,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1524,7 +2048,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1533,6 +2057,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1554,7 +2081,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1563,6 +2090,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1580,6 +2110,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1607,7 +2140,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1635,7 +2168,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1645,6 +2178,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1664,7 +2200,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1674,6 +2210,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1693,7 +2232,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-1703,6 +2242,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1722,7 +2264,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1732,6 +2274,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1751,7 +2296,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1761,6 +2306,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1775,6 +2323,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1802,7 +2353,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1830,7 +2381,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1839,6 +2390,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1860,7 +2414,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1869,6 +2423,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1886,6 +2443,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1913,7 +2473,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1941,7 +2501,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1950,6 +2510,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1971,7 +2534,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1980,6 +2543,9 @@ func (m 
*RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2002,7 +2568,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2011,6 +2577,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2027,6 +2596,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2054,7 +2626,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2082,7 +2654,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2091,6 +2663,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2112,7 +2687,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2121,6 +2696,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2138,6 +2716,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2165,7 +2746,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2193,7 +2774,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2202,6 +2783,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2223,7 +2807,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2232,6 +2816,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2249,6 +2836,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } @@ -2276,7 +2866,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2304,7 +2894,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2314,6 +2904,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2333,7 +2926,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2343,6 +2936,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2362,7 +2958,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2372,6 +2968,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2386,6 +2985,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2413,7 +3015,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2441,7 +3043,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2451,6 +3053,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2470,7 +3075,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2480,6 +3085,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2499,7 +3107,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2509,6 +3117,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2528,7 +3139,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-2538,6 +3149,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2552,6 +3166,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2618,10 +3235,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -2650,6 +3270,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -2668,63 +3291,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 830 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbf, 0x8f, 0xe3, 0x44, - 0x14, 0xce, 0x64, 0x13, 0x36, 0x99, 0x25, 0x0a, 0x37, 0x9c, 0x90, 0xb5, 0x42, 0xce, 0x62, 0x81, - 0x74, 0x88, 0xc3, 0x66, 0x17, 0x04, 0x34, 0x14, 0xf1, 0x15, 0x28, 0x10, 0xf6, 0x96, 0x39, 0x71, - 0x05, 0xa2, 0x60, 0xe2, 0xcc, 0x39, 0x43, 0x6c, 0x8f, 0x35, 0x63, 0x47, 0x3a, 0xd1, 0xd0, 0xd0, - 0x22, 0x1a, 0x0a, 0x7a, 0x5a, 0x1a, 0x28, 0xf9, 0x07, 0x96, 0xee, 0xca, 0xad, 0x22, 0xd6, 0xfc, - 0x21, 0x20, 0x8f, 0xed, 0xd8, 0xf9, 0x45, 0x52, 0x45, 0x42, 0xba, 0x2a, 0x99, 0xf7, 0xbe, 0xf7, - 0xbd, 0xf7, 0xbe, 0x99, 0xf7, 0x0c, 0xfb, 0xd3, 0x0f, 0xa5, 0xc9, 0xb8, 0x35, 0x8d, 0x47, 0x54, - 0x04, 0x34, 0xa2, 0xd2, 0x9a, 0xd1, 0x60, 0xcc, 0x85, 0x95, 0x3b, 0x48, 0xc8, 0x2c, 0x31, 0x22, - 0x8e, 0x35, 0x3b, 0x27, 0x5e, 0x38, 0x21, 0xe7, 0x96, 0x4b, 0x03, 0x2a, 0x48, 0x44, 0xc7, 0x66, - 0x28, 0x78, 0xc4, 0x91, 0x96, 0x21, 0x4d, 0x12, 0x32, 0x33, 0x45, 0x9a, 0x05, 0xf2, 0xf4, 0x6d, - 0x97, 0x45, 0x93, 0x78, 0x64, 0x3a, 0xdc, 0xb7, 0x5c, 0xee, 0x72, 0x4b, 0x05, 0x8c, 0xe2, 0x27, - 0xea, 0xa4, 0x0e, 0xea, 0x5f, 0x46, 0x74, 0xfa, 0x5e, 0x99, 0xd2, 0x27, 0xce, 0x84, 0x05, 0x54, - 0x3c, 0xb5, 0xc2, 0xa9, 0x9b, 0x1a, 0xa4, 0xe5, 0xd3, 0x88, 0x58, 0xb3, 0xb5, 0xf4, 0xa7, 0xd6, - 0xb6, 0x28, 0x11, 0x07, 0x11, 0xf3, 0xe9, 0x5a, 0xc0, 0xfb, 0xbb, 0x02, 0xa4, 0x33, 0xa1, 0x3e, - 0x59, 0x8d, 0x33, 0x7e, 0x06, 0xb0, 0xdb, 0x77, 0x5d, 0x41, 0x5d, 0x12, 0x31, 0x1e, 0xe0, 0xd8, - 0xa3, 0xe8, 0x7b, 0x00, 0xef, 0x3a, 0x5e, 0x2c, 0x23, 0x2a, 0x30, 0xf7, 0xe8, 0x23, 0xea, 0x51, - 0x27, 0xe2, 0x42, 0x6a, 0xe0, 0xec, 0xe8, 0xde, 0xc9, 0xc5, 0xbb, 0x66, 0xa9, 0xcd, 0x22, 0x97, - 0x19, 0x4e, 0xdd, 0xd4, 0x20, 0xcd, 0xb4, 0x25, 0x73, 0x76, 0x6e, 0x0e, 0xc9, 0x88, 0x7a, 0x45, - 0xac, 0xfd, 0xea, 0xf5, 0xbc, 0x57, 0x4b, 0xe6, 0xbd, 0xbb, 0x0f, 0x36, 0x10, 0xe3, 0x8d, 0xe9, - 0x8c, 0x5f, 0xea, 0xf0, 0xa4, 0x02, 0x47, 0x5f, 0xc3, 0x56, 0x4a, 0x3e, 0x26, 0x11, 0xd1, 0xc0, - 0x19, 0xb8, 0x77, 0x72, 0xf1, 0xce, 0x7e, 0xa5, 0x3c, 0x1c, 0x7d, 0x43, 0x9d, 0xe8, 0x33, 0x1a, - 0x11, 0x1b, 0xe5, 0x75, 0xc0, 0xd2, 0x86, 0x17, 0xac, 0x68, 0x00, 0x9b, 0x22, 0xf6, 0xa8, 0xd4, - 0xea, 0xaa, 0xd3, 0xd7, 0xcd, 
0x6d, 0xaf, 0xc0, 0xbc, 0xe2, 0x1e, 0x73, 0x9e, 0xa6, 0x72, 0xd9, - 0x9d, 0x9c, 0xb2, 0x99, 0x9e, 0x24, 0xce, 0x18, 0xd0, 0x04, 0x76, 0xc9, 0xb2, 0xae, 0xda, 0x91, - 0xaa, 0xf9, 0xcd, 0xed, 0xa4, 0x2b, 0x17, 0x61, 0xbf, 0x9c, 0xcc, 0x7b, 0xab, 0xb7, 0x83, 0x57, - 0x69, 0x8d, 0x9f, 0xea, 0x10, 0x55, 0x64, 0xb2, 0x59, 0x30, 0x66, 0x81, 0x7b, 0x00, 0xb5, 0x1e, - 0xc2, 0x96, 0x8c, 0x95, 0xa3, 0x10, 0xec, 0xb5, 0xed, 0xbd, 0x3d, 0xca, 0x90, 0xf6, 0x4b, 0x39, - 0x65, 0x2b, 0x37, 0x48, 0xbc, 0x20, 0x41, 0x43, 0x78, 0x2c, 0xb8, 0x47, 0x31, 0x7d, 0x92, 0x6b, - 0xf5, 0x1f, 0x7c, 0x38, 0x03, 0xda, 0xdd, 0x9c, 0xef, 0x38, 0x37, 0xe0, 0x82, 0xc2, 0xf8, 0x13, - 0xc0, 0x57, 0xd6, 0x75, 0x19, 0x32, 0x19, 0xa1, 0xaf, 0xd6, 0xb4, 0x31, 0xf7, 0x7c, 0xd4, 0x4c, - 0x66, 0xca, 0x2c, 0xda, 0x28, 0x2c, 0x15, 0x5d, 0x3e, 0x87, 0x4d, 0x16, 0x51, 0xbf, 0x10, 0xe5, - 0xfe, 0xf6, 0x26, 0xd6, 0xcb, 0x2b, 0x5f, 0xd3, 0x20, 0xa5, 0xc0, 0x19, 0x93, 0xf1, 0x07, 0x80, - 0xdd, 0x0a, 0xf8, 0x00, 0x4d, 0x7c, 0xb2, 0xdc, 0xc4, 0x1b, 0xfb, 0x35, 0xb1, 0xb9, 0xfa, 0x7f, - 0x00, 0x84, 0xe5, 0xc0, 0xa0, 0x1e, 0x6c, 0xce, 0xa8, 0x18, 0x65, 0xfb, 0xa4, 0x6d, 0xb7, 0x53, - 0xfc, 0xe3, 0xd4, 0x80, 0x33, 0x3b, 0x7a, 0x0b, 0xb6, 0x49, 0xc8, 0x3e, 0x16, 0x3c, 0x0e, 0xa5, - 0x76, 0xa4, 0x40, 0x9d, 0x64, 0xde, 0x6b, 0xf7, 0xaf, 0x06, 0x99, 0x11, 0x97, 0xfe, 0x14, 0x2c, - 0xa8, 0xe4, 0xb1, 0x70, 0xa8, 0xd4, 0x1a, 0x25, 0x18, 0x17, 0x46, 0x5c, 0xfa, 0xd1, 0x07, 0xb0, - 0x53, 0x1c, 0x2e, 0x89, 0x4f, 0xa5, 0xd6, 0x54, 0x01, 0x77, 0x92, 0x79, 0xaf, 0x83, 0xab, 0x0e, - 0xbc, 0x8c, 0x43, 0x1f, 0xc1, 0x6e, 0xc0, 0x83, 0x02, 0xf2, 0x05, 0x1e, 0x4a, 0xed, 0x05, 0x15, - 0xaa, 0x66, 0xf4, 0x72, 0xd9, 0x85, 0x57, 0xb1, 0xc6, 0xef, 0x00, 0x36, 0xfe, 0x77, 0x3b, 0xcc, - 0xf8, 0xa1, 0x0e, 0x4f, 0x9e, 0xaf, 0x94, 0xca, 0x4a, 0x49, 0xc7, 0xf0, 0xb0, 0xbb, 0x64, 0xff, - 0x31, 0xdc, 0xbd, 0x44, 0x7e, 0x05, 0xb0, 0x75, 0xa0, 0xed, 0xf1, 0x60, 0xb9, 0x6c, 0x7d, 0x47, - 0xd9, 0x9b, 0xeb, 0xfd, 0x16, 0x16, 0x37, 0x80, 0xee, 0xc3, 0x56, 0x31, 0xf1, 0xaa, 0xda, 0x76, - 0x99, 0xbd, 0x58, 0x0a, 0x78, 0x81, 0x40, 0x67, 0xb0, 0x31, 0x65, 0xc1, 0x58, 0xab, 0x2b, 0xe4, - 0x8b, 0x39, 0xb2, 0xf1, 0x29, 0x0b, 0xc6, 0x58, 0x79, 0x52, 0x44, 0x40, 0xfc, 0xec, 0x93, 0x5c, - 0x41, 0xa4, 0xb3, 0x8e, 0x95, 0xc7, 0xf8, 0x0d, 0xc0, 0xe3, 0xfc, 0x3d, 0x2d, 0xf8, 0xc0, 0x56, - 0xbe, 0x0b, 0x08, 0x49, 0xc8, 0x1e, 0x53, 0x21, 0x19, 0x0f, 0xf2, 0xbc, 0x8b, 0x97, 0xde, 0xbf, - 0x1a, 0xe4, 0x1e, 0x5c, 0x41, 0xed, 0xae, 0x01, 0x59, 0xb0, 0x9d, 0xfe, 0xca, 0x90, 0x38, 0x54, - 0x6b, 0x28, 0xd8, 0x9d, 0x1c, 0xd6, 0xbe, 0x2c, 0x1c, 0xb8, 0xc4, 0xd8, 0xe6, 0xf5, 0xad, 0x5e, - 0x7b, 0x76, 0xab, 0xd7, 0x6e, 0x6e, 0xf5, 0xda, 0x77, 0x89, 0x0e, 0xae, 0x13, 0x1d, 0x3c, 0x4b, - 0x74, 0x70, 0x93, 0xe8, 0xe0, 0xaf, 0x44, 0x07, 0x3f, 0xfe, 0xad, 0xd7, 0xbe, 0x6c, 0x15, 0xe2, - 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x73, 0x15, 0x10, 0x29, 0x0b, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go index 97f63331e..0358227fa 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go @@ -122,7 +122,7 @@ func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object { func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRoleBinding, 
len(*in)) @@ -155,7 +155,7 @@ func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object { func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRole, len(*in)) @@ -294,7 +294,7 @@ func (in *RoleBinding) DeepCopyObject() runtime.Object { func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]RoleBinding, len(*in)) @@ -327,7 +327,7 @@ func (in *RoleBindingList) DeepCopyObject() runtime.Object { func (in *RoleList) DeepCopyInto(out *RoleList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Role, len(*in)) diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go index 904a6e7a2..6c80f52f9 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go @@ -17,38 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto - - It has these top-level messages: - AggregationRule - ClusterRole - ClusterRoleBinding - ClusterRoleBindingList - ClusterRoleList - PolicyRule - Role - RoleBinding - RoleBindingList - RoleList - RoleRef - Subject -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -61,53 +44,341 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AggregationRule) Reset() { *m = AggregationRule{} } -func (*AggregationRule) ProtoMessage() {} -func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{0} +} +func (m *AggregationRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AggregationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AggregationRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_AggregationRule.Merge(m, src) +} +func (m *AggregationRule) XXX_Size() int { + return m.Size() +} +func (m *AggregationRule) XXX_DiscardUnknown() { + xxx_messageInfo_AggregationRule.DiscardUnknown(m) +} + +var xxx_messageInfo_AggregationRule proto.InternalMessageInfo -func (m *ClusterRole) Reset() { *m = ClusterRole{} } -func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (*ClusterRole) ProtoMessage() {} +func (*ClusterRole) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{1} +} +func (m *ClusterRole) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRole) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRole.Merge(m, src) +} +func (m *ClusterRole) XXX_Size() int { + return m.Size() +} +func (m *ClusterRole) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRole.DiscardUnknown(m) +} -func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } -func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_ClusterRole proto.InternalMessageInfo + +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (*ClusterRoleBinding) ProtoMessage() {} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{2} +} +func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBinding.Merge(m, src) +} +func (m *ClusterRoleBinding) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo + +func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } +func (*ClusterRoleBindingList) ProtoMessage() {} +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{3} +} +func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *ClusterRoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBindingList.Merge(m, src) +} +func (m *ClusterRoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBindingList.DiscardUnknown(m) +} -func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } -func (*ClusterRoleBindingList) ProtoMessage() {} -func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo -func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } -func (*ClusterRoleList) ProtoMessage() {} -func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (*ClusterRoleList) ProtoMessage() {} +func (*ClusterRoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{4} +} +func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleList.Merge(m, src) +} +func (m *ClusterRoleList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleList.DiscardUnknown(m) +} -func (m *PolicyRule) Reset() { *m = PolicyRule{} } -func (*PolicyRule) ProtoMessage() {} -func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo -func (m *Role) Reset() { *m = Role{} } -func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{5} +} +func (m *PolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyRule.Merge(m, src) +} +func (m *PolicyRule) XXX_Size() int { + return m.Size() +} +func (m *PolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRule.DiscardUnknown(m) +} -func (m *RoleBinding) Reset() { *m = RoleBinding{} } -func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_PolicyRule proto.InternalMessageInfo -func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } -func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *Role) Reset() { *m = Role{} } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { + return 
fileDescriptor_99f6bec96facc83d, []int{6} +} +func (m *Role) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Role) XXX_Merge(src proto.Message) { + xxx_messageInfo_Role.Merge(m, src) +} +func (m *Role) XXX_Size() int { + return m.Size() +} +func (m *Role) XXX_DiscardUnknown() { + xxx_messageInfo_Role.DiscardUnknown(m) +} -func (m *RoleList) Reset() { *m = RoleList{} } -func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_Role proto.InternalMessageInfo -func (m *RoleRef) Reset() { *m = RoleRef{} } -func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *RoleBinding) Reset() { *m = RoleBinding{} } +func (*RoleBinding) ProtoMessage() {} +func (*RoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{7} +} +func (m *RoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBinding.Merge(m, src) +} +func (m *RoleBinding) XXX_Size() int { + return m.Size() +} +func (m *RoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBinding.DiscardUnknown(m) +} -func (m *Subject) Reset() { *m = Subject{} } -func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_RoleBinding proto.InternalMessageInfo + +func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } +func (*RoleBindingList) ProtoMessage() {} +func (*RoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{8} +} +func (m *RoleBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBindingList.Merge(m, src) +} +func (m *RoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *RoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo + +func (m *RoleList) Reset() { *m = RoleList{} } +func (*RoleList) ProtoMessage() {} +func (*RoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{9} +} +func (m *RoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleList.Merge(m, src) +} +func (m *RoleList) XXX_Size() int { + return m.Size() +} +func (m *RoleList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleList proto.InternalMessageInfo + +func (m *RoleRef) 
Reset() { *m = RoleRef{} } +func (*RoleRef) ProtoMessage() {} +func (*RoleRef) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{10} +} +func (m *RoleRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleRef.Merge(m, src) +} +func (m *RoleRef) XXX_Size() int { + return m.Size() +} +func (m *RoleRef) XXX_DiscardUnknown() { + xxx_messageInfo_RoleRef.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleRef proto.InternalMessageInfo + +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{11} +} +func (m *Subject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Subject) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subject.Merge(m, src) +} +func (m *Subject) XXX_Size() int { + return m.Size() +} +func (m *Subject) XXX_DiscardUnknown() { + xxx_messageInfo_Subject.DiscardUnknown(m) +} + +var xxx_messageInfo_Subject proto.InternalMessageInfo func init() { proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1beta1.AggregationRule") @@ -123,10 +394,70 @@ func init() { proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1beta1.RoleRef") proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1beta1.Subject") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto", fileDescriptor_99f6bec96facc83d) +} + +var fileDescriptor_99f6bec96facc83d = []byte{ + // 808 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbd, 0x6f, 0xfb, 0x44, + 0x18, 0xce, 0xa5, 0x89, 0x12, 0x5f, 0x88, 0xc2, 0xef, 0xa8, 0xc0, 0xaa, 0xc0, 0x89, 0x02, 0x43, + 0xa5, 0x52, 0x9b, 0x16, 0x04, 0x2c, 0x48, 0xd4, 0x0c, 0x50, 0xb5, 0x84, 0xea, 0x2a, 0x18, 0x10, + 0x03, 0x67, 0xe7, 0xea, 0x1e, 0xf1, 0x97, 0xee, 0xec, 0x48, 0x15, 0x0b, 0x0b, 0x1b, 0x03, 0x12, + 0x13, 0x2b, 0x33, 0x13, 0x23, 0x7f, 0x41, 0xc6, 0x8e, 0x9d, 0x22, 0x6a, 0xfe, 0x10, 0xd0, 0xf9, + 0x23, 0xce, 0x67, 0x9b, 0x29, 0x12, 0x12, 0x53, 0x7b, 0xef, 0xfb, 0xbc, 0xcf, 0xfb, 0xbc, 0x8f, + 0xef, 0xde, 0xc0, 0x8f, 0x47, 0x1f, 0x0a, 0x9d, 0x05, 0xc6, 0x28, 0xb6, 0x28, 0xf7, 0x69, 0x44, + 0x85, 0x31, 0xa6, 0xfe, 0x30, 0xe0, 0x46, 0x9e, 0x20, 0x21, 0x33, 0xb8, 0x45, 0x6c, 0x63, 0x7c, + 0x62, 0xd1, 0x88, 0x9c, 0x18, 0x0e, 0xf5, 0x29, 0x27, 0x11, 0x1d, 0xea, 0x21, 0x0f, 0xa2, 0x00, + 0xbd, 0x96, 0x01, 0x75, 0x12, 0x32, 0x5d, 0x02, 0xf5, 0x1c, 0x78, 0x70, 0xec, 0xb0, 0xe8, 0x36, + 0xb6, 0x74, 0x3b, 0xf0, 0x0c, 0x27, 0x70, 0x02, 0x23, 0xc5, 0x5b, 0xf1, 0x4d, 0x7a, 0x4a, 0x0f, + 0xe9, 0x7f, 0x19, 0xcf, 0xc1, 0x7b, 0x65, 0x43, 0x8f, 0xd8, 0xb7, 0xcc, 0xa7, 0xfc, 0xce, 0x08, + 0x47, 0x8e, 0x0c, 0x08, 0xc3, 0xa3, 0x11, 0x31, 0xc6, 0x2b, 0xdd, 0x0f, 0x8c, 0x4d, 0x55, 0x3c, + 0xf6, 0x23, 0xe6, 0xd1, 0x95, 0x82, 0xf7, 0x9f, 0x2b, 0x10, 0xf6, 0x2d, 0xf5, 0xc8, 0x72, 0x5d, + 0xff, 0x57, 0x00, 0x3b, 0x67, 0x8e, 0xc3, 0xa9, 0x43, 0x22, 0x16, 0xf8, 0x38, 0x76, 0x29, 0xfa, + 0x11, 0xc0, 0x7d, 0xdb, 0x8d, 0x45, 0x44, 0x39, 0x0e, 0x5c, 0x7a, 
0x4d, 0x5d, 0x6a, 0x47, 0x01, + 0x17, 0x2a, 0xe8, 0xed, 0x1d, 0xb6, 0x4e, 0xdf, 0xd5, 0x4b, 0x6b, 0x66, 0xbd, 0xf4, 0x70, 0xe4, + 0xc8, 0x80, 0xd0, 0xe5, 0x48, 0xfa, 0xf8, 0x44, 0xbf, 0x24, 0x16, 0x75, 0x8b, 0x5a, 0xf3, 0xf5, + 0xc9, 0xb4, 0x5b, 0x49, 0xa6, 0xdd, 0xfd, 0x4f, 0xd6, 0x10, 0xe3, 0xb5, 0xed, 0xfa, 0xbf, 0x55, + 0x61, 0x6b, 0x0e, 0x8e, 0xbe, 0x85, 0x4d, 0x49, 0x3e, 0x24, 0x11, 0x51, 0x41, 0x0f, 0x1c, 0xb6, + 0x4e, 0xdf, 0xd9, 0x4e, 0xca, 0x17, 0xd6, 0x77, 0xd4, 0x8e, 0x3e, 0xa7, 0x11, 0x31, 0x51, 0xae, + 0x03, 0x96, 0x31, 0x3c, 0x63, 0x45, 0x9f, 0xc1, 0x3a, 0x8f, 0x5d, 0x2a, 0xd4, 0x6a, 0x3a, 0xe9, + 0x9b, 0xfa, 0x86, 0x4b, 0xa0, 0x5f, 0x05, 0x2e, 0xb3, 0xef, 0xa4, 0x5b, 0x66, 0x3b, 0x67, 0xac, + 0xcb, 0x93, 0xc0, 0x19, 0x01, 0x72, 0x60, 0x87, 0x2c, 0xda, 0xaa, 0xee, 0xa5, 0x92, 0x0f, 0x37, + 0x72, 0x2e, 0x7d, 0x06, 0xf3, 0x95, 0x64, 0xda, 0x5d, 0xfe, 0x36, 0x78, 0x99, 0xb5, 0xff, 0x4b, + 0x15, 0xa2, 0x39, 0x93, 0x4c, 0xe6, 0x0f, 0x99, 0xef, 0xec, 0xc0, 0xab, 0x01, 0x6c, 0x8a, 0x38, + 0x4d, 0x14, 0x76, 0xf5, 0x36, 0x8e, 0x76, 0x9d, 0x01, 0xcd, 0x97, 0x73, 0xc6, 0x66, 0x1e, 0x10, + 0x78, 0xc6, 0x81, 0x2e, 0x60, 0x83, 0x07, 0x2e, 0xc5, 0xf4, 0x26, 0x77, 0x6a, 0x33, 0x1d, 0xce, + 0x70, 0x66, 0x27, 0xa7, 0x6b, 0xe4, 0x01, 0x5c, 0x30, 0xf4, 0x27, 0x00, 0xbe, 0xba, 0xea, 0xca, + 0x25, 0x13, 0x11, 0xfa, 0x66, 0xc5, 0x19, 0x7d, 0xcb, 0x0b, 0xcd, 0x44, 0xe6, 0xcb, 0x6c, 0x8a, + 0x22, 0x32, 0xe7, 0xca, 0x15, 0xac, 0xb3, 0x88, 0x7a, 0x85, 0x25, 0x47, 0x1b, 0x67, 0x58, 0x55, + 0x57, 0xde, 0xa4, 0x73, 0xc9, 0x80, 0x33, 0xa2, 0xfe, 0x9f, 0x00, 0x76, 0xe6, 0xc0, 0x3b, 0x98, + 0xe1, 0x7c, 0x71, 0x86, 0xb7, 0xb6, 0x9a, 0x61, 0xbd, 0xf8, 0x7f, 0x00, 0x84, 0xe5, 0x5b, 0x41, + 0x5d, 0x58, 0x1f, 0x53, 0x6e, 0x65, 0x9b, 0x44, 0x31, 0x15, 0x89, 0xff, 0x4a, 0x06, 0x70, 0x16, + 0x47, 0x47, 0x50, 0x21, 0x21, 0xfb, 0x94, 0x07, 0x71, 0x98, 0xb5, 0x57, 0xcc, 0x76, 0x32, 0xed, + 0x2a, 0x67, 0x57, 0xe7, 0x59, 0x10, 0x97, 0x79, 0x09, 0xe6, 0x54, 0x04, 0x31, 0xb7, 0xa9, 0x50, + 0xf7, 0x4a, 0x30, 0x2e, 0x82, 0xb8, 0xcc, 0xa3, 0x0f, 0x60, 0xbb, 0x38, 0x0c, 0x88, 0x47, 0x85, + 0x5a, 0x4b, 0x0b, 0x5e, 0x24, 0xd3, 0x6e, 0x1b, 0xcf, 0x27, 0xf0, 0x22, 0x0e, 0x7d, 0x04, 0x3b, + 0x7e, 0xe0, 0x17, 0x90, 0x2f, 0xf1, 0xa5, 0x50, 0xeb, 0x69, 0x69, 0xfa, 0x3e, 0x07, 0x8b, 0x29, + 0xbc, 0x8c, 0xed, 0xff, 0x01, 0x60, 0xed, 0xbf, 0xb6, 0xbd, 0xfa, 0x3f, 0x55, 0x61, 0xeb, 0xff, + 0x6d, 0x32, 0xdb, 0x26, 0xf2, 0x09, 0xee, 0x76, 0x8d, 0x6c, 0xfd, 0x04, 0x9f, 0xdf, 0x1f, 0xbf, + 0x03, 0xd8, 0xdc, 0xd1, 0xe2, 0x30, 0x17, 0x55, 0xbf, 0xf1, 0xb4, 0xea, 0xf5, 0x72, 0xbf, 0x87, + 0x85, 0xff, 0xe8, 0x6d, 0xd8, 0x2c, 0x1e, 0x7b, 0x2a, 0x56, 0x29, 0x9b, 0x17, 0xfb, 0x00, 0xcf, + 0x10, 0xa8, 0x07, 0x6b, 0x23, 0xe6, 0x0f, 0xd5, 0x6a, 0x8a, 0x7c, 0x29, 0x47, 0xd6, 0x2e, 0x98, + 0x3f, 0xc4, 0x69, 0x46, 0x22, 0x7c, 0xe2, 0x65, 0x3f, 0xc4, 0x73, 0x08, 0xf9, 0xcc, 0x71, 0x9a, + 0x91, 0x5e, 0x35, 0xf2, 0xcb, 0x34, 0xe3, 0x03, 0x1b, 0xf9, 0xe6, 0xf5, 0x55, 0xb7, 0xd1, 0xf7, + 0x74, 0x77, 0x64, 0x40, 0x45, 0xfe, 0x15, 0x21, 0xb1, 0xa9, 0x5a, 0x4b, 0x61, 0x2f, 0x72, 0x98, + 0x32, 0x28, 0x12, 0xb8, 0xc4, 0x98, 0xc7, 0x93, 0x47, 0xad, 0x72, 0xff, 0xa8, 0x55, 0x1e, 0x1e, + 0xb5, 0xca, 0x0f, 0x89, 0x06, 0x26, 0x89, 0x06, 0xee, 0x13, 0x0d, 0x3c, 0x24, 0x1a, 0xf8, 0x2b, + 0xd1, 0xc0, 0xcf, 0x7f, 0x6b, 0x95, 0xaf, 0x1b, 0xb9, 0xeb, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, + 0x37, 0x8f, 0x77, 0xcd, 0x15, 0x0b, 0x00, 0x00, +} + func (m *AggregationRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + 
n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -134,29 +465,36 @@ func (m *AggregationRule) Marshal() (dAtA []byte, err error) { } func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AggregationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.ClusterRoleSelectors) > 0 { - for _, msg := range m.ClusterRoleSelectors { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.ClusterRoleSelectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ClusterRoleSelectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *ClusterRole) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -164,47 +502,58 @@ func (m *ClusterRole) Marshal() (dAtA []byte, err error) { } func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRole) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.AggregationRule != nil { + { + size, err := m.AggregationRule.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if m.AggregationRule != nil { + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) - n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n2 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -212,45 +561,56 @@ func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err 
+ { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- + dAtA[i] = 0x1a if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n4, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -258,37 +618,46 @@ func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -296,37 +665,46 @@ func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + 
size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PolicyRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -334,92 +712,67 @@ func (m *PolicyRule) Marshal() (dAtA []byte, err error) { } func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x2a } } - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x22 } } if len(m.Resources) > 0 { - for _, s := range m.Resources { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - if len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *Role) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err 
:= m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -427,37 +780,46 @@ func (m *Role) Marshal() (dAtA []byte, err error) { } func (m *Role) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -465,45 +827,56 @@ func (m *RoleBinding) Marshal() (dAtA []byte, err error) { } func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n8, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0x1a if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n9, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -511,37 +884,46 @@ func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBindingList) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n10, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -549,37 +931,46 @@ func (m *RoleList) Marshal() (dAtA []byte, err error) { } func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleRef) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -587,29 +978,37 @@ func (m *RoleRef) Marshal() (dAtA []byte, err error) { } func (m *RoleRef) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i += copy(dAtA[i:], m.APIGroup) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x1a - i++ + i -= len(m.Name) + copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Subject) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -617,39 +1016,53 @@ func (m *Subject) Marshal() (dAtA []byte, err error) { } func (m *Subject) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Subject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i += copy(dAtA[i:], m.APIGroup) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *AggregationRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.ClusterRoleSelectors) > 0 { @@ -662,6 +1075,9 @@ func (m *AggregationRule) Size() (n int) { } func (m *ClusterRole) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -680,6 +1096,9 @@ func (m *ClusterRole) Size() (n int) { } func (m *ClusterRoleBinding) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -696,6 +1115,9 @@ func (m *ClusterRoleBinding) Size() (n int) { } func (m *ClusterRoleBindingList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -710,6 +1132,9 @@ func (m *ClusterRoleBindingList) Size() (n int) { } func (m *ClusterRoleList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -724,6 +1149,9 @@ func (m *ClusterRoleList) Size() (n int) { } func (m *PolicyRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Verbs) > 0 { @@ -760,6 +1188,9 @@ func (m *PolicyRule) Size() (n int) { } func (m *Role) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -774,6 +1205,9 @@ func (m *Role) Size() (n int) { } func (m *RoleBinding) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -790,6 +1224,9 @@ func (m *RoleBinding) Size() (n int) { } func (m *RoleBindingList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -804,6 +1241,9 @@ func (m *RoleBindingList) Size() (n int) { } func (m *RoleList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -818,6 +1258,9 @@ func (m 
*RoleList) Size() (n int) { } func (m *RoleRef) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.APIGroup) @@ -830,6 +1273,9 @@ func (m *RoleRef) Size() (n int) { } func (m *Subject) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -844,14 +1290,7 @@ func (m *Subject) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -860,8 +1299,13 @@ func (this *AggregationRule) String() string { if this == nil { return "nil" } + repeatedStringForClusterRoleSelectors := "[]LabelSelector{" + for _, f := range this.ClusterRoleSelectors { + repeatedStringForClusterRoleSelectors += fmt.Sprintf("%v", f) + "," + } + repeatedStringForClusterRoleSelectors += "}" s := strings.Join([]string{`&AggregationRule{`, - `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `ClusterRoleSelectors:` + repeatedStringForClusterRoleSelectors + `,`, `}`, }, "") return s @@ -870,10 +1314,15 @@ func (this *ClusterRole) String() string { if this == nil { return "nil" } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&ClusterRole{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, - `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `AggregationRule:` + strings.Replace(this.AggregationRule.String(), "AggregationRule", "AggregationRule", 1) + `,`, `}`, }, "") return s @@ -882,9 +1331,14 @@ func (this *ClusterRoleBinding) String() string { if this == nil { return "nil" } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" s := strings.Join([]string{`&ClusterRoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -894,9 +1348,14 @@ func (this *ClusterRoleBindingList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ClusterRoleBinding{" + for _, 
f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -905,9 +1364,14 @@ func (this *ClusterRoleList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ClusterRole{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -930,9 +1394,14 @@ func (this *Role) String() string { if this == nil { return "nil" } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&Role{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, `}`, }, "") return s @@ -941,9 +1410,14 @@ func (this *RoleBinding) String() string { if this == nil { return "nil" } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" s := strings.Join([]string{`&RoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -953,9 +1427,14 @@ func (this *RoleBindingList) String() string { if this == nil { return 
"nil" } + repeatedStringForItems := "[]RoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -964,9 +1443,14 @@ func (this *RoleList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Role{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Role", "Role", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Role", "Role", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1019,7 +1503,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1047,7 +1531,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1056,10 +1540,13 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, v1.LabelSelector{}) if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1073,6 +1560,9 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1100,7 +1590,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1128,7 +1618,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1137,6 +1627,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1158,7 +1651,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1167,6 +1660,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1189,7 +1685,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1198,6 +1694,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1217,6 +1716,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1244,7 +1746,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1272,7 +1774,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1281,6 +1783,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1302,7 +1807,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1311,6 +1816,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1333,7 +1841,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1342,6 +1850,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1358,6 +1869,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1385,7 +1899,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1413,7 +1927,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1422,6 +1936,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1443,7 +1960,7 @@ func (m 
*ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1452,6 +1969,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1469,6 +1989,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1496,7 +2019,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1524,7 +2047,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1533,6 +2056,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1554,7 +2080,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1563,6 +2089,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1580,6 +2109,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1607,7 +2139,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1635,7 +2167,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1645,6 +2177,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1664,7 +2199,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1674,6 +2209,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1693,7 +2231,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1703,6 +2241,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } 
if postIndex > l { return io.ErrUnexpectedEOF } @@ -1722,7 +2263,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1732,6 +2273,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1751,7 +2295,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1761,6 +2305,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1775,6 +2322,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1802,7 +2352,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1830,7 +2380,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1839,6 +2389,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1860,7 +2413,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1869,6 +2422,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1886,6 +2442,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1913,7 +2472,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1941,7 +2500,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1950,6 +2509,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1971,7 +2533,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1980,6 +2542,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -2002,7 +2567,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2011,6 +2576,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2027,6 +2595,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2054,7 +2625,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2082,7 +2653,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2091,6 +2662,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2112,7 +2686,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2121,6 +2695,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2138,6 +2715,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2165,7 +2745,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2193,7 +2773,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2202,6 +2782,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2223,7 +2806,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2232,6 +2815,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2249,6 +2835,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2276,7 +2865,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break 
} @@ -2304,7 +2893,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2314,6 +2903,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2333,7 +2925,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2343,6 +2935,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2362,7 +2957,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2372,6 +2967,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2386,6 +2984,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2413,7 +3014,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2441,7 +3042,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2451,6 +3052,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2470,7 +3074,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2480,6 +3084,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2499,7 +3106,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2509,6 +3116,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2528,7 +3138,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2538,6 +3148,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > 
l { return io.ErrUnexpectedEOF } @@ -2552,6 +3165,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2618,10 +3234,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -2650,6 +3269,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -2668,62 +3290,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 808 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbd, 0x6f, 0xfb, 0x44, - 0x18, 0xce, 0xa5, 0x89, 0x12, 0x5f, 0x88, 0xc2, 0xef, 0xa8, 0xc0, 0xaa, 0xc0, 0x89, 0x02, 0x43, - 0xa5, 0x52, 0x9b, 0x16, 0x04, 0x2c, 0x48, 0xd4, 0x0c, 0x50, 0xb5, 0x84, 0xea, 0x2a, 0x18, 0x10, - 0x03, 0x67, 0xe7, 0xea, 0x1e, 0xf1, 0x97, 0xee, 0xec, 0x48, 0x15, 0x0b, 0x0b, 0x1b, 0x03, 0x12, - 0x13, 0x2b, 0x33, 0x13, 0x23, 0x7f, 0x41, 0xc6, 0x8e, 0x9d, 0x22, 0x6a, 0xfe, 0x10, 0xd0, 0xf9, - 0x23, 0xce, 0x67, 0x9b, 0x29, 0x12, 0x12, 0x53, 0x7b, 0xef, 0xfb, 0xbc, 0xcf, 0xfb, 0xbc, 0x8f, - 0xef, 0xde, 0xc0, 0x8f, 0x47, 0x1f, 0x0a, 0x9d, 0x05, 0xc6, 0x28, 0xb6, 0x28, 0xf7, 0x69, 0x44, - 0x85, 0x31, 0xa6, 0xfe, 0x30, 0xe0, 0x46, 0x9e, 0x20, 0x21, 0x33, 0xb8, 0x45, 0x6c, 0x63, 0x7c, - 0x62, 0xd1, 0x88, 0x9c, 0x18, 0x0e, 0xf5, 0x29, 0x27, 0x11, 0x1d, 0xea, 0x21, 0x0f, 0xa2, 0x00, - 0xbd, 0x96, 0x01, 0x75, 0x12, 0x32, 0x5d, 0x02, 0xf5, 0x1c, 0x78, 0x70, 0xec, 0xb0, 0xe8, 0x36, - 0xb6, 0x74, 0x3b, 0xf0, 0x0c, 0x27, 0x70, 0x02, 0x23, 0xc5, 0x5b, 0xf1, 0x4d, 0x7a, 0x4a, 0x0f, - 0xe9, 0x7f, 0x19, 0xcf, 0xc1, 0x7b, 0x65, 0x43, 0x8f, 0xd8, 0xb7, 0xcc, 0xa7, 0xfc, 0xce, 0x08, - 0x47, 0x8e, 0x0c, 0x08, 0xc3, 0xa3, 0x11, 0x31, 0xc6, 0x2b, 0xdd, 0x0f, 0x8c, 0x4d, 0x55, 0x3c, - 0xf6, 0x23, 0xe6, 0xd1, 0x95, 0x82, 0xf7, 0x9f, 0x2b, 0x10, 0xf6, 0x2d, 0xf5, 0xc8, 0x72, 0x5d, - 0xff, 0x57, 0x00, 0x3b, 0x67, 0x8e, 0xc3, 0xa9, 0x43, 0x22, 0x16, 0xf8, 0x38, 0x76, 0x29, 0xfa, - 0x11, 0xc0, 0x7d, 0xdb, 0x8d, 0x45, 0x44, 0x39, 0x0e, 0x5c, 0x7a, 0x4d, 0x5d, 0x6a, 0x47, 0x01, - 0x17, 0x2a, 0xe8, 0xed, 0x1d, 0xb6, 0x4e, 0xdf, 0xd5, 0x4b, 0x6b, 0x66, 0xbd, 0xf4, 0x70, 0xe4, - 0xc8, 0x80, 0xd0, 0xe5, 0x48, 0xfa, 0xf8, 0x44, 0xbf, 0x24, 0x16, 0x75, 0x8b, 0x5a, 0xf3, 0xf5, - 0xc9, 0xb4, 0x5b, 0x49, 0xa6, 0xdd, 0xfd, 0x4f, 0xd6, 0x10, 0xe3, 0xb5, 0xed, 0xfa, 0xbf, 0x55, - 0x61, 0x6b, 0x0e, 0x8e, 0xbe, 0x85, 0x4d, 0x49, 0x3e, 0x24, 0x11, 0x51, 0x41, 0x0f, 0x1c, 0xb6, - 0x4e, 0xdf, 0xd9, 0x4e, 0xca, 0x17, 0xd6, 0x77, 0xd4, 0x8e, 0x3e, 0xa7, 0x11, 0x31, 0x51, 0xae, - 0x03, 0x96, 0x31, 0x3c, 0x63, 0x45, 0x9f, 0xc1, 0x3a, 0x8f, 0x5d, 0x2a, 0xd4, 0x6a, 0x3a, 0xe9, - 0x9b, 0xfa, 0x86, 0x4b, 0xa0, 0x5f, 0x05, 0x2e, 0xb3, 0xef, 0xa4, 0x5b, 0x66, 0x3b, 0x67, 0xac, - 0xcb, 0x93, 0xc0, 0x19, 0x01, 0x72, 0x60, 0x87, 0x2c, 0xda, 0xaa, 0xee, 0xa5, 0x92, 0x0f, 0x37, - 0x72, 0x2e, 0x7d, 0x06, 0xf3, 0x95, 0x64, 0xda, 
0x5d, 0xfe, 0x36, 0x78, 0x99, 0xb5, 0xff, 0x4b, - 0x15, 0xa2, 0x39, 0x93, 0x4c, 0xe6, 0x0f, 0x99, 0xef, 0xec, 0xc0, 0xab, 0x01, 0x6c, 0x8a, 0x38, - 0x4d, 0x14, 0x76, 0xf5, 0x36, 0x8e, 0x76, 0x9d, 0x01, 0xcd, 0x97, 0x73, 0xc6, 0x66, 0x1e, 0x10, - 0x78, 0xc6, 0x81, 0x2e, 0x60, 0x83, 0x07, 0x2e, 0xc5, 0xf4, 0x26, 0x77, 0x6a, 0x33, 0x1d, 0xce, - 0x70, 0x66, 0x27, 0xa7, 0x6b, 0xe4, 0x01, 0x5c, 0x30, 0xf4, 0x27, 0x00, 0xbe, 0xba, 0xea, 0xca, - 0x25, 0x13, 0x11, 0xfa, 0x66, 0xc5, 0x19, 0x7d, 0xcb, 0x0b, 0xcd, 0x44, 0xe6, 0xcb, 0x6c, 0x8a, - 0x22, 0x32, 0xe7, 0xca, 0x15, 0xac, 0xb3, 0x88, 0x7a, 0x85, 0x25, 0x47, 0x1b, 0x67, 0x58, 0x55, - 0x57, 0xde, 0xa4, 0x73, 0xc9, 0x80, 0x33, 0xa2, 0xfe, 0x9f, 0x00, 0x76, 0xe6, 0xc0, 0x3b, 0x98, - 0xe1, 0x7c, 0x71, 0x86, 0xb7, 0xb6, 0x9a, 0x61, 0xbd, 0xf8, 0x7f, 0x00, 0x84, 0xe5, 0x5b, 0x41, - 0x5d, 0x58, 0x1f, 0x53, 0x6e, 0x65, 0x9b, 0x44, 0x31, 0x15, 0x89, 0xff, 0x4a, 0x06, 0x70, 0x16, - 0x47, 0x47, 0x50, 0x21, 0x21, 0xfb, 0x94, 0x07, 0x71, 0x98, 0xb5, 0x57, 0xcc, 0x76, 0x32, 0xed, - 0x2a, 0x67, 0x57, 0xe7, 0x59, 0x10, 0x97, 0x79, 0x09, 0xe6, 0x54, 0x04, 0x31, 0xb7, 0xa9, 0x50, - 0xf7, 0x4a, 0x30, 0x2e, 0x82, 0xb8, 0xcc, 0xa3, 0x0f, 0x60, 0xbb, 0x38, 0x0c, 0x88, 0x47, 0x85, - 0x5a, 0x4b, 0x0b, 0x5e, 0x24, 0xd3, 0x6e, 0x1b, 0xcf, 0x27, 0xf0, 0x22, 0x0e, 0x7d, 0x04, 0x3b, - 0x7e, 0xe0, 0x17, 0x90, 0x2f, 0xf1, 0xa5, 0x50, 0xeb, 0x69, 0x69, 0xfa, 0x3e, 0x07, 0x8b, 0x29, - 0xbc, 0x8c, 0xed, 0xff, 0x01, 0x60, 0xed, 0xbf, 0xb6, 0xbd, 0xfa, 0x3f, 0x55, 0x61, 0xeb, 0xff, - 0x6d, 0x32, 0xdb, 0x26, 0xf2, 0x09, 0xee, 0x76, 0x8d, 0x6c, 0xfd, 0x04, 0x9f, 0xdf, 0x1f, 0xbf, - 0x03, 0xd8, 0xdc, 0xd1, 0xe2, 0x30, 0x17, 0x55, 0xbf, 0xf1, 0xb4, 0xea, 0xf5, 0x72, 0xbf, 0x87, - 0x85, 0xff, 0xe8, 0x6d, 0xd8, 0x2c, 0x1e, 0x7b, 0x2a, 0x56, 0x29, 0x9b, 0x17, 0xfb, 0x00, 0xcf, - 0x10, 0xa8, 0x07, 0x6b, 0x23, 0xe6, 0x0f, 0xd5, 0x6a, 0x8a, 0x7c, 0x29, 0x47, 0xd6, 0x2e, 0x98, - 0x3f, 0xc4, 0x69, 0x46, 0x22, 0x7c, 0xe2, 0x65, 0x3f, 0xc4, 0x73, 0x08, 0xf9, 0xcc, 0x71, 0x9a, - 0x91, 0x5e, 0x35, 0xf2, 0xcb, 0x34, 0xe3, 0x03, 0x1b, 0xf9, 0xe6, 0xf5, 0x55, 0xb7, 0xd1, 0xf7, - 0x74, 0x77, 0x64, 0x40, 0x45, 0xfe, 0x15, 0x21, 0xb1, 0xa9, 0x5a, 0x4b, 0x61, 0x2f, 0x72, 0x98, - 0x32, 0x28, 0x12, 0xb8, 0xc4, 0x98, 0xc7, 0x93, 0x47, 0xad, 0x72, 0xff, 0xa8, 0x55, 0x1e, 0x1e, - 0xb5, 0xca, 0x0f, 0x89, 0x06, 0x26, 0x89, 0x06, 0xee, 0x13, 0x0d, 0x3c, 0x24, 0x1a, 0xf8, 0x2b, - 0xd1, 0xc0, 0xcf, 0x7f, 0x6b, 0x95, 0xaf, 0x1b, 0xb9, 0xeb, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, - 0x37, 0x8f, 0x77, 0xcd, 0x15, 0x0b, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go index c085c90b1..7ffe58106 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go @@ -122,7 +122,7 @@ func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object { func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRoleBinding, len(*in)) @@ -155,7 +155,7 @@ func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object { func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRole, 
len(*in)) @@ -294,7 +294,7 @@ func (in *RoleBinding) DeepCopyObject() runtime.Object { func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]RoleBinding, len(*in)) @@ -327,7 +327,7 @@ func (in *RoleBindingList) DeepCopyObject() runtime.Object { func (in *RoleList) DeepCopyInto(out *RoleList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Role, len(*in)) diff --git a/vendor/k8s.io/api/scheduling/v1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1/generated.pb.go index 5adf978ef..7e9764f12 100644 --- a/vendor/k8s.io/api/scheduling/v1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1/generated.pb.go @@ -17,26 +17,22 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. +package v1 - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1/generated.proto +import ( + fmt "fmt" - It has these top-level messages: - PriorityClass - PriorityClassList -*/ -package v1 + io "io" -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" + proto "github.com/gogo/protobuf/proto" -import strings "strings" -import reflect "reflect" + k8s_io_api_core_v1 "k8s.io/api/core/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -49,22 +45,110 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *PriorityClass) Reset() { *m = PriorityClass{} } -func (*PriorityClass) ProtoMessage() {} -func (*PriorityClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *PriorityClass) Reset() { *m = PriorityClass{} } +func (*PriorityClass) ProtoMessage() {} +func (*PriorityClass) Descriptor() ([]byte, []int) { + return fileDescriptor_277b2f43b72fffd5, []int{0} +} +func (m *PriorityClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClass.Merge(m, src) +} +func (m *PriorityClass) XXX_Size() int { + return m.Size() +} +func (m *PriorityClass) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClass.DiscardUnknown(m) +} -func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } -func (*PriorityClassList) ProtoMessage() {} -func (*PriorityClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_PriorityClass proto.InternalMessageInfo + +func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } +func (*PriorityClassList) ProtoMessage() {} +func (*PriorityClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_277b2f43b72fffd5, []int{1} +} +func (m *PriorityClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClassList.Merge(m, src) +} +func (m *PriorityClassList) XXX_Size() int { + return m.Size() +} +func (m *PriorityClassList) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityClassList proto.InternalMessageInfo func init() { proto.RegisterType((*PriorityClass)(nil), "k8s.io.api.scheduling.v1.PriorityClass") proto.RegisterType((*PriorityClassList)(nil), "k8s.io.api.scheduling.v1.PriorityClassList") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1/generated.proto", fileDescriptor_277b2f43b72fffd5) +} + +var fileDescriptor_277b2f43b72fffd5 = []byte{ + // 488 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x3f, 0x8f, 0xd3, 0x30, + 0x18, 0xc6, 0xeb, 0x1e, 0x95, 0x0e, 0x57, 0x95, 0x4a, 0x10, 0x52, 0xd4, 0x21, 0xad, 0x7a, 0x03, + 0x59, 0xb0, 0xe9, 0x09, 0x10, 0xd2, 0x4d, 0x84, 0x93, 0x10, 0xd2, 0x21, 0xaa, 0x0c, 0x0c, 0x88, + 0x01, 0x27, 0x79, 0x2f, 0x35, 0x4d, 0xe2, 0xc8, 0x76, 0x22, 0x75, 0xe3, 0x23, 0xf0, 0x8d, 0x58, + 0x3b, 0xde, 0x78, 0x53, 0x45, 0xc3, 0x47, 0x60, 0x63, 0x42, 0x49, 0xc3, 0xa5, 0x7f, 0xee, 0x04, + 0x5b, 0xfc, 0x3e, 0xcf, 0xef, 0xb1, 0xfd, 0x24, 0xc1, 0xaf, 0xe6, 0x2f, 0x15, 0xe1, 0x82, 0xce, + 0x33, 0x0f, 0x64, 0x02, 0x1a, 0x14, 0xcd, 0x21, 0x09, 0x84, 0xa4, 0xb5, 0xc0, 0x52, 0x4e, 0x95, + 0x3f, 0x83, 0x20, 0x8b, 0x78, 0x12, 0xd2, 0x7c, 0x42, 0x43, 0x48, 0x40, 0x32, 0x0d, 0x01, 0x49, + 0xa5, 0xd0, 0xc2, 0x30, 0x37, 0x4e, 0xc2, 0x52, 0x4e, 0x1a, 0x27, 0xc9, 0x27, 0x83, 0x27, 0x21, + 0xd7, 0xb3, 0xcc, 0x23, 0xbe, 0x88, 0x69, 0x28, 
0x42, 0x41, 0x2b, 0xc0, 0xcb, 0x2e, 0xab, 0x55, + 0xb5, 0xa8, 0x9e, 0x36, 0x41, 0x83, 0xf1, 0xd6, 0x96, 0xbe, 0x90, 0x70, 0xcb, 0x66, 0x83, 0x67, + 0x8d, 0x27, 0x66, 0xfe, 0x8c, 0x27, 0x20, 0x17, 0x34, 0x9d, 0x87, 0xe5, 0x40, 0xd1, 0x18, 0x34, + 0xbb, 0x8d, 0xa2, 0x77, 0x51, 0x32, 0x4b, 0x34, 0x8f, 0xe1, 0x00, 0x78, 0xf1, 0x2f, 0xa0, 0xbc, + 0x68, 0xcc, 0xf6, 0xb9, 0xf1, 0xaf, 0x36, 0xee, 0x4d, 0x25, 0x17, 0x92, 0xeb, 0xc5, 0xeb, 0x88, + 0x29, 0x65, 0x7c, 0xc6, 0xc7, 0xe5, 0xa9, 0x02, 0xa6, 0x99, 0x89, 0x46, 0xc8, 0xee, 0x9e, 0x3e, + 0x25, 0x4d, 0x61, 0x37, 0xe1, 0x24, 0x9d, 0x87, 0xe5, 0x40, 0x91, 0xd2, 0x4d, 0xf2, 0x09, 0x79, + 0xef, 0x7d, 0x01, 0x5f, 0xbf, 0x03, 0xcd, 0x1c, 0x63, 0xb9, 0x1a, 0xb6, 0x8a, 0xd5, 0x10, 0x37, + 0x33, 0xf7, 0x26, 0xd5, 0x38, 0xc1, 0x9d, 0x9c, 0x45, 0x19, 0x98, 0xed, 0x11, 0xb2, 0x3b, 0x4e, + 0xaf, 0x36, 0x77, 0x3e, 0x94, 0x43, 0x77, 0xa3, 0x19, 0x67, 0xb8, 0x17, 0x46, 0xc2, 0x63, 0xd1, + 0x39, 0x5c, 0xb2, 0x2c, 0xd2, 0xe6, 0xd1, 0x08, 0xd9, 0xc7, 0xce, 0xa3, 0xda, 0xdc, 0x7b, 0xb3, + 0x2d, 0xba, 0xbb, 0x5e, 0xe3, 0x39, 0xee, 0x06, 0xa0, 0x7c, 0xc9, 0x53, 0xcd, 0x45, 0x62, 0xde, + 0x1b, 0x21, 0xfb, 0xbe, 0xf3, 0xb0, 0x46, 0xbb, 0xe7, 0x8d, 0xe4, 0x6e, 0xfb, 0x8c, 0x10, 0xf7, + 0x53, 0x09, 0x10, 0x57, 0xab, 0xa9, 0x88, 0xb8, 0xbf, 0x30, 0x3b, 0x15, 0x7b, 0x56, 0xac, 0x86, + 0xfd, 0xe9, 0x9e, 0xf6, 0x7b, 0x35, 0x3c, 0x39, 0xfc, 0x02, 0xc8, 0xbe, 0xcd, 0x3d, 0x08, 0x1d, + 0x7f, 0x47, 0xf8, 0xc1, 0x4e, 0xeb, 0x17, 0x5c, 0x69, 0xe3, 0xd3, 0x41, 0xf3, 0xe4, 0xff, 0x9a, + 0x2f, 0xe9, 0xaa, 0xf7, 0x7e, 0x7d, 0xc5, 0xe3, 0xbf, 0x93, 0xad, 0xd6, 0x2f, 0x70, 0x87, 0x6b, + 0x88, 0x95, 0xd9, 0x1e, 0x1d, 0xd9, 0xdd, 0xd3, 0xc7, 0xe4, 0xae, 0xbf, 0x80, 0xec, 0x9c, 0xac, + 0x79, 0x3d, 0x6f, 0x4b, 0xda, 0xdd, 0x84, 0x38, 0xf6, 0x72, 0x6d, 0xb5, 0xae, 0xd6, 0x56, 0xeb, + 0x7a, 0x6d, 0xb5, 0xbe, 0x16, 0x16, 0x5a, 0x16, 0x16, 0xba, 0x2a, 0x2c, 0x74, 0x5d, 0x58, 0xe8, + 0x47, 0x61, 0xa1, 0x6f, 0x3f, 0xad, 0xd6, 0xc7, 0x76, 0x3e, 0xf9, 0x13, 0x00, 0x00, 0xff, 0xff, + 0x53, 0xd9, 0x28, 0x30, 0xb1, 0x03, 0x00, 0x00, +} + func (m *PriorityClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -72,40 +156,55 @@ func (m *PriorityClass) Marshal() (dAtA []byte, err error) { } func (m *PriorityClass) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.PreemptionPolicy != nil { + i -= len(*m.PreemptionPolicy) + copy(dAtA[i:], *m.PreemptionPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) + i-- + dAtA[i] = 0x2a } - i += n1 - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) - dAtA[i] = 0x18 - i++ + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + i-- if m.GlobalDefault { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - return i, nil + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x10 + { 
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -113,43 +212,57 @@ func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { } func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *PriorityClass) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -158,10 +271,17 @@ func (m *PriorityClass) Size() (n int) { n += 2 l = len(m.Description) n += 1 + l + sovGenerated(uint64(l)) + if m.PreemptionPolicy != nil { + l = len(*m.PreemptionPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } return n } func (m *PriorityClassList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -176,14 +296,7 @@ func (m *PriorityClassList) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -193,10 +306,11 @@ func (this *PriorityClass) String() string { return "nil" } s := strings.Join([]string{`&PriorityClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, `GlobalDefault:` + fmt.Sprintf("%v", this.GlobalDefault) + `,`, `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `PreemptionPolicy:` + valueToStringGenerated(this.PreemptionPolicy) + `,`, `}`, }, "") return s @@ -205,9 +319,14 @@ func (this *PriorityClassList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]PriorityClass{" + for 
_, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PriorityClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -235,7 +354,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -263,7 +382,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -272,6 +391,9 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -293,7 +415,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Value |= (int32(b) & 0x7F) << shift + m.Value |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -312,7 +434,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -332,7 +454,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -342,11 +464,47 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.Description = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreemptionPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.PreemptionPolicy(dAtA[iNdEx:postIndex]) + m.PreemptionPolicy = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -356,6 +514,9 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -383,7 +544,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -411,7 +572,7 
@@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -420,6 +581,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -441,7 +605,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -450,6 +614,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -467,6 +634,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -533,10 +703,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -565,6 +738,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -583,39 +759,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 442 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x3f, 0x8b, 0xd4, 0x40, - 0x18, 0xc6, 0x33, 0x7b, 0x2e, 0xac, 0xb3, 0x2c, 0x68, 0x44, 0x08, 0x5b, 0xcc, 0x85, 0xb3, 0x30, - 0x8d, 0x33, 0xee, 0xa1, 0x22, 0x58, 0x19, 0x0f, 0x44, 0x38, 0x51, 0x52, 0x58, 0x88, 0x85, 0x93, - 0xe4, 0xbd, 0xec, 0xb8, 0x49, 0x26, 0xcc, 0x4c, 0x02, 0xd7, 0x59, 0x5b, 0xf9, 0x8d, 0x6c, 0xb7, - 0xbc, 0xf2, 0xaa, 0xc3, 0x8d, 0x5f, 0x44, 0xf2, 0xc7, 0xcb, 0xae, 0xe7, 0xe1, 0x75, 0x99, 0xe7, - 0x7d, 0x7e, 0xcf, 0x3b, 0x79, 0x18, 0xfc, 0x72, 0xf5, 0x5c, 0x53, 0x21, 0xd9, 0xaa, 0x0c, 0x41, - 0xe5, 0x60, 0x40, 0xb3, 0x0a, 0xf2, 0x58, 0x2a, 0xd6, 0x0f, 0x78, 0x21, 0x98, 0x8e, 0x96, 0x10, - 0x97, 0xa9, 0xc8, 0x13, 0x56, 0x2d, 0x58, 0x02, 0x39, 0x28, 0x6e, 0x20, 0xa6, 0x85, 0x92, 0x46, - 0xda, 0x4e, 0xe7, 0xa4, 0xbc, 0x10, 0x74, 0x70, 0xd2, 0x6a, 0x31, 0x7f, 0x94, 0x08, 0xb3, 0x2c, - 0x43, 0x1a, 0xc9, 0x8c, 0x25, 0x32, 0x91, 0xac, 0x05, 0xc2, 0xf2, 0xa4, 0x3d, 0xb5, 0x87, 0xf6, - 0xab, 0x0b, 0x9a, 0x3f, 0x19, 0x56, 0x66, 0x3c, 0x5a, 0x8a, 0x1c, 0xd4, 0x29, 0x2b, 0x56, 0x49, - 0x23, 0x68, 0x96, 0x81, 0xe1, 0xff, 0x58, 0x3f, 0x67, 0xd7, 0x51, 0xaa, 0xcc, 0x8d, 0xc8, 0xe0, - 0x0a, 0xf0, 0xec, 0x7f, 0x40, 0xf3, 0x13, 0x19, 0xff, 0x9b, 0x3b, 0xf8, 0x36, 0xc2, 0xb3, 0xf7, - 0x4a, 0x48, 0x25, 0xcc, 0xe9, 0xab, 0x94, 0x6b, 0x6d, 0x7f, 0xc6, 0x93, 0xe6, 0x56, 0x31, 0x37, - 0xdc, 0x41, 0x2e, 0xf2, 0xa6, 0x87, 0x8f, 0xe9, 0x50, 0xc6, 0x65, 0x38, 0x2d, 0x56, 0x49, 0x23, - 0x68, 0xda, 0xb8, 0x69, 0xb5, 0xa0, 0xef, 0xc2, 0x2f, 0x10, 0x99, 0xb7, 0x60, 
0xb8, 0x6f, 0xaf, - 0x2f, 0xf6, 0xad, 0xfa, 0x62, 0x1f, 0x0f, 0x5a, 0x70, 0x99, 0x6a, 0x3f, 0xc0, 0xe3, 0x8a, 0xa7, - 0x25, 0x38, 0x23, 0x17, 0x79, 0x63, 0x7f, 0xd6, 0x9b, 0xc7, 0x1f, 0x1a, 0x31, 0xe8, 0x66, 0xf6, - 0x0b, 0x3c, 0x4b, 0x52, 0x19, 0xf2, 0xf4, 0x08, 0x4e, 0x78, 0x99, 0x1a, 0x67, 0xcf, 0x45, 0xde, - 0xc4, 0xbf, 0xdf, 0x9b, 0x67, 0xaf, 0xb7, 0x87, 0xc1, 0xae, 0xd7, 0x7e, 0x8a, 0xa7, 0x31, 0xe8, - 0x48, 0x89, 0xc2, 0x08, 0x99, 0x3b, 0xb7, 0x5c, 0xe4, 0xdd, 0xf6, 0xef, 0xf5, 0xe8, 0xf4, 0x68, - 0x18, 0x05, 0xdb, 0xbe, 0x83, 0x1f, 0x08, 0xdf, 0xdd, 0x29, 0xe3, 0x58, 0x68, 0x63, 0x7f, 0xba, - 0x52, 0x08, 0xbd, 0x59, 0x21, 0x0d, 0xdd, 0xd6, 0x71, 0xa7, 0xdf, 0x3c, 0xf9, 0xa3, 0x6c, 0x95, - 0x71, 0x8c, 0xc7, 0xc2, 0x40, 0xa6, 0x9d, 0x91, 0xbb, 0xe7, 0x4d, 0x0f, 0x1f, 0xd2, 0xeb, 0x1e, - 0x1e, 0xdd, 0xb9, 0xd9, 0xd0, 0xda, 0x9b, 0x86, 0x0e, 0xba, 0x10, 0xdf, 0x5b, 0x6f, 0x88, 0x75, - 0xb6, 0x21, 0xd6, 0xf9, 0x86, 0x58, 0x5f, 0x6b, 0x82, 0xd6, 0x35, 0x41, 0x67, 0x35, 0x41, 0xe7, - 0x35, 0x41, 0x3f, 0x6b, 0x82, 0xbe, 0xff, 0x22, 0xd6, 0xc7, 0x51, 0xb5, 0xf8, 0x1d, 0x00, 0x00, - 0xff, 0xff, 0x32, 0xe8, 0x23, 0x88, 0x24, 0x03, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/scheduling/v1/generated.proto b/vendor/k8s.io/api/scheduling/v1/generated.proto index 791ba8dc7..82f6e0a21 100644 --- a/vendor/k8s.io/api/scheduling/v1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1/generated.proto @@ -21,6 +21,7 @@ syntax = 'proto2'; package k8s.io.api.scheduling.v1; +import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -32,7 +33,7 @@ option go_package = "v1"; // integer value. The value can be any valid integer. message PriorityClass { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -52,12 +53,19 @@ message PriorityClass { // when this priority class should be used. // +optional optional string description = 4; + + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + optional string preemptionPolicy = 5; } // PriorityClassList is a collection of priority classes. message PriorityClassList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/scheduling/v1/types.go b/vendor/k8s.io/api/scheduling/v1/types.go index d33e0085a..087ee10d8 100644 --- a/vendor/k8s.io/api/scheduling/v1/types.go +++ b/vendor/k8s.io/api/scheduling/v1/types.go @@ -17,6 +17,7 @@ limitations under the License. package v1 import ( + apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -29,7 +30,7 @@ import ( type PriorityClass struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -49,6 +50,13 @@ type PriorityClass struct { // when this priority class should be used. // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` + + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + PreemptionPolicy *apiv1.PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,5,opt,name=preemptionPolicy"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -57,7 +65,7 @@ type PriorityClass struct { type PriorityClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go index 6f3999a91..4cfb9d3e3 100644 --- a/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go @@ -28,11 +28,12 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ - "": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", - "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", - "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", + "": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. 
However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", + "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", + "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.", } func (PriorityClass) SwaggerDoc() map[string]string { @@ -41,7 +42,7 @@ func (PriorityClass) SwaggerDoc() map[string]string { var map_PriorityClassList = map[string]string{ "": "PriorityClassList is a collection of priority classes.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is the list of PriorityClasses", } diff --git a/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go index 09bfc3775..63bfe6404 100644 --- a/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go @@ -21,6 +21,7 @@ limitations under the License. package v1 import ( + corev1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -29,6 +30,11 @@ func (in *PriorityClass) DeepCopyInto(out *PriorityClass) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.PreemptionPolicy != nil { + in, out := &in.PreemptionPolicy, &out.PreemptionPolicy + *out = new(corev1.PreemptionPolicy) + **out = **in + } return } @@ -54,7 +60,7 @@ func (in *PriorityClass) DeepCopyObject() runtime.Object { func (in *PriorityClassList) DeepCopyInto(out *PriorityClassList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PriorityClass, len(*in)) diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go index 0a0d481a2..05bff0ff4 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go @@ -17,26 +17,22 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto -/* - Package v1alpha1 is a generated protocol buffer package. +package v1alpha1 - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto +import ( + fmt "fmt" - It has these top-level messages: - PriorityClass - PriorityClassList -*/ -package v1alpha1 + io "io" -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" + proto "github.com/gogo/protobuf/proto" -import strings "strings" -import reflect "reflect" + k8s_io_api_core_v1 "k8s.io/api/core/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -49,22 +45,110 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *PriorityClass) Reset() { *m = PriorityClass{} } -func (*PriorityClass) ProtoMessage() {} -func (*PriorityClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *PriorityClass) Reset() { *m = PriorityClass{} } +func (*PriorityClass) ProtoMessage() {} +func (*PriorityClass) Descriptor() ([]byte, []int) { + return fileDescriptor_f033641dd0b95dce, []int{0} +} +func (m *PriorityClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClass.Merge(m, src) +} +func (m *PriorityClass) XXX_Size() int { + return m.Size() +} +func (m *PriorityClass) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClass.DiscardUnknown(m) +} -func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } -func (*PriorityClassList) ProtoMessage() {} -func (*PriorityClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_PriorityClass proto.InternalMessageInfo + +func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } +func (*PriorityClassList) ProtoMessage() {} +func (*PriorityClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_f033641dd0b95dce, []int{1} +} +func (m *PriorityClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClassList.Merge(m, src) +} +func (m *PriorityClassList) XXX_Size() int { + return m.Size() +} +func (m *PriorityClassList) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityClassList proto.InternalMessageInfo func init() { proto.RegisterType((*PriorityClass)(nil), "k8s.io.api.scheduling.v1alpha1.PriorityClass") proto.RegisterType((*PriorityClassList)(nil), "k8s.io.api.scheduling.v1alpha1.PriorityClassList") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto", fileDescriptor_f033641dd0b95dce) +} + +var fileDescriptor_f033641dd0b95dce = []byte{ + // 494 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x4f, 0x8b, 0xd3, 0x40, + 0x18, 0xc6, 0x3b, 0x5d, 0x0b, 0x75, 0x4a, 0xa1, 0x46, 0x84, 0xd0, 0xc3, 0xb4, 0x74, 0x2f, 0xbd, + 0xec, 0x8c, 0x5d, 0x54, 0x84, 0xbd, 0xd5, 0x85, 0x45, 0x50, 0x2c, 0x39, 0x78, 0x10, 0x0f, 0x4e, + 0xd3, 0x77, 0xd3, 0xb1, 0x49, 0x26, 0xcc, 0x4c, 0x02, 0xbd, 0xf9, 0x11, 0xfc, 0x52, 0x42, 0x8f, + 0x7b, 0xdc, 0x53, 0xb1, 0xf1, 0x23, 0x78, 0xf3, 0x24, 0x49, 0xd3, 0x4d, 0xdb, 0xf8, 0x67, 0x6f, + 0x99, 0xf7, 0xf9, 0x3d, 0xcf, 0xcc, 0x3c, 0x49, 0xf0, 0xd5, 0xe2, 0xa5, 0xa6, 0x42, 0xb2, 0x45, + 0x3c, 0x05, 0x15, 0x82, 0x01, 0xcd, 0x12, 0x08, 0x67, 0x52, 0xb1, 0x42, 0xe0, 0x91, 0x60, 0xda, + 0x9d, 0xc3, 0x2c, 0xf6, 0x45, 0xe8, 0xb1, 0x64, 0xc4, 0xfd, 0x68, 0xce, 0x47, 0xcc, 0x83, 0x10, + 0x14, 0x37, 0x30, 0xa3, 0x91, 0x92, 0x46, 0x5a, 0x64, 0xcb, 0x53, 0x1e, 0x09, 0x5a, 0xf2, 0x74, + 0xc7, 0x77, 0xcf, 0x3c, 0x61, 
0xe6, 0xf1, 0x94, 0xba, 0x32, 0x60, 0x9e, 0xf4, 0x24, 0xcb, 0x6d, + 0xd3, 0xf8, 0x3a, 0x5f, 0xe5, 0x8b, 0xfc, 0x69, 0x1b, 0xd7, 0x1d, 0xec, 0x6d, 0xef, 0x4a, 0x05, + 0x2c, 0xa9, 0x6c, 0xd9, 0x7d, 0x56, 0x32, 0x01, 0x77, 0xe7, 0x22, 0x04, 0xb5, 0x64, 0xd1, 0xc2, + 0xcb, 0x06, 0x9a, 0x05, 0x60, 0xf8, 0x9f, 0x5c, 0xec, 0x6f, 0x2e, 0x15, 0x87, 0x46, 0x04, 0x50, + 0x31, 0xbc, 0xf8, 0x9f, 0x21, 0xbb, 0x6e, 0xc0, 0x8f, 0x7d, 0x83, 0x9f, 0x75, 0xdc, 0x9e, 0x28, + 0x21, 0x95, 0x30, 0xcb, 0x57, 0x3e, 0xd7, 0xda, 0xfa, 0x84, 0x9b, 0xd9, 0xa9, 0x66, 0xdc, 0x70, + 0x1b, 0xf5, 0xd1, 0xb0, 0x75, 0xfe, 0x94, 0x96, 0xb5, 0xdd, 0x85, 0xd3, 0x68, 0xe1, 0x65, 0x03, + 0x4d, 0x33, 0x9a, 0x26, 0x23, 0xfa, 0x6e, 0xfa, 0x19, 0x5c, 0xf3, 0x16, 0x0c, 0x1f, 0x5b, 0xab, + 0x75, 0xaf, 0x96, 0xae, 0x7b, 0xb8, 0x9c, 0x39, 0x77, 0xa9, 0xd6, 0x29, 0x6e, 0x24, 0xdc, 0x8f, + 0xc1, 0xae, 0xf7, 0xd1, 0xb0, 0x31, 0x6e, 0x17, 0x70, 0xe3, 0x7d, 0x36, 0x74, 0xb6, 0x9a, 0x75, + 0x81, 0xdb, 0x9e, 0x2f, 0xa7, 0xdc, 0xbf, 0x84, 0x6b, 0x1e, 0xfb, 0xc6, 0x3e, 0xe9, 0xa3, 0x61, + 0x73, 0xfc, 0xa4, 0x80, 0xdb, 0x57, 0xfb, 0xa2, 0x73, 0xc8, 0x5a, 0xcf, 0x71, 0x6b, 0x06, 0xda, + 0x55, 0x22, 0x32, 0x42, 0x86, 0xf6, 0x83, 0x3e, 0x1a, 0x3e, 0x1c, 0x3f, 0x2e, 0xac, 0xad, 0xcb, + 0x52, 0x72, 0xf6, 0x39, 0xcb, 0xc3, 0x9d, 0x48, 0x01, 0x04, 0xf9, 0x6a, 0x22, 0x7d, 0xe1, 0x2e, + 0xed, 0x46, 0xee, 0xbd, 0x48, 0xd7, 0xbd, 0xce, 0xe4, 0x48, 0xfb, 0xb5, 0xee, 0x9d, 0x56, 0xbf, + 0x00, 0x7a, 0x8c, 0x39, 0x95, 0xd0, 0xc1, 0x37, 0x84, 0x1f, 0x1d, 0xb4, 0xfe, 0x46, 0x68, 0x63, + 0x7d, 0xac, 0x34, 0x4f, 0xef, 0xd7, 0x7c, 0xe6, 0xce, 0x7b, 0xef, 0x14, 0x57, 0x6c, 0xee, 0x26, + 0x7b, 0xad, 0x3b, 0xb8, 0x21, 0x0c, 0x04, 0xda, 0xae, 0xf7, 0x4f, 0x86, 0xad, 0xf3, 0x33, 0xfa, + 0xef, 0x7f, 0x81, 0x1e, 0x9c, 0xaf, 0x7c, 0x49, 0xaf, 0xb3, 0x0c, 0x67, 0x1b, 0x35, 0xa6, 0xab, + 0x0d, 0xa9, 0xdd, 0x6c, 0x48, 0xed, 0x76, 0x43, 0x6a, 0x5f, 0x52, 0x82, 0x56, 0x29, 0x41, 0x37, + 0x29, 0x41, 0xb7, 0x29, 0x41, 0xdf, 0x53, 0x82, 0xbe, 0xfe, 0x20, 0xb5, 0x0f, 0xcd, 0x5d, 0xe6, + 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x5c, 0x1a, 0x39, 0xc9, 0x03, 0x00, 0x00, +} + func (m *PriorityClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -72,40 +156,55 @@ func (m *PriorityClass) Marshal() (dAtA []byte, err error) { } func (m *PriorityClass) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.PreemptionPolicy != nil { + i -= len(*m.PreemptionPolicy) + copy(dAtA[i:], *m.PreemptionPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) + i-- + dAtA[i] = 0x2a } - i += n1 - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) - dAtA[i] = 0x18 - i++ + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + i-- if m.GlobalDefault { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - return i, nil + i-- + dAtA[i] = 0x18 + i = 
encodeVarintGenerated(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x10 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -113,43 +212,57 @@ func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { } func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *PriorityClass) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -158,10 +271,17 @@ func (m *PriorityClass) Size() (n int) { n += 2 l = len(m.Description) n += 1 + l + sovGenerated(uint64(l)) + if m.PreemptionPolicy != nil { + l = len(*m.PreemptionPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } return n } func (m *PriorityClassList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -176,14 +296,7 @@ func (m *PriorityClassList) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -193,10 +306,11 @@ func (this *PriorityClass) String() string { return "nil" } s := strings.Join([]string{`&PriorityClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, `GlobalDefault:` + fmt.Sprintf("%v", this.GlobalDefault) + `,`, `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `PreemptionPolicy:` + valueToStringGenerated(this.PreemptionPolicy) + `,`, `}`, }, "") return s @@ -205,9 +319,14 @@ func (this *PriorityClassList) String() string { if this 
== nil { return "nil" } + repeatedStringForItems := "[]PriorityClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PriorityClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -235,7 +354,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -263,7 +382,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -272,6 +391,9 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -293,7 +415,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Value |= (int32(b) & 0x7F) << shift + m.Value |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -312,7 +434,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -332,7 +454,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -342,11 +464,47 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.Description = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreemptionPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.PreemptionPolicy(dAtA[iNdEx:postIndex]) + m.PreemptionPolicy = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -356,6 +514,9 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -383,7 +544,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << 
shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -411,7 +572,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -420,6 +581,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -441,7 +605,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -450,6 +614,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -467,6 +634,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -533,10 +703,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -565,6 +738,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -583,39 +759,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 447 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x4f, 0x8b, 0xd3, 0x40, - 0x18, 0xc6, 0x33, 0x5d, 0x0b, 0x75, 0x4a, 0x41, 0x23, 0x42, 0xe8, 0x61, 0x36, 0xac, 0x97, 0x5c, - 0x76, 0xc6, 0x2e, 0x2a, 0x82, 0xb7, 0xb8, 0xb0, 0x08, 0x8a, 0x92, 0x83, 0x07, 0xf1, 0xe0, 0x24, - 0x79, 0x37, 0x1d, 0x9b, 0x64, 0xc2, 0xcc, 0x24, 0xb0, 0x37, 0xcf, 0x9e, 0xfc, 0x52, 0x42, 0x8f, - 0x7b, 0xdc, 0xd3, 0x62, 0xe3, 0x17, 0x91, 0xa4, 0x69, 0xd3, 0x5a, 0xfc, 0x73, 0xcb, 0x3c, 0xef, - 0xef, 0x79, 0xe6, 0xcd, 0xc3, 0xe0, 0x8b, 0xc5, 0x73, 0x4d, 0x85, 0x64, 0x8b, 0x32, 0x04, 0x95, - 0x83, 0x01, 0xcd, 0x2a, 0xc8, 0x63, 0xa9, 0x58, 0x37, 0xe0, 0x85, 0x60, 0x3a, 0x9a, 0x43, 0x5c, - 0xa6, 0x22, 0x4f, 0x58, 0x35, 0xe3, 0x69, 0x31, 0xe7, 0x33, 0x96, 0x40, 0x0e, 0x8a, 0x1b, 0x88, - 0x69, 0xa1, 0xa4, 0x91, 0x36, 0x59, 0xf3, 0x94, 0x17, 0x82, 0xf6, 0x3c, 0xdd, 0xf0, 0xd3, 0xd3, - 0x44, 0x98, 0x79, 0x19, 0xd2, 0x48, 0x66, 0x2c, 0x91, 0x89, 0x64, 0xad, 0x2d, 0x2c, 0x2f, 0xdb, - 0x53, 0x7b, 0x68, 0xbf, 0xd6, 0x71, 0xd3, 0x27, 0xfd, 0xf5, 0x19, 0x8f, 0xe6, 0x22, 0x07, 0x75, - 0xc5, 0x8a, 0x45, 0xd2, 0x08, 0x9a, 0x65, 0x60, 0x38, 0xab, 0x0e, 0x96, 0x98, 0xb2, 0x3f, 0xb9, - 0x54, 0x99, 0x1b, 0x91, 0xc1, 0x81, 0xe1, 0xd9, 0xbf, 0x0c, 0xcd, 0xaf, 0x64, 0xfc, 0x77, 0xdf, - 0xc9, 0xd7, 0x01, 0x9e, 0xbc, 0x53, 0x42, 0x2a, 0x61, 0xae, 0x5e, 0xa6, 0x5c, 0x6b, 0xfb, 0x13, - 0x1e, 0x35, 0x5b, 0xc5, 0xdc, 0x70, 0x07, 0xb9, 0xc8, 0x1b, 0x9f, 0x3d, 0xa6, 0x7d, 0x25, 
0xdb, - 0x70, 0x5a, 0x2c, 0x92, 0x46, 0xd0, 0xb4, 0xa1, 0x69, 0x35, 0xa3, 0x6f, 0xc3, 0xcf, 0x10, 0x99, - 0x37, 0x60, 0xb8, 0x6f, 0x2f, 0x6f, 0x8f, 0xad, 0xfa, 0xf6, 0x18, 0xf7, 0x5a, 0xb0, 0x4d, 0xb5, - 0x1f, 0xe1, 0x61, 0xc5, 0xd3, 0x12, 0x9c, 0x81, 0x8b, 0xbc, 0xa1, 0x3f, 0xe9, 0xe0, 0xe1, 0xfb, - 0x46, 0x0c, 0xd6, 0x33, 0xfb, 0x05, 0x9e, 0x24, 0xa9, 0x0c, 0x79, 0x7a, 0x0e, 0x97, 0xbc, 0x4c, - 0x8d, 0x73, 0xe4, 0x22, 0x6f, 0xe4, 0x3f, 0xec, 0xe0, 0xc9, 0xc5, 0xee, 0x30, 0xd8, 0x67, 0xed, - 0xa7, 0x78, 0x1c, 0x83, 0x8e, 0x94, 0x28, 0x8c, 0x90, 0xb9, 0x73, 0xc7, 0x45, 0xde, 0x5d, 0xff, - 0x41, 0x67, 0x1d, 0x9f, 0xf7, 0xa3, 0x60, 0x97, 0x3b, 0xf9, 0x8e, 0xf0, 0xfd, 0xbd, 0x32, 0x5e, - 0x0b, 0x6d, 0xec, 0x8f, 0x07, 0x85, 0xd0, 0xff, 0x2b, 0xa4, 0x71, 0xb7, 0x75, 0xdc, 0xeb, 0x6e, - 0x1e, 0x6d, 0x94, 0x9d, 0x32, 0x02, 0x3c, 0x14, 0x06, 0x32, 0xed, 0x0c, 0xdc, 0x23, 0x6f, 0x7c, - 0x76, 0x4a, 0xff, 0xfe, 0xfc, 0xe8, 0xde, 0x7e, 0x7d, 0x77, 0xaf, 0x9a, 0x8c, 0x60, 0x1d, 0xe5, - 0xd3, 0xe5, 0x8a, 0x58, 0xd7, 0x2b, 0x62, 0xdd, 0xac, 0x88, 0xf5, 0xa5, 0x26, 0x68, 0x59, 0x13, - 0x74, 0x5d, 0x13, 0x74, 0x53, 0x13, 0xf4, 0xa3, 0x26, 0xe8, 0xdb, 0x4f, 0x62, 0x7d, 0x18, 0x6d, - 0x32, 0x7f, 0x05, 0x00, 0x00, 0xff, 0xff, 0xab, 0x20, 0x12, 0x63, 0x3c, 0x03, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto index bfd85f559..682fb8736 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto @@ -21,6 +21,7 @@ syntax = 'proto2'; package k8s.io.api.scheduling.v1alpha1; +import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -33,7 +34,7 @@ option go_package = "v1alpha1"; // integer value. The value can be any valid integer. message PriorityClass { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -53,12 +54,19 @@ message PriorityClass { // when this priority class should be used. // +optional optional string description = 4; + + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + optional string preemptionPolicy = 5; } // PriorityClassList is a collection of priority classes. message PriorityClassList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types.go b/vendor/k8s.io/api/scheduling/v1alpha1/types.go index 6103ea4e7..86a2c5130 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/types.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/types.go @@ -17,6 +17,7 @@ limitations under the License. 
package v1alpha1 import ( + apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -30,7 +31,7 @@ import ( type PriorityClass struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -50,6 +51,13 @@ type PriorityClass struct { // when this priority class should be used. // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` + + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + PreemptionPolicy *apiv1.PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,5,opt,name=preemptionPolicy"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -58,7 +66,7 @@ type PriorityClass struct { type PriorityClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go index 89565012f..63a9a353c 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go @@ -28,11 +28,12 @@ package v1alpha1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ - "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", - "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", - "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", + "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", + "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", + "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.", } func (PriorityClass) SwaggerDoc() map[string]string { @@ -41,7 +42,7 @@ func (PriorityClass) SwaggerDoc() map[string]string { var map_PriorityClassList = map[string]string{ "": "PriorityClassList is a collection of priority classes.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is the list of PriorityClasses", } diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go index fe0c86040..039282397 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go @@ -21,6 +21,7 @@ limitations under the License. package v1alpha1 import ( + v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -29,6 +30,11 @@ func (in *PriorityClass) DeepCopyInto(out *PriorityClass) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.PreemptionPolicy != nil { + in, out := &in.PreemptionPolicy, &out.PreemptionPolicy + *out = new(v1.PreemptionPolicy) + **out = **in + } return } @@ -54,7 +60,7 @@ func (in *PriorityClass) DeepCopyObject() runtime.Object { func (in *PriorityClassList) DeepCopyInto(out *PriorityClassList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PriorityClass, len(*in)) diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go index ddb285446..198fcd029 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go @@ -17,26 +17,22 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. 
+package v1beta1 - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto +import ( + fmt "fmt" - It has these top-level messages: - PriorityClass - PriorityClassList -*/ -package v1beta1 + io "io" -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" + proto "github.com/gogo/protobuf/proto" -import strings "strings" -import reflect "reflect" + k8s_io_api_core_v1 "k8s.io/api/core/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -49,22 +45,110 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *PriorityClass) Reset() { *m = PriorityClass{} } -func (*PriorityClass) ProtoMessage() {} -func (*PriorityClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *PriorityClass) Reset() { *m = PriorityClass{} } +func (*PriorityClass) ProtoMessage() {} +func (*PriorityClass) Descriptor() ([]byte, []int) { + return fileDescriptor_6cd406dede2d3f42, []int{0} +} +func (m *PriorityClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClass.Merge(m, src) +} +func (m *PriorityClass) XXX_Size() int { + return m.Size() +} +func (m *PriorityClass) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClass.DiscardUnknown(m) +} -func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } -func (*PriorityClassList) ProtoMessage() {} -func (*PriorityClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_PriorityClass proto.InternalMessageInfo + +func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } +func (*PriorityClassList) ProtoMessage() {} +func (*PriorityClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_6cd406dede2d3f42, []int{1} +} +func (m *PriorityClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClassList.Merge(m, src) +} +func (m *PriorityClassList) XXX_Size() int { + return m.Size() +} +func (m *PriorityClassList) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityClassList proto.InternalMessageInfo func init() { proto.RegisterType((*PriorityClass)(nil), "k8s.io.api.scheduling.v1beta1.PriorityClass") proto.RegisterType((*PriorityClassList)(nil), "k8s.io.api.scheduling.v1beta1.PriorityClassList") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto", fileDescriptor_6cd406dede2d3f42) +} + +var fileDescriptor_6cd406dede2d3f42 = []byte{ + // 494 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x3f, 0x8f, 0xd3, 0x30, + 0x18, 0xc6, 0xeb, 0x1e, 0x15, 0xc5, 0x55, 0xa5, 0x12, 0x84, 0x14, 0x55, 0x22, 0xad, 
0x7a, 0x4b, + 0x07, 0xce, 0xa6, 0x27, 0x40, 0x48, 0xb7, 0x95, 0x13, 0x08, 0x09, 0x44, 0xc9, 0xc0, 0x80, 0x18, + 0x70, 0x92, 0xf7, 0x52, 0xd3, 0x24, 0x8e, 0x6c, 0x27, 0x52, 0x37, 0x3e, 0x02, 0x1f, 0x8a, 0xa1, + 0xe3, 0x8d, 0x37, 0x55, 0x34, 0x7c, 0x04, 0x36, 0x26, 0x94, 0x34, 0x5c, 0xda, 0x86, 0x7f, 0x5b, + 0xfc, 0x3e, 0xbf, 0xe7, 0xb1, 0xfd, 0x24, 0xc1, 0xcf, 0x16, 0x4f, 0x14, 0xe1, 0x82, 0x2e, 0x12, + 0x07, 0x64, 0x04, 0x1a, 0x14, 0x4d, 0x21, 0xf2, 0x84, 0xa4, 0xa5, 0xc0, 0x62, 0x4e, 0x95, 0x3b, + 0x07, 0x2f, 0x09, 0x78, 0xe4, 0xd3, 0x74, 0xe2, 0x80, 0x66, 0x13, 0xea, 0x43, 0x04, 0x92, 0x69, + 0xf0, 0x48, 0x2c, 0x85, 0x16, 0xc6, 0xbd, 0x2d, 0x4e, 0x58, 0xcc, 0x49, 0x85, 0x93, 0x12, 0xef, + 0x9f, 0xf8, 0x5c, 0xcf, 0x13, 0x87, 0xb8, 0x22, 0xa4, 0xbe, 0xf0, 0x05, 0x2d, 0x5c, 0x4e, 0x72, + 0x51, 0xac, 0x8a, 0x45, 0xf1, 0xb4, 0x4d, 0xeb, 0x8f, 0x76, 0x36, 0x77, 0x85, 0x04, 0x9a, 0xd6, + 0x76, 0xec, 0x3f, 0xac, 0x98, 0x90, 0xb9, 0x73, 0x1e, 0x81, 0x5c, 0xd2, 0x78, 0xe1, 0xe7, 0x03, + 0x45, 0x43, 0xd0, 0xec, 0x77, 0x2e, 0xfa, 0x27, 0x97, 0x4c, 0x22, 0xcd, 0x43, 0xa8, 0x19, 0x1e, + 0xff, 0xcb, 0x90, 0xdf, 0x36, 0x64, 0x87, 0xbe, 0xd1, 0xf7, 0x26, 0xee, 0xce, 0x24, 0x17, 0x92, + 0xeb, 0xe5, 0xd3, 0x80, 0x29, 0x65, 0x7c, 0xc0, 0xed, 0xfc, 0x54, 0x1e, 0xd3, 0xcc, 0x44, 0x43, + 0x34, 0xee, 0x9c, 0x3e, 0x20, 0x55, 0x6b, 0xd7, 0xe1, 0x24, 0x5e, 0xf8, 0xf9, 0x40, 0x91, 0x9c, + 0x26, 0xe9, 0x84, 0xbc, 0x76, 0x3e, 0x82, 0xab, 0x5f, 0x81, 0x66, 0x53, 0x63, 0xb5, 0x1e, 0x34, + 0xb2, 0xf5, 0x00, 0x57, 0x33, 0xfb, 0x3a, 0xd5, 0x38, 0xc6, 0xad, 0x94, 0x05, 0x09, 0x98, 0xcd, + 0x21, 0x1a, 0xb7, 0xa6, 0xdd, 0x12, 0x6e, 0xbd, 0xcd, 0x87, 0xf6, 0x56, 0x33, 0xce, 0x70, 0xd7, + 0x0f, 0x84, 0xc3, 0x82, 0x73, 0xb8, 0x60, 0x49, 0xa0, 0xcd, 0xa3, 0x21, 0x1a, 0xb7, 0xa7, 0x77, + 0x4b, 0xb8, 0xfb, 0x7c, 0x57, 0xb4, 0xf7, 0x59, 0xe3, 0x11, 0xee, 0x78, 0xa0, 0x5c, 0xc9, 0x63, + 0xcd, 0x45, 0x64, 0xde, 0x18, 0xa2, 0xf1, 0xad, 0xe9, 0x9d, 0xd2, 0xda, 0x39, 0xaf, 0x24, 0x7b, + 0x97, 0x33, 0x7c, 0xdc, 0x8b, 0x25, 0x40, 0x58, 0xac, 0x66, 0x22, 0xe0, 0xee, 0xd2, 0x6c, 0x15, + 0xde, 0xb3, 0x6c, 0x3d, 0xe8, 0xcd, 0x0e, 0xb4, 0x1f, 0xeb, 0xc1, 0x71, 0xfd, 0x0b, 0x20, 0x87, + 0x98, 0x5d, 0x0b, 0x1d, 0x7d, 0x41, 0xf8, 0xf6, 0x5e, 0xeb, 0x2f, 0xb9, 0xd2, 0xc6, 0xfb, 0x5a, + 0xf3, 0xe4, 0xff, 0x9a, 0xcf, 0xdd, 0x45, 0xef, 0xbd, 0xf2, 0x8a, 0xed, 0x5f, 0x93, 0x9d, 0xd6, + 0xdf, 0xe0, 0x16, 0xd7, 0x10, 0x2a, 0xb3, 0x39, 0x3c, 0x1a, 0x77, 0x4e, 0xef, 0x93, 0xbf, 0xfe, + 0x0a, 0x64, 0xef, 0x78, 0xd5, 0x3b, 0x7a, 0x91, 0x47, 0xd8, 0xdb, 0xa4, 0xe9, 0xc9, 0x6a, 0x63, + 0x35, 0x2e, 0x37, 0x56, 0xe3, 0x6a, 0x63, 0x35, 0x3e, 0x65, 0x16, 0x5a, 0x65, 0x16, 0xba, 0xcc, + 0x2c, 0x74, 0x95, 0x59, 0xe8, 0x6b, 0x66, 0xa1, 0xcf, 0xdf, 0xac, 0xc6, 0xbb, 0x9b, 0x65, 0xe4, + 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1a, 0xc2, 0xc0, 0x1f, 0xc5, 0x03, 0x00, 0x00, +} + func (m *PriorityClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -72,40 +156,55 @@ func (m *PriorityClass) Marshal() (dAtA []byte, err error) { } func (m *PriorityClass) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { 
- return 0, err + if m.PreemptionPolicy != nil { + i -= len(*m.PreemptionPolicy) + copy(dAtA[i:], *m.PreemptionPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) + i-- + dAtA[i] = 0x2a } - i += n1 - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) - dAtA[i] = 0x18 - i++ + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + i-- if m.GlobalDefault { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - return i, nil + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x10 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -113,43 +212,57 @@ func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { } func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *PriorityClass) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -158,10 +271,17 @@ func (m *PriorityClass) Size() (n int) { n += 2 l = len(m.Description) n += 1 + l + sovGenerated(uint64(l)) + if m.PreemptionPolicy != nil { + l = len(*m.PreemptionPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } return n } func (m *PriorityClassList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -176,14 +296,7 @@ func (m *PriorityClassList) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -193,10 +306,11 @@ func (this *PriorityClass) String() string { 
return "nil" } s := strings.Join([]string{`&PriorityClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, `GlobalDefault:` + fmt.Sprintf("%v", this.GlobalDefault) + `,`, `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `PreemptionPolicy:` + valueToStringGenerated(this.PreemptionPolicy) + `,`, `}`, }, "") return s @@ -205,9 +319,14 @@ func (this *PriorityClassList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]PriorityClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PriorityClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -235,7 +354,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -263,7 +382,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -272,6 +391,9 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -293,7 +415,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Value |= (int32(b) & 0x7F) << shift + m.Value |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -312,7 +434,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -332,7 +454,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -342,11 +464,47 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.Description = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreemptionPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.PreemptionPolicy(dAtA[iNdEx:postIndex]) + m.PreemptionPolicy = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -356,6 +514,9 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -383,7 +544,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -411,7 +572,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -420,6 +581,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -441,7 +605,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -450,6 +614,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -467,6 +634,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -533,10 +703,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -565,6 +738,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -583,39 +759,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 448 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x8b, 0xd3, 0x40, - 0x18, 0xc5, 0x33, 0x5d, 0x8b, 0x75, 0x4a, 0x41, 0x23, 0x42, 0x28, 0x38, 0x1b, 0xd6, 0x4b, 0x0e, - 0xee, 0x8c, 0x5d, 0x54, 0x04, 0x6f, 0x71, 0x51, 0x04, 0x45, 0xcd, 0xc1, 0x83, 0x78, 0x70, 0x92, - 0x7c, 0x9b, 0x8e, 0x4d, 0x32, 0x61, 0x66, 0x12, 0xd8, 0x9b, 0x67, 0x4f, 0xfe, 0x51, 0x1e, 0x7a, - 0xdc, 0xe3, 0x9e, 0x16, 0x1b, 0xff, 0x11, 0x49, 0x1a, 0x37, 0xad, 0x45, 0xdd, 0x5b, 0xe6, 0x7d, - 0xbf, 0xf7, 0xe6, 0xcb, 0x63, 0xf0, 0xf3, 0xc5, 0x13, 0x4d, 0x85, 0x64, 0x8b, 0x32, 0x04, 0x95, - 0x83, 0x01, 0xcd, 0x2a, 0xc8, 0x63, 0xa9, 0x58, 0x37, 0xe0, 0x85, 0x60, 0x3a, 0x9a, 0x43, 0x5c, - 0xa6, 0x22, 0x4f, 0x58, 0x35, 0x0b, 0xc1, 0xf0, 0x19, 0x4b, 0x20, 
0x07, 0xc5, 0x0d, 0xc4, 0xb4, - 0x50, 0xd2, 0x48, 0xfb, 0xee, 0x1a, 0xa7, 0xbc, 0x10, 0xb4, 0xc7, 0x69, 0x87, 0x4f, 0x0f, 0x13, - 0x61, 0xe6, 0x65, 0x48, 0x23, 0x99, 0xb1, 0x44, 0x26, 0x92, 0xb5, 0xae, 0xb0, 0x3c, 0x69, 0x4f, - 0xed, 0xa1, 0xfd, 0x5a, 0xa7, 0x4d, 0x1f, 0xf6, 0x97, 0x67, 0x3c, 0x9a, 0x8b, 0x1c, 0xd4, 0x29, - 0x2b, 0x16, 0x49, 0x23, 0x68, 0x96, 0x81, 0xe1, 0xac, 0xda, 0xd9, 0x61, 0xca, 0xfe, 0xe6, 0x52, - 0x65, 0x6e, 0x44, 0x06, 0x3b, 0x86, 0xc7, 0xff, 0x33, 0x34, 0x7f, 0x92, 0xf1, 0x3f, 0x7d, 0x07, - 0x5f, 0x07, 0x78, 0xf2, 0x56, 0x09, 0xa9, 0x84, 0x39, 0x7d, 0x96, 0x72, 0xad, 0xed, 0x4f, 0x78, - 0xd4, 0x6c, 0x15, 0x73, 0xc3, 0x1d, 0xe4, 0x22, 0x6f, 0x7c, 0xf4, 0x80, 0xf6, 0x8d, 0x5c, 0x86, - 0xd3, 0x62, 0x91, 0x34, 0x82, 0xa6, 0x0d, 0x4d, 0xab, 0x19, 0x7d, 0x13, 0x7e, 0x86, 0xc8, 0xbc, - 0x06, 0xc3, 0x7d, 0x7b, 0x79, 0xb1, 0x6f, 0xd5, 0x17, 0xfb, 0xb8, 0xd7, 0x82, 0xcb, 0x54, 0xfb, - 0x1e, 0x1e, 0x56, 0x3c, 0x2d, 0xc1, 0x19, 0xb8, 0xc8, 0x1b, 0xfa, 0x93, 0x0e, 0x1e, 0xbe, 0x6f, - 0xc4, 0x60, 0x3d, 0xb3, 0x9f, 0xe2, 0x49, 0x92, 0xca, 0x90, 0xa7, 0xc7, 0x70, 0xc2, 0xcb, 0xd4, - 0x38, 0x7b, 0x2e, 0xf2, 0x46, 0xfe, 0x9d, 0x0e, 0x9e, 0xbc, 0xd8, 0x1c, 0x06, 0xdb, 0xac, 0xfd, - 0x08, 0x8f, 0x63, 0xd0, 0x91, 0x12, 0x85, 0x11, 0x32, 0x77, 0xae, 0xb9, 0xc8, 0xbb, 0xe1, 0xdf, - 0xee, 0xac, 0xe3, 0xe3, 0x7e, 0x14, 0x6c, 0x72, 0x07, 0xdf, 0x11, 0xbe, 0xb5, 0x55, 0xc6, 0x2b, - 0xa1, 0x8d, 0xfd, 0x71, 0xa7, 0x10, 0x7a, 0xb5, 0x42, 0x1a, 0x77, 0x5b, 0xc7, 0xcd, 0xee, 0xe6, - 0xd1, 0x6f, 0x65, 0xa3, 0x8c, 0x77, 0x78, 0x28, 0x0c, 0x64, 0xda, 0x19, 0xb8, 0x7b, 0xde, 0xf8, - 0xe8, 0x3e, 0xfd, 0xe7, 0xeb, 0xa3, 0x5b, 0xeb, 0xf5, 0xd5, 0xbd, 0x6c, 0x22, 0x82, 0x75, 0x92, - 0x7f, 0xb8, 0x5c, 0x11, 0xeb, 0x6c, 0x45, 0xac, 0xf3, 0x15, 0xb1, 0xbe, 0xd4, 0x04, 0x2d, 0x6b, - 0x82, 0xce, 0x6a, 0x82, 0xce, 0x6b, 0x82, 0x7e, 0xd4, 0x04, 0x7d, 0xfb, 0x49, 0xac, 0x0f, 0xd7, - 0xbb, 0xc8, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x41, 0x74, 0x8a, 0x60, 0x38, 0x03, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto index 3f15dc1d5..2582891bb 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto @@ -21,6 +21,7 @@ syntax = 'proto2'; package k8s.io.api.scheduling.v1beta1; +import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -53,6 +54,13 @@ message PriorityClass { // when this priority class should be used. // +optional optional string description = 4; + + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + optional string preemptionPolicy = 5; } // PriorityClassList is a collection of priority classes. diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types.go b/vendor/k8s.io/api/scheduling/v1beta1/types.go index 2f6b3c968..f806ecd4c 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/types.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/types.go @@ -17,6 +17,7 @@ limitations under the License. 
package v1beta1 import ( + apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -50,6 +51,13 @@ type PriorityClass struct { // when this priority class should be used. // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` + + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + PreemptionPolicy *apiv1.PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,5,opt,name=preemptionPolicy"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go index e99ed8fc4..ffded9df0 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go @@ -28,11 +28,12 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ - "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", - "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", - "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", + "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", + "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", + "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. 
Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.", } func (PriorityClass) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go index 6f68e4ac5..6e2008578 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go @@ -21,6 +21,7 @@ limitations under the License. package v1beta1 import ( + v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -29,6 +30,11 @@ func (in *PriorityClass) DeepCopyInto(out *PriorityClass) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.PreemptionPolicy != nil { + in, out := &in.PreemptionPolicy, &out.PreemptionPolicy + *out = new(v1.PreemptionPolicy) + **out = **in + } return } @@ -54,7 +60,7 @@ func (in *PriorityClass) DeepCopyObject() runtime.Object { func (in *PriorityClassList) DeepCopyInto(out *PriorityClassList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PriorityClass, len(*in)) diff --git a/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go b/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go index c84213105..7469f74a4 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go @@ -17,29 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto - - It has these top-level messages: - PodPreset - PodPresetList - PodPresetSpec -*/ package v1alpha1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v11 "k8s.io/api/core/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -52,27 +44,142 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *PodPreset) Reset() { *m = PodPreset{} } -func (*PodPreset) ProtoMessage() {} -func (*PodPreset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *PodPreset) Reset() { *m = PodPreset{} } +func (*PodPreset) ProtoMessage() {} +func (*PodPreset) Descriptor() ([]byte, []int) { + return fileDescriptor_48fab0a6ea4b79ce, []int{0} +} +func (m *PodPreset) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodPreset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodPreset) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodPreset.Merge(m, src) +} +func (m *PodPreset) XXX_Size() int { + return m.Size() +} +func (m *PodPreset) XXX_DiscardUnknown() { + xxx_messageInfo_PodPreset.DiscardUnknown(m) +} + +var xxx_messageInfo_PodPreset proto.InternalMessageInfo + +func (m *PodPresetList) Reset() { *m = PodPresetList{} } +func (*PodPresetList) ProtoMessage() {} +func (*PodPresetList) Descriptor() ([]byte, []int) { + return fileDescriptor_48fab0a6ea4b79ce, []int{1} +} +func (m *PodPresetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodPresetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodPresetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodPresetList.Merge(m, src) +} +func (m *PodPresetList) XXX_Size() int { + return m.Size() +} +func (m *PodPresetList) XXX_DiscardUnknown() { + xxx_messageInfo_PodPresetList.DiscardUnknown(m) +} + +var xxx_messageInfo_PodPresetList proto.InternalMessageInfo -func (m *PodPresetList) Reset() { *m = PodPresetList{} } -func (*PodPresetList) ProtoMessage() {} -func (*PodPresetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *PodPresetSpec) Reset() { *m = PodPresetSpec{} } +func (*PodPresetSpec) ProtoMessage() {} +func (*PodPresetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_48fab0a6ea4b79ce, []int{2} +} +func (m *PodPresetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodPresetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodPresetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodPresetSpec.Merge(m, src) +} +func (m *PodPresetSpec) XXX_Size() int { + return m.Size() +} +func (m *PodPresetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodPresetSpec.DiscardUnknown(m) +} -func (m *PodPresetSpec) Reset() { *m = PodPresetSpec{} } -func (*PodPresetSpec) ProtoMessage() {} -func (*PodPresetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_PodPresetSpec proto.InternalMessageInfo func init() { proto.RegisterType((*PodPreset)(nil), "k8s.io.api.settings.v1alpha1.PodPreset") proto.RegisterType((*PodPresetList)(nil), "k8s.io.api.settings.v1alpha1.PodPresetList") proto.RegisterType((*PodPresetSpec)(nil), "k8s.io.api.settings.v1alpha1.PodPresetSpec") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto", fileDescriptor_48fab0a6ea4b79ce) +} + +var fileDescriptor_48fab0a6ea4b79ce = []byte{ + // 
542 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x8e, 0xd2, 0x40, + 0x1c, 0xc6, 0xe9, 0xb2, 0x04, 0x1c, 0xd8, 0x68, 0x1a, 0x0f, 0x0d, 0x31, 0x65, 0xe5, 0xe2, 0x26, + 0xc6, 0x19, 0x59, 0x8d, 0xd1, 0x6b, 0x13, 0x4c, 0x4c, 0x20, 0x6e, 0x4a, 0xb2, 0x89, 0xc6, 0x83, + 0x43, 0xf9, 0x5b, 0x2a, 0xb4, 0xd3, 0xcc, 0x4c, 0x9b, 0x78, 0xf3, 0x11, 0x7c, 0x01, 0x9f, 0x44, + 0x1f, 0x80, 0xe3, 0x1e, 0xf7, 0xb4, 0x91, 0xfa, 0x22, 0x66, 0x86, 0x29, 0xa0, 0x88, 0x72, 0x9b, + 0xff, 0x9f, 0xef, 0xfb, 0xcd, 0xf7, 0x31, 0x45, 0xfd, 0xd9, 0x73, 0x81, 0x23, 0x46, 0x66, 0xd9, + 0x18, 0x78, 0x02, 0x12, 0x04, 0xc9, 0x21, 0x99, 0x30, 0x4e, 0xcc, 0x0f, 0x34, 0x8d, 0x88, 0x00, + 0x29, 0xa3, 0x24, 0x14, 0x24, 0xef, 0xd1, 0x79, 0x3a, 0xa5, 0x3d, 0x12, 0x42, 0x02, 0x9c, 0x4a, + 0x98, 0xe0, 0x94, 0x33, 0xc9, 0xec, 0x7b, 0x2b, 0x35, 0xa6, 0x69, 0x84, 0x4b, 0x35, 0x2e, 0xd5, + 0xed, 0x47, 0x61, 0x24, 0xa7, 0xd9, 0x18, 0x07, 0x2c, 0x26, 0x21, 0x0b, 0x19, 0xd1, 0xa6, 0x71, + 0xf6, 0x41, 0x4f, 0x7a, 0xd0, 0xa7, 0x15, 0xac, 0xdd, 0xdd, 0xba, 0x3a, 0x60, 0x1c, 0x48, 0xbe, + 0x73, 0x61, 0xfb, 0xe9, 0x46, 0x13, 0xd3, 0x60, 0x1a, 0x25, 0xc0, 0x3f, 0x91, 0x74, 0x16, 0xaa, + 0x85, 0x20, 0x31, 0x48, 0xfa, 0x37, 0x17, 0xd9, 0xe7, 0xe2, 0x59, 0x22, 0xa3, 0x18, 0x76, 0x0c, + 0xcf, 0xfe, 0x67, 0x10, 0xc1, 0x14, 0x62, 0xfa, 0xa7, 0xaf, 0xfb, 0xdd, 0x42, 0xb7, 0x2e, 0xd8, + 0xe4, 0x82, 0x83, 0x00, 0x69, 0xbf, 0x47, 0x0d, 0x95, 0x68, 0x42, 0x25, 0x75, 0xac, 0x53, 0xeb, + 0xac, 0x79, 0xfe, 0x18, 0x6f, 0xfe, 0xb0, 0x35, 0x18, 0xa7, 0xb3, 0x50, 0x2d, 0x04, 0x56, 0x6a, + 0x9c, 0xf7, 0xf0, 0xeb, 0xf1, 0x47, 0x08, 0xe4, 0x10, 0x24, 0xf5, 0xec, 0xc5, 0x4d, 0xa7, 0x52, + 0xdc, 0x74, 0xd0, 0x66, 0xe7, 0xaf, 0xa9, 0xf6, 0x10, 0x1d, 0x8b, 0x14, 0x02, 0xe7, 0x48, 0xd3, + 0x1f, 0xe2, 0x7f, 0x3d, 0x07, 0x5e, 0x07, 0x1b, 0xa5, 0x10, 0x78, 0x2d, 0x03, 0x3e, 0x56, 0x93, + 0xaf, 0x31, 0xdd, 0x6f, 0x16, 0x3a, 0x59, 0xab, 0x06, 0x91, 0x90, 0xf6, 0xbb, 0x9d, 0x0a, 0xf8, + 0xb0, 0x0a, 0xca, 0xad, 0x0b, 0xdc, 0x31, 0xf7, 0x34, 0xca, 0xcd, 0x56, 0xfc, 0x01, 0xaa, 0x45, + 0x12, 0x62, 0xe1, 0x1c, 0x9d, 0x56, 0xcf, 0x9a, 0xe7, 0x0f, 0x0e, 0xcc, 0xef, 0x9d, 0x18, 0x66, + 0xed, 0x95, 0x72, 0xfb, 0x2b, 0x48, 0xf7, 0x6b, 0x75, 0x2b, 0xbd, 0x6a, 0x65, 0x53, 0xd4, 0x10, + 0x30, 0x87, 0x40, 0x32, 0x6e, 0xd2, 0x3f, 0x39, 0x30, 0x3d, 0x1d, 0xc3, 0x7c, 0x64, 0xac, 0x9b, + 0x0a, 0xe5, 0xc6, 0x5f, 0x63, 0xed, 0x17, 0xa8, 0x0a, 0x49, 0x6e, 0x0a, 0xb4, 0xb7, 0x0b, 0xa8, + 0x4f, 0x58, 0xb1, 0xfa, 0x49, 0x7e, 0x49, 0xb9, 0xd7, 0x34, 0x90, 0x6a, 0x3f, 0xc9, 0x7d, 0xe5, + 0xb1, 0x07, 0xa8, 0x0e, 0x49, 0xfe, 0x92, 0xb3, 0xd8, 0xa9, 0x6a, 0xfb, 0xfd, 0x3d, 0x76, 0x25, + 0x19, 0xb1, 0x8c, 0x07, 0xe0, 0xdd, 0x36, 0x94, 0xba, 0x59, 0xfb, 0x25, 0xc2, 0xee, 0xa3, 0x7a, + 0xce, 0xe6, 0x59, 0x0c, 0xc2, 0x39, 0xde, 0x1f, 0xe6, 0x52, 0x4b, 0x36, 0x98, 0xd5, 0x2c, 0xfc, + 0xd2, 0x6b, 0xbf, 0x41, 0xad, 0xd5, 0x71, 0xc8, 0xb2, 0x44, 0x0a, 0xa7, 0xa6, 0x59, 0x9d, 0xfd, + 0x2c, 0xad, 0xf3, 0xee, 0x1a, 0x60, 0x6b, 0x6b, 0x29, 0xfc, 0xdf, 0x50, 0x1e, 0x5e, 0x2c, 0xdd, + 0xca, 0xd5, 0xd2, 0xad, 0x5c, 0x2f, 0xdd, 0xca, 0xe7, 0xc2, 0xb5, 0x16, 0x85, 0x6b, 0x5d, 0x15, + 0xae, 0x75, 0x5d, 0xb8, 0xd6, 0x8f, 0xc2, 0xb5, 0xbe, 0xfc, 0x74, 0x2b, 0x6f, 0x1b, 0xe5, 0x7b, + 0xff, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x46, 0x15, 0xf2, 0x97, 0xa4, 0x04, 0x00, 0x00, +} + func (m *PodPreset) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err 
!= nil { return nil, err } @@ -80,33 +187,42 @@ func (m *PodPreset) Marshal() (dAtA []byte, err error) { } func (m *PodPreset) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodPreset) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodPresetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -114,37 +230,46 @@ func (m *PodPresetList) Marshal() (dAtA []byte, err error) { } func (m *PodPresetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodPresetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodPresetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -152,79 +277,99 @@ func (m *PodPresetSpec) Marshal() (dAtA []byte, err error) { } func (m *PodPresetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodPresetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n4, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.VolumeMounts) > 0 { + for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } } - i 
+= n4 - if len(m.Env) > 0 { - for _, msg := range m.Env { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x22 } } if len(m.EnvFrom) > 0 { - for _, msg := range m.EnvFrom { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.EnvFrom) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EnvFrom[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x1a } } - if len(m.Volumes) > 0 { - for _, msg := range m.Volumes { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.VolumeMounts) > 0 { - for _, msg := range m.VolumeMounts { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *PodPreset) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -235,6 +380,9 @@ func (m *PodPreset) Size() (n int) { } func (m *PodPresetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -249,6 +397,9 @@ func (m *PodPresetList) Size() (n int) { } func (m *PodPresetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Selector.Size() @@ -281,14 +432,7 @@ func (m *PodPresetSpec) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -298,7 +442,7 @@ func (this *PodPreset) String() string { return "nil" } s := strings.Join([]string{`&PodPreset{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodPresetSpec", "PodPresetSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -308,9 +452,14 @@ func (this *PodPresetList) String() string { if this == 
nil { return "nil" } + repeatedStringForItems := "[]PodPreset{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodPreset", "PodPreset", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PodPresetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodPreset", "PodPreset", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -319,12 +468,32 @@ func (this *PodPresetSpec) String() string { if this == nil { return "nil" } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + repeatedStringForEnvFrom := "[]EnvFromSource{" + for _, f := range this.EnvFrom { + repeatedStringForEnvFrom += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnvFrom += "}" + repeatedStringForVolumes := "[]Volume{" + for _, f := range this.Volumes { + repeatedStringForVolumes += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumes += "}" + repeatedStringForVolumeMounts := "[]VolumeMount{" + for _, f := range this.VolumeMounts { + repeatedStringForVolumeMounts += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumeMounts += "}" s := strings.Join([]string{`&PodPresetSpec{`, - `Selector:` + strings.Replace(strings.Replace(this.Selector.String(), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `Env:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Env), "EnvVar", "k8s_io_api_core_v1.EnvVar", 1), `&`, ``, 1) + `,`, - `EnvFrom:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EnvFrom), "EnvFromSource", "k8s_io_api_core_v1.EnvFromSource", 1), `&`, ``, 1) + `,`, - `Volumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Volumes), "Volume", "k8s_io_api_core_v1.Volume", 1), `&`, ``, 1) + `,`, - `VolumeMounts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeMounts), "VolumeMount", "k8s_io_api_core_v1.VolumeMount", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `EnvFrom:` + repeatedStringForEnvFrom + `,`, + `Volumes:` + repeatedStringForVolumes + `,`, + `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, `}`, }, "") return s @@ -352,7 +521,7 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -380,7 +549,7 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -389,6 +558,9 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -410,7 +582,7 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ 
-419,6 +591,9 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -435,6 +610,9 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -462,7 +640,7 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -490,7 +668,7 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -499,6 +677,9 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -520,7 +701,7 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -529,6 +710,9 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -546,6 +730,9 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -573,7 +760,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -601,7 +788,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -610,6 +797,9 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -631,7 +821,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -640,10 +830,13 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Env = append(m.Env, k8s_io_api_core_v1.EnvVar{}) + m.Env = append(m.Env, v11.EnvVar{}) if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -662,7 +855,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -671,10 +864,13 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.EnvFrom = append(m.EnvFrom, 
k8s_io_api_core_v1.EnvFromSource{}) + m.EnvFrom = append(m.EnvFrom, v11.EnvFromSource{}) if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -693,7 +889,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -702,10 +898,13 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Volumes = append(m.Volumes, k8s_io_api_core_v1.Volume{}) + m.Volumes = append(m.Volumes, v11.Volume{}) if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -724,7 +923,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -733,10 +932,13 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeMounts = append(m.VolumeMounts, k8s_io_api_core_v1.VolumeMount{}) + m.VolumeMounts = append(m.VolumeMounts, v11.VolumeMount{}) if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -750,6 +952,9 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -816,10 +1021,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -848,6 +1056,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -866,45 +1077,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 542 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x8e, 0xd2, 0x40, - 0x1c, 0xc6, 0xe9, 0xb2, 0x04, 0x1c, 0xd8, 0x68, 0x1a, 0x0f, 0x0d, 0x31, 0x65, 0xe5, 0xe2, 0x26, - 0xc6, 0x19, 0x59, 0x8d, 0xd1, 0x6b, 0x13, 0x4c, 0x4c, 0x20, 0x6e, 0x4a, 0xb2, 0x89, 0xc6, 0x83, - 0x43, 0xf9, 0x5b, 0x2a, 0xb4, 0xd3, 0xcc, 0x4c, 0x9b, 0x78, 0xf3, 0x11, 0x7c, 0x01, 0x9f, 0x44, - 0x1f, 0x80, 0xe3, 0x1e, 0xf7, 0xb4, 0x91, 0xfa, 0x22, 0x66, 0x86, 0x29, 0xa0, 0x88, 0x72, 0x9b, - 0xff, 0x9f, 0xef, 0xfb, 0xcd, 0xf7, 0x31, 0x45, 0xfd, 0xd9, 0x73, 0x81, 0x23, 0x46, 0x66, 0xd9, - 0x18, 0x78, 0x02, 0x12, 0x04, 0xc9, 0x21, 0x99, 0x30, 0x4e, 0xcc, 0x0f, 0x34, 0x8d, 0x88, 0x00, - 0x29, 0xa3, 0x24, 0x14, 0x24, 0xef, 0xd1, 0x79, 0x3a, 0xa5, 0x3d, 0x12, 0x42, 0x02, 0x9c, 0x4a, - 0x98, 0xe0, 0x94, 0x33, 0xc9, 0xec, 0x7b, 0x2b, 0x35, 0xa6, 0x69, 0x84, 0x4b, 0x35, 0x2e, 0xd5, - 0xed, 0x47, 0x61, 0x24, 
0xa7, 0xd9, 0x18, 0x07, 0x2c, 0x26, 0x21, 0x0b, 0x19, 0xd1, 0xa6, 0x71, - 0xf6, 0x41, 0x4f, 0x7a, 0xd0, 0xa7, 0x15, 0xac, 0xdd, 0xdd, 0xba, 0x3a, 0x60, 0x1c, 0x48, 0xbe, - 0x73, 0x61, 0xfb, 0xe9, 0x46, 0x13, 0xd3, 0x60, 0x1a, 0x25, 0xc0, 0x3f, 0x91, 0x74, 0x16, 0xaa, - 0x85, 0x20, 0x31, 0x48, 0xfa, 0x37, 0x17, 0xd9, 0xe7, 0xe2, 0x59, 0x22, 0xa3, 0x18, 0x76, 0x0c, - 0xcf, 0xfe, 0x67, 0x10, 0xc1, 0x14, 0x62, 0xfa, 0xa7, 0xaf, 0xfb, 0xdd, 0x42, 0xb7, 0x2e, 0xd8, - 0xe4, 0x82, 0x83, 0x00, 0x69, 0xbf, 0x47, 0x0d, 0x95, 0x68, 0x42, 0x25, 0x75, 0xac, 0x53, 0xeb, - 0xac, 0x79, 0xfe, 0x18, 0x6f, 0xfe, 0xb0, 0x35, 0x18, 0xa7, 0xb3, 0x50, 0x2d, 0x04, 0x56, 0x6a, - 0x9c, 0xf7, 0xf0, 0xeb, 0xf1, 0x47, 0x08, 0xe4, 0x10, 0x24, 0xf5, 0xec, 0xc5, 0x4d, 0xa7, 0x52, - 0xdc, 0x74, 0xd0, 0x66, 0xe7, 0xaf, 0xa9, 0xf6, 0x10, 0x1d, 0x8b, 0x14, 0x02, 0xe7, 0x48, 0xd3, - 0x1f, 0xe2, 0x7f, 0x3d, 0x07, 0x5e, 0x07, 0x1b, 0xa5, 0x10, 0x78, 0x2d, 0x03, 0x3e, 0x56, 0x93, - 0xaf, 0x31, 0xdd, 0x6f, 0x16, 0x3a, 0x59, 0xab, 0x06, 0x91, 0x90, 0xf6, 0xbb, 0x9d, 0x0a, 0xf8, - 0xb0, 0x0a, 0xca, 0xad, 0x0b, 0xdc, 0x31, 0xf7, 0x34, 0xca, 0xcd, 0x56, 0xfc, 0x01, 0xaa, 0x45, - 0x12, 0x62, 0xe1, 0x1c, 0x9d, 0x56, 0xcf, 0x9a, 0xe7, 0x0f, 0x0e, 0xcc, 0xef, 0x9d, 0x18, 0x66, - 0xed, 0x95, 0x72, 0xfb, 0x2b, 0x48, 0xf7, 0x6b, 0x75, 0x2b, 0xbd, 0x6a, 0x65, 0x53, 0xd4, 0x10, - 0x30, 0x87, 0x40, 0x32, 0x6e, 0xd2, 0x3f, 0x39, 0x30, 0x3d, 0x1d, 0xc3, 0x7c, 0x64, 0xac, 0x9b, - 0x0a, 0xe5, 0xc6, 0x5f, 0x63, 0xed, 0x17, 0xa8, 0x0a, 0x49, 0x6e, 0x0a, 0xb4, 0xb7, 0x0b, 0xa8, - 0x4f, 0x58, 0xb1, 0xfa, 0x49, 0x7e, 0x49, 0xb9, 0xd7, 0x34, 0x90, 0x6a, 0x3f, 0xc9, 0x7d, 0xe5, - 0xb1, 0x07, 0xa8, 0x0e, 0x49, 0xfe, 0x92, 0xb3, 0xd8, 0xa9, 0x6a, 0xfb, 0xfd, 0x3d, 0x76, 0x25, - 0x19, 0xb1, 0x8c, 0x07, 0xe0, 0xdd, 0x36, 0x94, 0xba, 0x59, 0xfb, 0x25, 0xc2, 0xee, 0xa3, 0x7a, - 0xce, 0xe6, 0x59, 0x0c, 0xc2, 0x39, 0xde, 0x1f, 0xe6, 0x52, 0x4b, 0x36, 0x98, 0xd5, 0x2c, 0xfc, - 0xd2, 0x6b, 0xbf, 0x41, 0xad, 0xd5, 0x71, 0xc8, 0xb2, 0x44, 0x0a, 0xa7, 0xa6, 0x59, 0x9d, 0xfd, - 0x2c, 0xad, 0xf3, 0xee, 0x1a, 0x60, 0x6b, 0x6b, 0x29, 0xfc, 0xdf, 0x50, 0x1e, 0x5e, 0x2c, 0xdd, - 0xca, 0xd5, 0xd2, 0xad, 0x5c, 0x2f, 0xdd, 0xca, 0xe7, 0xc2, 0xb5, 0x16, 0x85, 0x6b, 0x5d, 0x15, - 0xae, 0x75, 0x5d, 0xb8, 0xd6, 0x8f, 0xc2, 0xb5, 0xbe, 0xfc, 0x74, 0x2b, 0x6f, 0x1b, 0xe5, 0x7b, - 0xff, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x46, 0x15, 0xf2, 0x97, 0xa4, 0x04, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go index 6397a88ab..ed6c31a32 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go @@ -56,7 +56,7 @@ func (in *PodPreset) DeepCopyObject() runtime.Object { func (in *PodPresetList) DeepCopyInto(out *PodPresetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodPreset, len(*in)) diff --git a/vendor/k8s.io/api/storage/v1/generated.pb.go b/vendor/k8s.io/api/storage/v1/generated.pb.go index e4b29311b..782f50693 100644 --- a/vendor/k8s.io/api/storage/v1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1/generated.pb.go @@ -17,36 +17,23 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto - - It has these top-level messages: - StorageClass - StorageClassList - VolumeAttachment - VolumeAttachmentList - VolumeAttachmentSource - VolumeAttachmentSpec - VolumeAttachmentStatus - VolumeError -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" + io "io" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" -import strings "strings" -import reflect "reflect" - -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -59,52 +46,319 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *StorageClass) Reset() { *m = StorageClass{} } -func (*StorageClass) ProtoMessage() {} -func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *StorageClass) Reset() { *m = StorageClass{} } +func (*StorageClass) ProtoMessage() {} +func (*StorageClass) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{0} +} +func (m *StorageClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageClass.Merge(m, src) +} +func (m *StorageClass) XXX_Size() int { + return m.Size() +} +func (m *StorageClass) XXX_DiscardUnknown() { + xxx_messageInfo_StorageClass.DiscardUnknown(m) +} -func (m *StorageClassList) Reset() { *m = StorageClassList{} } -func (*StorageClassList) ProtoMessage() {} -func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_StorageClass proto.InternalMessageInfo -func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } -func (*VolumeAttachment) ProtoMessage() {} -func (*VolumeAttachment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *StorageClassList) Reset() { *m = StorageClassList{} } +func (*StorageClassList) ProtoMessage() {} +func (*StorageClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{1} +} +func (m *StorageClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageClassList.Merge(m, src) +} +func (m *StorageClassList) XXX_Size() int { + return m.Size() +} +func (m *StorageClassList) XXX_DiscardUnknown() { + xxx_messageInfo_StorageClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageClassList proto.InternalMessageInfo + +func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } +func (*VolumeAttachment) ProtoMessage() {} +func 
(*VolumeAttachment) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{2} +} +func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachment) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachment.Merge(m, src) +} +func (m *VolumeAttachment) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachment) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachment.DiscardUnknown(m) +} -func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } -func (*VolumeAttachmentList) ProtoMessage() {} -func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo -func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } -func (*VolumeAttachmentSource) ProtoMessage() {} -func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } +func (*VolumeAttachmentList) ProtoMessage() {} +func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{3} +} +func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentList.Merge(m, src) +} +func (m *VolumeAttachmentList) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentList) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentList.DiscardUnknown(m) +} -func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } -func (*VolumeAttachmentSpec) ProtoMessage() {} -func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo -func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } -func (*VolumeAttachmentStatus) ProtoMessage() {} -func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } +func (*VolumeAttachmentSource) ProtoMessage() {} +func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{4} +} +func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSource.Merge(m, src) +} +func (m *VolumeAttachmentSource) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSource.DiscardUnknown(m) +} -func (m *VolumeError) Reset() { *m = VolumeError{} } -func (*VolumeError) ProtoMessage() {} -func (*VolumeError) Descriptor() 
([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo + +func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } +func (*VolumeAttachmentSpec) ProtoMessage() {} +func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{5} +} +func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSpec.Merge(m, src) +} +func (m *VolumeAttachmentSpec) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo + +func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } +func (*VolumeAttachmentStatus) ProtoMessage() {} +func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{6} +} +func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentStatus.Merge(m, src) +} +func (m *VolumeAttachmentStatus) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo + +func (m *VolumeError) Reset() { *m = VolumeError{} } +func (*VolumeError) ProtoMessage() {} +func (*VolumeError) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{7} +} +func (m *VolumeError) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeError) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeError.Merge(m, src) +} +func (m *VolumeError) XXX_Size() int { + return m.Size() +} +func (m *VolumeError) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeError.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeError proto.InternalMessageInfo func init() { proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1.StorageClass") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1.StorageClass.ParametersEntry") proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1.StorageClassList") proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1.VolumeAttachment") proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1.VolumeAttachmentList") proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1.VolumeAttachmentSource") proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1.VolumeAttachmentSpec") proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1.VolumeAttachmentStatus") + 
proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1.VolumeAttachmentStatus.AttachmentMetadataEntry") proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1.VolumeError") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto", fileDescriptor_3b530c1983504d8d) +} + +var fileDescriptor_3b530c1983504d8d = []byte{ + // 1018 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x3d, 0x6f, 0x23, 0xc5, + 0x1b, 0xcf, 0xc6, 0x79, 0x71, 0xc6, 0xc9, 0xff, 0x9c, 0xf9, 0x07, 0x30, 0x2e, 0xec, 0xc8, 0x14, + 0x98, 0x83, 0xdb, 0xbd, 0x84, 0x03, 0x9d, 0x90, 0x40, 0xf2, 0x82, 0x25, 0x4e, 0x8a, 0xef, 0xa2, + 0x49, 0x38, 0x21, 0x44, 0xc1, 0x64, 0xf7, 0x61, 0xb3, 0x67, 0xef, 0xce, 0x32, 0x33, 0x36, 0xa4, + 0xa3, 0xa2, 0x43, 0x82, 0x96, 0x8f, 0x42, 0x49, 0x15, 0xba, 0x13, 0xd5, 0x55, 0x16, 0x59, 0x6a, + 0xbe, 0x40, 0x2a, 0x34, 0xb3, 0x13, 0x7b, 0x63, 0x6f, 0xc0, 0x69, 0xae, 0xf3, 0xf3, 0xf2, 0xfb, + 0x3d, 0xef, 0xb3, 0x46, 0x1f, 0xf5, 0x1f, 0x0a, 0x3b, 0x64, 0x4e, 0x7f, 0x78, 0x02, 0x3c, 0x06, + 0x09, 0xc2, 0x19, 0x41, 0xec, 0x33, 0xee, 0x18, 0x03, 0x4d, 0x42, 0x47, 0x48, 0xc6, 0x69, 0x00, + 0xce, 0x68, 0xcf, 0x09, 0x20, 0x06, 0x4e, 0x25, 0xf8, 0x76, 0xc2, 0x99, 0x64, 0xf8, 0x95, 0xcc, + 0xcd, 0xa6, 0x49, 0x68, 0x1b, 0x37, 0x7b, 0xb4, 0x57, 0xbf, 0x17, 0x84, 0xf2, 0x74, 0x78, 0x62, + 0x7b, 0x2c, 0x72, 0x02, 0x16, 0x30, 0x47, 0x7b, 0x9f, 0x0c, 0xbf, 0xd6, 0x92, 0x16, 0xf4, 0xaf, + 0x8c, 0xa5, 0xde, 0xca, 0x05, 0xf3, 0x18, 0x2f, 0x8a, 0x54, 0x7f, 0x30, 0xf5, 0x89, 0xa8, 0x77, + 0x1a, 0xc6, 0xc0, 0xcf, 0x9c, 0xa4, 0x1f, 0x28, 0x85, 0x70, 0x22, 0x90, 0xb4, 0x08, 0xe5, 0xdc, + 0x84, 0xe2, 0xc3, 0x58, 0x86, 0x11, 0xcc, 0x01, 0xde, 0xff, 0x2f, 0x80, 0xf0, 0x4e, 0x21, 0xa2, + 0xb3, 0xb8, 0xd6, 0x8f, 0x6b, 0x68, 0xf3, 0x28, 0x6b, 0xc0, 0xc7, 0x03, 0x2a, 0x04, 0xfe, 0x0a, + 0x95, 0x55, 0x52, 0x3e, 0x95, 0xb4, 0x66, 0xed, 0x5a, 0xed, 0xca, 0xfe, 0x7d, 0x7b, 0xda, 0xac, + 0x09, 0xb7, 0x9d, 0xf4, 0x03, 0xa5, 0x10, 0xb6, 0xf2, 0xb6, 0x47, 0x7b, 0xf6, 0x93, 0x93, 0x67, + 0xe0, 0xc9, 0x1e, 0x48, 0xea, 0xe2, 0xf3, 0x71, 0x73, 0x29, 0x1d, 0x37, 0xd1, 0x54, 0x47, 0x26, + 0xac, 0xf8, 0x3d, 0x54, 0x49, 0x38, 0x1b, 0x85, 0x22, 0x64, 0x31, 0xf0, 0xda, 0xf2, 0xae, 0xd5, + 0xde, 0x70, 0xff, 0x6f, 0x20, 0x95, 0xc3, 0xa9, 0x89, 0xe4, 0xfd, 0x70, 0x80, 0x50, 0x42, 0x39, + 0x8d, 0x40, 0x02, 0x17, 0xb5, 0xd2, 0x6e, 0xa9, 0x5d, 0xd9, 0x7f, 0xd7, 0x2e, 0x9c, 0xa3, 0x9d, + 0xaf, 0xc8, 0x3e, 0x9c, 0xa0, 0xba, 0xb1, 0xe4, 0x67, 0xd3, 0xec, 0xa6, 0x06, 0x92, 0xa3, 0xc6, + 0x7d, 0xb4, 0xc5, 0xc1, 0x1b, 0xd0, 0x30, 0x3a, 0x64, 0x83, 0xd0, 0x3b, 0xab, 0xad, 0xe8, 0x0c, + 0xbb, 0xe9, 0xb8, 0xb9, 0x45, 0xf2, 0x86, 0xcb, 0x71, 0xf3, 0xfe, 0xfc, 0x06, 0xd8, 0x87, 0xc0, + 0x45, 0x28, 0x24, 0xc4, 0xf2, 0x29, 0x1b, 0x0c, 0x23, 0xb8, 0x86, 0x21, 0xd7, 0xb9, 0xf1, 0x03, + 0xb4, 0x19, 0xb1, 0x61, 0x2c, 0x9f, 0x24, 0x32, 0x64, 0xb1, 0xa8, 0xad, 0xee, 0x96, 0xda, 0x1b, + 0x6e, 0x35, 0x1d, 0x37, 0x37, 0x7b, 0x39, 0x3d, 0xb9, 0xe6, 0x85, 0x0f, 0xd0, 0x0e, 0x1d, 0x0c, + 0xd8, 0xb7, 0x59, 0x80, 0xee, 0x77, 0x09, 0x8d, 0x55, 0x97, 0x6a, 0x6b, 0xbb, 0x56, 0xbb, 0xec, + 0xd6, 0xd2, 0x71, 0x73, 0xa7, 0x53, 0x60, 0x27, 0x85, 0x28, 0xfc, 0x39, 0xda, 0x1e, 0x69, 0x95, + 0x1b, 0xc6, 0x7e, 0x18, 0x07, 0x3d, 0xe6, 0x43, 0x6d, 0x5d, 0x17, 0x7d, 0x37, 0x1d, 0x37, 0xb7, + 0x9f, 0xce, 0x1a, 0x2f, 0x8b, 0x94, 0x64, 0x9e, 0x04, 0x7f, 0x83, 0xb6, 0x75, 0x44, 0xf0, 0x8f, + 0x59, 0xc2, 0x06, 0x2c, 0x08, 0x41, 0xd4, 0xca, 0x7a, 0x74, 0xed, 0xfc, 0xe8, 
0x54, 0xeb, 0xd4, + 0xdc, 0x8c, 0xd7, 0xd9, 0x11, 0x0c, 0xc0, 0x93, 0x8c, 0x1f, 0x03, 0x8f, 0xdc, 0xd7, 0xcd, 0xbc, + 0xb6, 0x3b, 0xb3, 0x54, 0x64, 0x9e, 0xbd, 0xfe, 0x21, 0xba, 0x33, 0x33, 0x70, 0x5c, 0x45, 0xa5, + 0x3e, 0x9c, 0xe9, 0x6d, 0xde, 0x20, 0xea, 0x27, 0xde, 0x41, 0xab, 0x23, 0x3a, 0x18, 0x42, 0xb6, + 0x7c, 0x24, 0x13, 0x3e, 0x58, 0x7e, 0x68, 0xb5, 0x7e, 0xb5, 0x50, 0x35, 0xbf, 0x3d, 0x07, 0xa1, + 0x90, 0xf8, 0xcb, 0xb9, 0x9b, 0xb0, 0x17, 0xbb, 0x09, 0x85, 0xd6, 0x17, 0x51, 0x35, 0x35, 0x94, + 0xaf, 0x34, 0xb9, 0x7b, 0xf8, 0x14, 0xad, 0x86, 0x12, 0x22, 0x51, 0x5b, 0xd6, 0x8d, 0x79, 0x63, + 0x81, 0x9d, 0x76, 0xb7, 0x0c, 0xdf, 0xea, 0x23, 0x85, 0x24, 0x19, 0x41, 0xeb, 0x97, 0x65, 0x54, + 0xcd, 0xe6, 0xd2, 0x91, 0x92, 0x7a, 0xa7, 0x11, 0xc4, 0xf2, 0x25, 0x1c, 0x74, 0x0f, 0xad, 0x88, + 0x04, 0x3c, 0xdd, 0xcc, 0xca, 0xfe, 0xdb, 0x37, 0xe4, 0x3f, 0x9b, 0xd8, 0x51, 0x02, 0x9e, 0xbb, + 0x69, 0x88, 0x57, 0x94, 0x44, 0x34, 0x0d, 0xfe, 0x0c, 0xad, 0x09, 0x49, 0xe5, 0x50, 0x1d, 0xb9, + 0x22, 0xbc, 0xb7, 0x28, 0xa1, 0x06, 0xb9, 0xff, 0x33, 0x94, 0x6b, 0x99, 0x4c, 0x0c, 0x59, 0xeb, + 0x37, 0x0b, 0xed, 0xcc, 0x42, 0x5e, 0xc2, 0x74, 0x0f, 0xae, 0x4f, 0xf7, 0xcd, 0x05, 0x8b, 0xb9, + 0x61, 0xc2, 0x7f, 0x58, 0xe8, 0xd5, 0xb9, 0xba, 0xd9, 0x90, 0x7b, 0xa0, 0xde, 0x84, 0x64, 0xe6, + 0xe5, 0x79, 0x4c, 0x23, 0xc8, 0xd6, 0x3e, 0x7b, 0x13, 0x0e, 0x0b, 0xec, 0xa4, 0x10, 0x85, 0x9f, + 0xa1, 0x6a, 0x18, 0x0f, 0xc2, 0x18, 0x32, 0xdd, 0xd1, 0x74, 0xbe, 0x85, 0x87, 0x3b, 0xcb, 0xac, + 0x87, 0xbb, 0x93, 0x8e, 0x9b, 0xd5, 0x47, 0x33, 0x2c, 0x64, 0x8e, 0xb7, 0xf5, 0x7b, 0xc1, 0x64, + 0x94, 0x01, 0xbf, 0x83, 0xca, 0x54, 0x6b, 0x80, 0x9b, 0x32, 0x26, 0x9d, 0xee, 0x18, 0x3d, 0x99, + 0x78, 0xe8, 0xbd, 0xd1, 0xad, 0x30, 0x89, 0x2e, 0xbc, 0x37, 0x1a, 0x94, 0xdb, 0x1b, 0x2d, 0x13, + 0x43, 0xa6, 0x92, 0x88, 0x99, 0x9f, 0xf5, 0xb2, 0x74, 0x3d, 0x89, 0xc7, 0x46, 0x4f, 0x26, 0x1e, + 0xad, 0xbf, 0x4b, 0x05, 0x03, 0xd2, 0x0b, 0x98, 0xab, 0xc6, 0xd7, 0xd5, 0x94, 0xe7, 0xaa, 0xf1, + 0x27, 0xd5, 0xf8, 0xf8, 0x67, 0x0b, 0x61, 0x3a, 0xa1, 0xe8, 0x5d, 0x2d, 0x68, 0xb6, 0x45, 0xdd, + 0x5b, 0x9d, 0x84, 0xdd, 0x99, 0xe3, 0xc9, 0xbe, 0x84, 0x75, 0x13, 0x1f, 0xcf, 0x3b, 0x90, 0x82, + 0xe0, 0xd8, 0x47, 0x95, 0x4c, 0xdb, 0xe5, 0x9c, 0x71, 0x73, 0x9e, 0xad, 0x7f, 0xcd, 0x45, 0x7b, + 0xba, 0x0d, 0xf5, 0x65, 0xef, 0x4c, 0xa1, 0x97, 0xe3, 0x66, 0x25, 0x67, 0x27, 0x79, 0x5a, 0x15, + 0xc5, 0x87, 0x69, 0x94, 0x95, 0xdb, 0x45, 0xf9, 0x04, 0x6e, 0x8e, 0x92, 0xa3, 0xad, 0x77, 0xd1, + 0x6b, 0x37, 0xb4, 0xe5, 0x56, 0xdf, 0x8b, 0x1f, 0x2c, 0x94, 0x8f, 0x81, 0x0f, 0xd0, 0x8a, 0xfa, + 0xbb, 0x65, 0x1e, 0x92, 0xbb, 0x8b, 0x3d, 0x24, 0xc7, 0x61, 0x04, 0xd3, 0xa7, 0x50, 0x49, 0x44, + 0xb3, 0xe0, 0xb7, 0xd0, 0x7a, 0x04, 0x42, 0xd0, 0xc0, 0x44, 0x76, 0xef, 0x18, 0xa7, 0xf5, 0x5e, + 0xa6, 0x26, 0x57, 0x76, 0xb7, 0x7d, 0x7e, 0xd1, 0x58, 0x7a, 0x7e, 0xd1, 0x58, 0x7a, 0x71, 0xd1, + 0x58, 0xfa, 0x3e, 0x6d, 0x58, 0xe7, 0x69, 0xc3, 0x7a, 0x9e, 0x36, 0xac, 0x17, 0x69, 0xc3, 0xfa, + 0x33, 0x6d, 0x58, 0x3f, 0xfd, 0xd5, 0x58, 0xfa, 0x62, 0x79, 0xb4, 0xf7, 0x4f, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xe2, 0xd4, 0x42, 0x3d, 0x3c, 0x0b, 0x00, 0x00, +} + func (m *StorageClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -112,100 +366,108 @@ func (m *StorageClass) Marshal() (dAtA []byte, err error) { } func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provisioner))) - i += copy(dAtA[i:], m.Provisioner) - if len(m.Parameters) > 0 { - keysForParameters := make([]string, 0, len(m.Parameters)) - for k := range m.Parameters { - keysForParameters = append(keysForParameters, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) - for _, k := range keysForParameters { - dAtA[i] = 0x1a - i++ - v := m.Parameters[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) + if len(m.AllowedTopologies) > 0 { + for iNdEx := len(m.AllowedTopologies) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedTopologies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 } } - if m.ReclaimPolicy != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReclaimPolicy))) - i += copy(dAtA[i:], *m.ReclaimPolicy) - } - if len(m.MountOptions) > 0 { - for _, s := range m.MountOptions { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } + if m.VolumeBindingMode != nil { + i -= len(*m.VolumeBindingMode) + copy(dAtA[i:], *m.VolumeBindingMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) + i-- + dAtA[i] = 0x3a } if m.AllowVolumeExpansion != nil { - dAtA[i] = 0x30 - i++ + i-- if *m.AllowVolumeExpansion { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x30 } - if m.VolumeBindingMode != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) - i += copy(dAtA[i:], *m.VolumeBindingMode) + if len(m.MountOptions) > 0 { + for iNdEx := len(m.MountOptions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MountOptions[iNdEx]) + copy(dAtA[i:], m.MountOptions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountOptions[iNdEx]))) + i-- + dAtA[i] = 0x2a + } } - if len(m.AllowedTopologies) > 0 { - for _, msg := range m.AllowedTopologies { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if m.ReclaimPolicy != nil { + i -= len(*m.ReclaimPolicy) + copy(dAtA[i:], *m.ReclaimPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReclaimPolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Parameters) > 0 { + keysForParameters := make([]string, 0, len(m.Parameters)) + for k := range m.Parameters { + keysForParameters = append(keysForParameters, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + for iNdEx := len(keysForParameters) - 1; iNdEx >= 0; iNdEx-- { + v := m.Parameters[string(keysForParameters[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, 
i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForParameters[iNdEx]) + copy(dAtA[i:], keysForParameters[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForParameters[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a } } - return i, nil + i -= len(m.Provisioner) + copy(dAtA[i:], m.Provisioner) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provisioner))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StorageClassList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -213,37 +475,46 @@ func (m *StorageClassList) Marshal() (dAtA []byte, err error) { } func (m *StorageClassList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -251,41 +522,52 @@ func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n4, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n5, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 - return i, 
nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -293,37 +575,46 @@ func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -331,23 +622,41 @@ func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if m.InlineVolumeSpec != nil { + { + size, err := m.InlineVolumeSpec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } if m.PersistentVolumeName != nil { - dAtA[i] = 0xa - i++ + i -= len(*m.PersistentVolumeName) + copy(dAtA[i:], *m.PersistentVolumeName) i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) - i += copy(dAtA[i:], *m.PersistentVolumeName) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -355,33 +664,42 @@ func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) - i += copy(dAtA[i:], 
m.Attacher) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) - n7, err := m.Source.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x1a - i++ + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) - i += copy(dAtA[i:], m.NodeName) - return i, nil + i-- + dAtA[i] = 0x1a + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Attacher) + copy(dAtA[i:], m.Attacher) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -389,67 +707,78 @@ func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Attached { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.DetachError != nil { + { + size, err := m.DetachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.AttachError != nil { + { + size, err := m.AttachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i++ if len(m.AttachmentMetadata) > 0 { keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) for k := range m.AttachmentMetadata { keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) - for _, k := range keysForAttachmentMetadata { + for iNdEx := len(keysForAttachmentMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.AttachmentMetadata[string(keysForAttachmentMetadata[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.AttachmentMetadata[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForAttachmentMetadata[iNdEx]) + copy(dAtA[i:], keysForAttachmentMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttachmentMetadata[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) } } - if m.AttachError != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AttachError.Size())) - n8, err := m.AttachError.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - } - if m.DetachError != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DetachError.Size())) - n9, err := m.DetachError.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 + 
i-- + if m.Attached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *VolumeError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -457,35 +786,48 @@ func (m *VolumeError) Marshal() (dAtA []byte, err error) { } func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeError) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) - n10, err := m.Time.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - dAtA[i] = 0x12 - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.Time.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *StorageClass) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -527,6 +869,9 @@ func (m *StorageClass) Size() (n int) { } func (m *StorageClassList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -541,6 +886,9 @@ func (m *StorageClassList) Size() (n int) { } func (m *VolumeAttachment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -553,6 +901,9 @@ func (m *VolumeAttachment) Size() (n int) { } func (m *VolumeAttachmentList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -567,16 +918,26 @@ func (m *VolumeAttachmentList) Size() (n int) { } func (m *VolumeAttachmentSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.PersistentVolumeName != nil { l = len(*m.PersistentVolumeName) n += 1 + l + sovGenerated(uint64(l)) } + if m.InlineVolumeSpec != nil { + l = m.InlineVolumeSpec.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } func (m *VolumeAttachmentSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Attacher) @@ -589,6 +950,9 @@ func (m *VolumeAttachmentSpec) Size() (n int) { } func (m *VolumeAttachmentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -612,6 +976,9 @@ func (m *VolumeAttachmentStatus) Size() (n int) { } func (m *VolumeError) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Time.Size() @@ -622,14 +989,7 @@ func (m *VolumeError) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -638,6 +998,11 @@ func (this *StorageClass) String() string { if this == nil { return "nil" } + repeatedStringForAllowedTopologies := "[]TopologySelectorTerm{" + for _, f := range this.AllowedTopologies { + 
repeatedStringForAllowedTopologies += fmt.Sprintf("%v", f) + "," + } + repeatedStringForAllowedTopologies += "}" keysForParameters := make([]string, 0, len(this.Parameters)) for k := range this.Parameters { keysForParameters = append(keysForParameters, k) @@ -649,14 +1014,14 @@ func (this *StorageClass) String() string { } mapStringForParameters += "}" s := strings.Join([]string{`&StorageClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Provisioner:` + fmt.Sprintf("%v", this.Provisioner) + `,`, `Parameters:` + mapStringForParameters + `,`, `ReclaimPolicy:` + valueToStringGenerated(this.ReclaimPolicy) + `,`, `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, `AllowVolumeExpansion:` + valueToStringGenerated(this.AllowVolumeExpansion) + `,`, `VolumeBindingMode:` + valueToStringGenerated(this.VolumeBindingMode) + `,`, - `AllowedTopologies:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedTopologies), "TopologySelectorTerm", "k8s_io_api_core_v1.TopologySelectorTerm", 1), `&`, ``, 1) + `,`, + `AllowedTopologies:` + repeatedStringForAllowedTopologies + `,`, `}`, }, "") return s @@ -665,9 +1030,14 @@ func (this *StorageClassList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]StorageClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StorageClass", "StorageClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&StorageClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -677,7 +1047,7 @@ func (this *VolumeAttachment) String() string { return "nil" } s := strings.Join([]string{`&VolumeAttachment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -688,9 +1058,14 @@ func (this *VolumeAttachmentList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]VolumeAttachment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&VolumeAttachmentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -701,6 +1076,7 @@ func (this *VolumeAttachmentSource) String() string { } s := strings.Join([]string{`&VolumeAttachmentSource{`, `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, + `InlineVolumeSpec:` + strings.Replace(fmt.Sprintf("%v", this.InlineVolumeSpec), "PersistentVolumeSpec", "v11.PersistentVolumeSpec", 1) + `,`, `}`, }, "") return s @@ -734,8 +1110,8 @@ func (this *VolumeAttachmentStatus) String() string { s := strings.Join([]string{`&VolumeAttachmentStatus{`, `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, - `AttachError:` + strings.Replace(fmt.Sprintf("%v", this.AttachError), "VolumeError", "VolumeError", 1) + `,`, - `DetachError:` + strings.Replace(fmt.Sprintf("%v", this.DetachError), "VolumeError", "VolumeError", 1) + `,`, + `AttachError:` + strings.Replace(this.AttachError.String(), "VolumeError", "VolumeError", 1) + `,`, + `DetachError:` + strings.Replace(this.DetachError.String(), "VolumeError", "VolumeError", 1) + `,`, `}`, }, "") return s @@ -745,7 +1121,7 @@ func (this *VolumeError) String() string { return "nil" } s := strings.Join([]string{`&VolumeError{`, - `Time:` + strings.Replace(strings.Replace(this.Time.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Time:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, }, "") @@ -774,7 +1150,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -802,7 +1178,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -811,6 +1187,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -832,7 +1211,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -842,6 +1221,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -861,7 +1243,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -870,6 +1252,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -890,7 +1275,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 
0x80 { break } @@ -907,7 +1292,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -917,6 +1302,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -933,7 +1321,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -943,6 +1331,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -979,7 +1370,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -989,6 +1380,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1009,7 +1403,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1019,6 +1413,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1038,7 +1435,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1059,7 +1456,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1069,6 +1466,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1089,7 +1489,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1098,10 +1498,13 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedTopologies = append(m.AllowedTopologies, k8s_io_api_core_v1.TopologySelectorTerm{}) + m.AllowedTopologies = append(m.AllowedTopologies, v11.TopologySelectorTerm{}) if err := m.AllowedTopologies[len(m.AllowedTopologies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1115,6 +1518,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx 
+ skippy) > l { return io.ErrUnexpectedEOF } @@ -1142,7 +1548,7 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1170,7 +1576,7 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1179,6 +1585,9 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1200,7 +1609,7 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1209,6 +1618,9 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1226,6 +1638,9 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1253,7 +1668,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1281,7 +1696,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1290,6 +1705,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1311,7 +1729,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1320,6 +1738,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1341,7 +1762,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1350,6 +1771,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1366,6 +1790,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1393,7 +1820,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1421,7 +1848,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << 
shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1430,6 +1857,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1451,7 +1881,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1460,6 +1890,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1477,6 +1910,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1504,7 +1940,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1532,7 +1968,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1542,12 +1978,51 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) m.PersistentVolumeName = &s iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InlineVolumeSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.InlineVolumeSpec == nil { + m.InlineVolumeSpec = &v11.PersistentVolumeSpec{} + } + if err := m.InlineVolumeSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1557,6 +2032,9 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1584,7 +2062,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1612,7 +2090,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1622,6 +2100,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if 
postIndex > l { return io.ErrUnexpectedEOF } @@ -1641,7 +2122,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1650,6 +2131,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1671,7 +2155,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1681,6 +2165,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1695,6 +2182,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1722,7 +2212,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1750,7 +2240,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1770,7 +2260,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1779,6 +2269,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1799,7 +2292,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1816,7 +2309,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1826,6 +2319,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -1842,7 +2338,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1852,6 +2348,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -1888,7 +2387,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= 
int(b&0x7F) << shift if b < 0x80 { break } @@ -1897,6 +2396,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1921,7 +2423,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1930,6 +2432,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1949,6 +2454,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1976,7 +2484,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2004,7 +2512,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2013,6 +2521,9 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2034,7 +2545,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2044,6 +2555,9 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2058,6 +2572,9 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2124,10 +2641,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -2156,6 +2676,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -2174,73 +2697,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 984 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x3d, 0x6f, 0x23, 0x45, - 0x18, 0xce, 0xc6, 0xf9, 0x70, 0xc6, 0x09, 0x97, 0x0c, 0x01, 0x8c, 0x0b, 0x3b, 0x32, 0x05, 0xe6, - 0xe0, 0x76, 0x2f, 0xe1, 0x40, 0x27, 0x24, 0x90, 0xbc, 0x60, 0x09, 0xa4, 0xf8, 0x2e, 0x9a, 0x84, - 0x13, 0x42, 0x14, 
0x4c, 0x76, 0xdf, 0xdb, 0x2c, 0xf6, 0xee, 0x2c, 0x33, 0x63, 0x43, 0x3a, 0x2a, - 0x3a, 0x24, 0x68, 0xf9, 0x29, 0x94, 0x54, 0xa1, 0xbb, 0xf2, 0x2a, 0x8b, 0x2c, 0x35, 0x7f, 0x20, - 0x15, 0x9a, 0xd9, 0x89, 0xbd, 0xb1, 0xd7, 0x9c, 0xd3, 0x5c, 0xe7, 0xf7, 0xe3, 0x79, 0xde, 0xef, - 0x59, 0xa3, 0x4f, 0x7a, 0x0f, 0x85, 0x1d, 0x32, 0xa7, 0x37, 0x38, 0x05, 0x1e, 0x83, 0x04, 0xe1, - 0x0c, 0x21, 0xf6, 0x19, 0x77, 0x8c, 0x81, 0x26, 0xa1, 0x23, 0x24, 0xe3, 0x34, 0x00, 0x67, 0xb8, - 0xef, 0x04, 0x10, 0x03, 0xa7, 0x12, 0x7c, 0x3b, 0xe1, 0x4c, 0x32, 0xfc, 0x5a, 0xe6, 0x66, 0xd3, - 0x24, 0xb4, 0x8d, 0x9b, 0x3d, 0xdc, 0xaf, 0xdd, 0x0b, 0x42, 0x79, 0x36, 0x38, 0xb5, 0x3d, 0x16, - 0x39, 0x01, 0x0b, 0x98, 0xa3, 0xbd, 0x4f, 0x07, 0x4f, 0xb5, 0xa4, 0x05, 0xfd, 0x2b, 0x63, 0xa9, - 0x35, 0x73, 0xc1, 0x3c, 0xc6, 0x8b, 0x22, 0xd5, 0x1e, 0x4c, 0x7c, 0x22, 0xea, 0x9d, 0x85, 0x31, - 0xf0, 0x73, 0x27, 0xe9, 0x05, 0x4a, 0x21, 0x9c, 0x08, 0x24, 0x2d, 0x42, 0x39, 0xf3, 0x50, 0x7c, - 0x10, 0xcb, 0x30, 0x82, 0x19, 0xc0, 0x87, 0x2f, 0x02, 0x08, 0xef, 0x0c, 0x22, 0x3a, 0x8d, 0x6b, - 0xfe, 0xb2, 0x86, 0x36, 0x8f, 0xb3, 0x06, 0x7c, 0xda, 0xa7, 0x42, 0xe0, 0x6f, 0x51, 0x59, 0x25, - 0xe5, 0x53, 0x49, 0xab, 0xd6, 0x9e, 0xd5, 0xaa, 0x1c, 0xdc, 0xb7, 0x27, 0xcd, 0x1a, 0x73, 0xdb, - 0x49, 0x2f, 0x50, 0x0a, 0x61, 0x2b, 0x6f, 0x7b, 0xb8, 0x6f, 0x3f, 0x3e, 0xfd, 0x0e, 0x3c, 0xd9, - 0x05, 0x49, 0x5d, 0x7c, 0x31, 0x6a, 0x2c, 0xa5, 0xa3, 0x06, 0x9a, 0xe8, 0xc8, 0x98, 0x15, 0x7f, - 0x80, 0x2a, 0x09, 0x67, 0xc3, 0x50, 0x84, 0x2c, 0x06, 0x5e, 0x5d, 0xde, 0xb3, 0x5a, 0x1b, 0xee, - 0xab, 0x06, 0x52, 0x39, 0x9a, 0x98, 0x48, 0xde, 0x0f, 0x07, 0x08, 0x25, 0x94, 0xd3, 0x08, 0x24, - 0x70, 0x51, 0x2d, 0xed, 0x95, 0x5a, 0x95, 0x83, 0xf7, 0xed, 0xc2, 0x39, 0xda, 0xf9, 0x8a, 0xec, - 0xa3, 0x31, 0xaa, 0x13, 0x4b, 0x7e, 0x3e, 0xc9, 0x6e, 0x62, 0x20, 0x39, 0x6a, 0xdc, 0x43, 0x5b, - 0x1c, 0xbc, 0x3e, 0x0d, 0xa3, 0x23, 0xd6, 0x0f, 0xbd, 0xf3, 0xea, 0x8a, 0xce, 0xb0, 0x93, 0x8e, - 0x1a, 0x5b, 0x24, 0x6f, 0xb8, 0x1a, 0x35, 0xee, 0xcf, 0x6e, 0x80, 0x7d, 0x04, 0x5c, 0x84, 0x42, - 0x42, 0x2c, 0x9f, 0xb0, 0xfe, 0x20, 0x82, 0x1b, 0x18, 0x72, 0x93, 0x1b, 0x3f, 0x40, 0x9b, 0x11, - 0x1b, 0xc4, 0xf2, 0x71, 0x22, 0x43, 0x16, 0x8b, 0xea, 0xea, 0x5e, 0xa9, 0xb5, 0xe1, 0x6e, 0xa7, - 0xa3, 0xc6, 0x66, 0x37, 0xa7, 0x27, 0x37, 0xbc, 0xf0, 0x21, 0xda, 0xa5, 0xfd, 0x3e, 0xfb, 0x21, - 0x0b, 0xd0, 0xf9, 0x31, 0xa1, 0xb1, 0xea, 0x52, 0x75, 0x6d, 0xcf, 0x6a, 0x95, 0xdd, 0x6a, 0x3a, - 0x6a, 0xec, 0xb6, 0x0b, 0xec, 0xa4, 0x10, 0x85, 0xbf, 0x42, 0x3b, 0x43, 0xad, 0x72, 0xc3, 0xd8, - 0x0f, 0xe3, 0xa0, 0xcb, 0x7c, 0xa8, 0xae, 0xeb, 0xa2, 0xef, 0xa6, 0xa3, 0xc6, 0xce, 0x93, 0x69, - 0xe3, 0x55, 0x91, 0x92, 0xcc, 0x92, 0xe0, 0xef, 0xd1, 0x8e, 0x8e, 0x08, 0xfe, 0x09, 0x4b, 0x58, - 0x9f, 0x05, 0x21, 0x88, 0x6a, 0x59, 0x8f, 0xae, 0x95, 0x1f, 0x9d, 0x6a, 0x9d, 0x9a, 0x9b, 0xf1, - 0x3a, 0x3f, 0x86, 0x3e, 0x78, 0x92, 0xf1, 0x13, 0xe0, 0x91, 0xfb, 0xa6, 0x99, 0xd7, 0x4e, 0x7b, - 0x9a, 0x8a, 0xcc, 0xb2, 0xd7, 0x3e, 0x46, 0x77, 0xa6, 0x06, 0x8e, 0xb7, 0x51, 0xa9, 0x07, 0xe7, - 0x7a, 0x9b, 0x37, 0x88, 0xfa, 0x89, 0x77, 0xd1, 0xea, 0x90, 0xf6, 0x07, 0x90, 0x2d, 0x1f, 0xc9, - 0x84, 0x8f, 0x96, 0x1f, 0x5a, 0xcd, 0x3f, 0x2c, 0xb4, 0x9d, 0xdf, 0x9e, 0xc3, 0x50, 0x48, 0xfc, - 0xcd, 0xcc, 0x4d, 0xd8, 0x8b, 0xdd, 0x84, 0x42, 0xeb, 0x8b, 0xd8, 0x36, 0x35, 0x94, 0xaf, 0x35, - 0xb9, 0x7b, 0xf8, 0x1c, 0xad, 0x86, 0x12, 0x22, 0x51, 0x5d, 0xd6, 0x8d, 0x79, 0x6b, 0x81, 0x9d, - 0x76, 0xb7, 0x0c, 0xdf, 0xea, 0x17, 0x0a, 0x49, 0x32, 0x82, 0xe6, 0xef, 0xcb, 0x68, 0x3b, 0x9b, - 0x4b, 0x5b, 0x4a, 0xea, 0x9d, 0x45, 0x10, 
0xcb, 0x97, 0x70, 0xd0, 0x5d, 0xb4, 0x22, 0x12, 0xf0, - 0x74, 0x33, 0x2b, 0x07, 0xef, 0xce, 0xc9, 0x7f, 0x3a, 0xb1, 0xe3, 0x04, 0x3c, 0x77, 0xd3, 0x10, - 0xaf, 0x28, 0x89, 0x68, 0x1a, 0xfc, 0x25, 0x5a, 0x13, 0x92, 0xca, 0x81, 0x3a, 0x72, 0x45, 0x78, - 0x6f, 0x51, 0x42, 0x0d, 0x72, 0x5f, 0x31, 0x94, 0x6b, 0x99, 0x4c, 0x0c, 0x59, 0xf3, 0x4f, 0x0b, - 0xed, 0x4e, 0x43, 0x5e, 0xc2, 0x74, 0x0f, 0x6f, 0x4e, 0xf7, 0xed, 0x05, 0x8b, 0x99, 0x33, 0xe1, - 0xa7, 0xe8, 0xf5, 0x99, 0xb2, 0xd9, 0x80, 0x7b, 0xa0, 0x9e, 0x84, 0x64, 0xea, 0xe1, 0x79, 0x44, - 0x23, 0xc8, 0xb6, 0x3e, 0x7b, 0x12, 0x8e, 0x0a, 0xec, 0xa4, 0x10, 0xd5, 0xfc, 0xab, 0xa0, 0x59, - 0x6a, 0x44, 0xf8, 0x3d, 0x54, 0xa6, 0x5a, 0x03, 0xdc, 0x50, 0x8f, 0x8b, 0x6f, 0x1b, 0x3d, 0x19, - 0x7b, 0xe8, 0x51, 0xea, 0xf4, 0xcc, 0x6e, 0x2c, 0x3c, 0x4a, 0x0d, 0xca, 0x8d, 0x52, 0xcb, 0xc4, - 0x90, 0xa9, 0x24, 0x62, 0xe6, 0x67, 0xf5, 0x95, 0x6e, 0x26, 0xf1, 0xc8, 0xe8, 0xc9, 0xd8, 0xa3, - 0xf9, 0x6f, 0xa9, 0xa0, 0x69, 0x7a, 0x27, 0x72, 0xd5, 0xf8, 0xba, 0x9a, 0xf2, 0x4c, 0x35, 0xfe, - 0xb8, 0x1a, 0x1f, 0xff, 0x66, 0x21, 0x4c, 0xc7, 0x14, 0xdd, 0xeb, 0x9d, 0xc9, 0x06, 0xdb, 0xb9, - 0xd5, 0x96, 0xda, 0xed, 0x19, 0x9e, 0xec, 0xe3, 0x54, 0x33, 0xf1, 0xf1, 0xac, 0x03, 0x29, 0x08, - 0x8e, 0x7d, 0x54, 0xc9, 0xb4, 0x1d, 0xce, 0x19, 0x37, 0x17, 0xd3, 0xfc, 0xdf, 0x5c, 0xb4, 0xa7, - 0x5b, 0x57, 0x1f, 0xdb, 0xf6, 0x04, 0x7a, 0x35, 0x6a, 0x54, 0x72, 0x76, 0x92, 0xa7, 0x55, 0x51, - 0x7c, 0x98, 0x44, 0x59, 0xb9, 0x5d, 0x94, 0xcf, 0x60, 0x7e, 0x94, 0x1c, 0x6d, 0xad, 0x83, 0xde, - 0x98, 0xd3, 0x96, 0x5b, 0x3d, 0xe1, 0x3f, 0x5b, 0x28, 0x1f, 0x03, 0x1f, 0xa2, 0x15, 0xf5, 0x0f, - 0xc8, 0xdc, 0xf6, 0xdd, 0xc5, 0x6e, 0xfb, 0x24, 0x8c, 0x60, 0xf2, 0x3a, 0x29, 0x89, 0x68, 0x16, - 0xfc, 0x0e, 0x5a, 0x8f, 0x40, 0x08, 0x1a, 0x98, 0xc8, 0xee, 0x1d, 0xe3, 0xb4, 0xde, 0xcd, 0xd4, - 0xe4, 0xda, 0xee, 0xb6, 0x2e, 0x2e, 0xeb, 0x4b, 0xcf, 0x2e, 0xeb, 0x4b, 0xcf, 0x2f, 0xeb, 0x4b, - 0x3f, 0xa5, 0x75, 0xeb, 0x22, 0xad, 0x5b, 0xcf, 0xd2, 0xba, 0xf5, 0x3c, 0xad, 0x5b, 0x7f, 0xa7, - 0x75, 0xeb, 0xd7, 0x7f, 0xea, 0x4b, 0x5f, 0x2f, 0x0f, 0xf7, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, - 0x85, 0x2a, 0x88, 0xc0, 0xcf, 0x0a, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto index 7ac6cb2d2..4087b7e5d 100644 --- a/vendor/k8s.io/api/storage/v1/generated.proto +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -36,7 +36,7 @@ option go_package = "v1"; // according to etcd is in ObjectMeta.Name. message StorageClass { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -80,7 +80,7 @@ message StorageClass { // StorageClassList is a collection of storage classes. message StorageClassList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -94,7 +94,7 @@ message StorageClassList { // VolumeAttachment objects are non-namespaced. message VolumeAttachment { // Standard object metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -112,7 +112,7 @@ message VolumeAttachment { // VolumeAttachmentList is a collection of VolumeAttachment objects. message VolumeAttachmentList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -128,6 +128,15 @@ message VolumeAttachmentSource { // Name of the persistent volume to attach. // +optional optional string persistentVolumeName = 1; + + // inlineVolumeSpec contains all the information necessary to attach + // a persistent volume defined by a pod's inline VolumeSource. This field + // is populated only for the CSIMigration feature. It contains + // translated fields from a pod's inline VolumeSource to a + // PersistentVolumeSpec. This field is alpha-level and is only + // honored by servers that enabled the CSIMigration feature. + // +optional + optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go index bd60e1026..e42ee5e9c 100644 --- a/vendor/k8s.io/api/storage/v1/types.go +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -33,7 +33,7 @@ import ( type StorageClass struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -80,7 +80,7 @@ type StorageClass struct { type StorageClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -115,7 +115,7 @@ type VolumeAttachment struct { metav1.TypeMeta `json:",inline"` // Standard object metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -136,7 +136,7 @@ type VolumeAttachment struct { type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -166,7 +166,14 @@ type VolumeAttachmentSource struct { // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` - // Placeholder for *VolumeSource to accommodate inline volumes in pods. + // inlineVolumeSpec contains all the information necessary to attach + // a persistent volume defined by a pod's inline VolumeSource. This field + // is populated only for the CSIMigration feature. It contains + // translated fields from a pod's inline VolumeSource to a + // PersistentVolumeSpec. This field is alpha-level and is only + // honored by servers that enabled the CSIMigration feature. + // +optional + InlineVolumeSpec *v1.PersistentVolumeSpec `json:"inlineVolumeSpec,omitempty" protobuf:"bytes,2,opt,name=inlineVolumeSpec"` } // VolumeAttachmentStatus is the status of a VolumeAttachment request. diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index e31dd7f71..78ff19c21 100644 --- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_StorageClass = map[string]string{ "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "provisioner": "Provisioner indicates the type of the provisioner.", "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. 
Defaults to Delete.", @@ -45,7 +45,7 @@ func (StorageClass) SwaggerDoc() map[string]string { var map_StorageClassList = map[string]string{ "": "StorageClassList is a collection of storage classes.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of StorageClasses", } @@ -55,7 +55,7 @@ func (StorageClassList) SwaggerDoc() map[string]string { var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", - "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", } @@ -66,7 +66,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of VolumeAttachments", } diff --git a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go index 3157ec678..eb8626e6e 100644 --- a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go @@ -89,7 +89,7 @@ func (in *StorageClass) DeepCopyObject() runtime.Object { func (in *StorageClassList) DeepCopyInto(out *StorageClassList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]StorageClass, len(*in)) @@ -150,7 +150,7 @@ func (in *VolumeAttachment) DeepCopyObject() runtime.Object { func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]VolumeAttachment, len(*in)) @@ -187,6 +187,11 @@ func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) { *out = new(string) **out = **in } + if in.InlineVolumeSpec != nil { + in, out := &in.InlineVolumeSpec, &out.InlineVolumeSpec + *out = new(corev1.PersistentVolumeSpec) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go index 0511ccabd..423243521 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go @@ -17,32 +17,22 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. 
// source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto - - It has these top-level messages: - VolumeAttachment - VolumeAttachmentList - VolumeAttachmentSource - VolumeAttachmentSpec - VolumeAttachmentStatus - VolumeError -*/ package v1alpha1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v11 "k8s.io/api/core/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -55,29 +45,173 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } -func (*VolumeAttachment) ProtoMessage() {} -func (*VolumeAttachment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } +func (*VolumeAttachment) ProtoMessage() {} +func (*VolumeAttachment) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{0} +} +func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachment) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachment.Merge(m, src) +} +func (m *VolumeAttachment) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachment) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachment.DiscardUnknown(m) +} -func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } -func (*VolumeAttachmentList) ProtoMessage() {} -func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo -func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } -func (*VolumeAttachmentSource) ProtoMessage() {} -func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } +func (*VolumeAttachmentList) ProtoMessage() {} +func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{1} +} +func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentList.Merge(m, src) +} +func (m *VolumeAttachmentList) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentList) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentList.DiscardUnknown(m) +} -func (m 
*VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } -func (*VolumeAttachmentSpec) ProtoMessage() {} -func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo -func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } -func (*VolumeAttachmentStatus) ProtoMessage() {} -func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } +func (*VolumeAttachmentSource) ProtoMessage() {} +func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{2} +} +func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSource.Merge(m, src) +} +func (m *VolumeAttachmentSource) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSource.DiscardUnknown(m) +} -func (m *VolumeError) Reset() { *m = VolumeError{} } -func (*VolumeError) ProtoMessage() {} -func (*VolumeError) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo + +func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } +func (*VolumeAttachmentSpec) ProtoMessage() {} +func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{3} +} +func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSpec.Merge(m, src) +} +func (m *VolumeAttachmentSpec) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo + +func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } +func (*VolumeAttachmentStatus) ProtoMessage() {} +func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{4} +} +func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentStatus.Merge(m, src) +} +func (m *VolumeAttachmentStatus) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo + +func (m *VolumeError) Reset() { *m = VolumeError{} } +func 
(*VolumeError) ProtoMessage() {} +func (*VolumeError) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{5} +} +func (m *VolumeError) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeError) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeError.Merge(m, src) +} +func (m *VolumeError) XXX_Size() int { + return m.Size() +} +func (m *VolumeError) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeError.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeError proto.InternalMessageInfo func init() { proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachment") @@ -85,12 +219,69 @@ func init() { proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSource") proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSpec") proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus.AttachmentMetadataEntry") proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1alpha1.VolumeError") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto", fileDescriptor_10f856db1e670dc4) +} + +var fileDescriptor_10f856db1e670dc4 = []byte{ + // 745 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xc7, 0xe3, 0x24, 0x6d, 0xd3, 0x0d, 0x1f, 0xd1, 0x2a, 0x82, 0x28, 0x48, 0x4e, 0x95, 0x53, + 0x40, 0x74, 0x4d, 0x0a, 0x42, 0x15, 0xb7, 0x58, 0xed, 0xa1, 0xa2, 0x2d, 0x68, 0x8b, 0x38, 0x00, + 0x07, 0x36, 0xf6, 0xe2, 0xb8, 0x89, 0x3f, 0xe4, 0x5d, 0x47, 0xea, 0x8d, 0x13, 0x67, 0x6e, 0xbc, + 0x01, 0xcf, 0x92, 0x1b, 0x15, 0xa7, 0x9e, 0x22, 0x6a, 0xde, 0x82, 0x0b, 0x68, 0xd7, 0x9b, 0xc4, + 0x24, 0x29, 0xb4, 0xbd, 0x79, 0x66, 0x67, 0x7e, 0x33, 0xf3, 0xdf, 0xf1, 0x82, 0x9d, 0xfe, 0x36, + 0x43, 0x6e, 0x60, 0xf4, 0xe3, 0x2e, 0x8d, 0x7c, 0xca, 0x29, 0x33, 0x86, 0xd4, 0xb7, 0x83, 0xc8, + 0x50, 0x07, 0x24, 0x74, 0x0d, 0xc6, 0x83, 0x88, 0x38, 0xd4, 0x18, 0xb6, 0xc9, 0x20, 0xec, 0x91, + 0xb6, 0xe1, 0x50, 0x9f, 0x46, 0x84, 0x53, 0x1b, 0x85, 0x51, 0xc0, 0x03, 0x78, 0x2f, 0x0d, 0x46, + 0x24, 0x74, 0x91, 0x0a, 0x46, 0x93, 0xe0, 0xfa, 0xa6, 0xe3, 0xf2, 0x5e, 0xdc, 0x45, 0x56, 0xe0, + 0x19, 0x4e, 0xe0, 0x04, 0x86, 0xcc, 0xe9, 0xc6, 0x1f, 0xa4, 0x25, 0x0d, 0xf9, 0x95, 0xb2, 0xea, + 0xcd, 0x4c, 0x61, 0x2b, 0x88, 0x44, 0xd5, 0xf9, 0x7a, 0xf5, 0x27, 0xb3, 0x18, 0x8f, 0x58, 0x3d, + 0xd7, 0xa7, 0xd1, 0x89, 0x11, 0xf6, 0x1d, 0xe1, 0x60, 0x86, 0x47, 0x39, 0x59, 0x96, 0x65, 0x5c, + 0x94, 0x15, 0xc5, 0x3e, 0x77, 0x3d, 0xba, 0x90, 0xf0, 0xf4, 0x7f, 0x09, 0xcc, 0xea, 0x51, 0x8f, + 0xcc, 0xe7, 0x35, 0xbf, 0xe6, 0x41, 0xe5, 0x75, 0x30, 0x88, 0x3d, 0xda, 0xe1, 0x9c, 0x58, 0x3d, + 0x8f, 0xfa, 0x1c, 0xbe, 0x07, 0x25, 0xd1, 0x98, 0x4d, 0x38, 0xa9, 0x69, 0x1b, 0x5a, 0xab, 0xbc, + 0xf5, 0x08, 0xcd, 0x64, 0x9b, 0xf2, 0x51, 0xd8, 0x77, 0x84, 0x83, 0x21, 0x11, 0x8d, 0x86, 0x6d, + 0xf4, 0xa2, 0x7b, 0x4c, 0x2d, 0x7e, 0x40, 0x39, 0x31, 0xe1, 0x68, 0xdc, 0xc8, 0x25, 0xe3, 0x06, + 0x98, 0xf9, 0xf0, 0x94, 0x0a, 0x8f, 0x40, 0x91, 0x85, 0xd4, 0xaa, 0xe5, 0x25, 0xbd, 0x8d, 0xfe, + 0x71, 0x29, 0x68, 0xbe, 0xbd, 0xa3, 
0x90, 0x5a, 0xe6, 0x0d, 0x85, 0x2f, 0x0a, 0x0b, 0x4b, 0x18, + 0x7c, 0x0b, 0x56, 0x19, 0x27, 0x3c, 0x66, 0xb5, 0x82, 0xc4, 0x3e, 0xbe, 0x1a, 0x56, 0xa6, 0x9a, + 0xb7, 0x14, 0x78, 0x35, 0xb5, 0xb1, 0x42, 0x36, 0x47, 0x1a, 0xa8, 0xce, 0xa7, 0xec, 0xbb, 0x8c, + 0xc3, 0x77, 0x0b, 0x62, 0xa1, 0xcb, 0x89, 0x25, 0xb2, 0xa5, 0x54, 0x15, 0x55, 0xb2, 0x34, 0xf1, + 0x64, 0x84, 0xc2, 0x60, 0xc5, 0xe5, 0xd4, 0x63, 0xb5, 0xfc, 0x46, 0xa1, 0x55, 0xde, 0xda, 0xbc, + 0xd2, 0x48, 0xe6, 0x4d, 0x45, 0x5e, 0xd9, 0x13, 0x0c, 0x9c, 0xa2, 0x9a, 0xdf, 0x35, 0x70, 0x67, + 0x61, 0xfa, 0x20, 0x8e, 0x2c, 0x0a, 0xf7, 0x41, 0x35, 0xa4, 0x11, 0x73, 0x19, 0xa7, 0x3e, 0x4f, + 0x63, 0x0e, 0x89, 0x47, 0xe5, 0x60, 0xeb, 0x66, 0x2d, 0x19, 0x37, 0xaa, 0x2f, 0x97, 0x9c, 0xe3, + 0xa5, 0x59, 0xf0, 0x18, 0x54, 0x5c, 0x7f, 0xe0, 0xfa, 0x34, 0xf5, 0x1d, 0xcd, 0x6e, 0xbc, 0x95, + 0x9d, 0x43, 0xfc, 0x3a, 0x42, 0x90, 0x79, 0xb2, 0xbc, 0xe8, 0x6a, 0x32, 0x6e, 0x54, 0xf6, 0xe6, + 0x28, 0x78, 0x81, 0xdb, 0xfc, 0xb6, 0xe4, 0x7e, 0xc4, 0x01, 0x7c, 0x08, 0x4a, 0x44, 0x7a, 0x68, + 0xa4, 0xc6, 0x98, 0xea, 0xdd, 0x51, 0x7e, 0x3c, 0x8d, 0x90, 0x3b, 0x24, 0xa5, 0x50, 0x8d, 0x5e, + 0x71, 0x87, 0x64, 0x6a, 0x66, 0x87, 0xa4, 0x8d, 0x15, 0x52, 0xb4, 0xe2, 0x07, 0x76, 0xaa, 0x68, + 0xe1, 0xef, 0x56, 0x0e, 0x95, 0x1f, 0x4f, 0x23, 0x9a, 0xbf, 0x0b, 0x4b, 0xae, 0x49, 0x2e, 0x63, + 0x66, 0x26, 0x5b, 0xce, 0x54, 0x5a, 0x98, 0xc9, 0x9e, 0xce, 0x64, 0xc3, 0x2f, 0x1a, 0x80, 0x64, + 0x8a, 0x38, 0x98, 0x2c, 0x6b, 0xba, 0x51, 0xcf, 0xaf, 0xf1, 0x93, 0xa0, 0xce, 0x02, 0x6d, 0xd7, + 0xe7, 0xd1, 0x89, 0x59, 0x57, 0x5d, 0xc0, 0xc5, 0x00, 0xbc, 0xa4, 0x05, 0x78, 0x0c, 0xca, 0xa9, + 0x77, 0x37, 0x8a, 0x82, 0x48, 0xfd, 0xb6, 0xad, 0x4b, 0x74, 0x24, 0xe3, 0x4d, 0x3d, 0x19, 0x37, + 0xca, 0x9d, 0x19, 0xe0, 0xd7, 0xb8, 0x51, 0xce, 0x9c, 0xe3, 0x2c, 0x5c, 0xd4, 0xb2, 0xe9, 0xac, + 0x56, 0xf1, 0x3a, 0xb5, 0x76, 0xe8, 0xc5, 0xb5, 0x32, 0xf0, 0xfa, 0x2e, 0xb8, 0x7b, 0x81, 0x44, + 0xb0, 0x02, 0x0a, 0x7d, 0x7a, 0x92, 0x6e, 0x22, 0x16, 0x9f, 0xb0, 0x0a, 0x56, 0x86, 0x64, 0x10, + 0xa7, 0x1b, 0xb7, 0x8e, 0x53, 0xe3, 0x59, 0x7e, 0x5b, 0x6b, 0x7e, 0xd2, 0x40, 0xb6, 0x06, 0xdc, + 0x07, 0x45, 0xf1, 0x96, 0xab, 0x67, 0xe6, 0xc1, 0xe5, 0x9e, 0x99, 0x57, 0xae, 0x47, 0x67, 0xcf, + 0xa5, 0xb0, 0xb0, 0xa4, 0xc0, 0xfb, 0x60, 0xcd, 0xa3, 0x8c, 0x11, 0x47, 0x55, 0x36, 0x6f, 0xab, + 0xa0, 0xb5, 0x83, 0xd4, 0x8d, 0x27, 0xe7, 0x26, 0x1a, 0x9d, 0xeb, 0xb9, 0xd3, 0x73, 0x3d, 0x77, + 0x76, 0xae, 0xe7, 0x3e, 0x26, 0xba, 0x36, 0x4a, 0x74, 0xed, 0x34, 0xd1, 0xb5, 0xb3, 0x44, 0xd7, + 0x7e, 0x24, 0xba, 0xf6, 0xf9, 0xa7, 0x9e, 0x7b, 0x53, 0x9a, 0x08, 0xf7, 0x27, 0x00, 0x00, 0xff, + 0xff, 0xe8, 0x45, 0xe3, 0xba, 0xab, 0x07, 0x00, 0x00, +} + func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -98,41 +289,52 @@ func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != 
nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -140,37 +342,46 @@ func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -178,23 +389,41 @@ func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if m.InlineVolumeSpec != nil { + { + size, err := m.InlineVolumeSpec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } if m.PersistentVolumeName != nil { - dAtA[i] = 0xa - i++ + i -= len(*m.PersistentVolumeName) + copy(dAtA[i:], *m.PersistentVolumeName) i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) - i += copy(dAtA[i:], *m.PersistentVolumeName) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -202,33 +431,42 @@ func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) - i += copy(dAtA[i:], m.Attacher) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) - n5, err := m.Source.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - dAtA[i] = 0x1a - i++ + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) - i += copy(dAtA[i:], m.NodeName) - return i, nil + i-- + dAtA[i] = 0x1a + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Attacher) + copy(dAtA[i:], m.Attacher) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -236,67 +474,78 @@ func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Attached { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.DetachError != nil { + { + size, err := m.DetachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.AttachError != nil { + { + size, err := m.AttachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i++ if len(m.AttachmentMetadata) > 0 { keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) for k := range m.AttachmentMetadata { keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) - for _, k := range keysForAttachmentMetadata { + for iNdEx := len(keysForAttachmentMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.AttachmentMetadata[string(keysForAttachmentMetadata[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.AttachmentMetadata[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForAttachmentMetadata[iNdEx]) + copy(dAtA[i:], keysForAttachmentMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttachmentMetadata[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- 
dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) } } - if m.AttachError != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AttachError.Size())) - n6, err := m.AttachError.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - } - if m.DetachError != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DetachError.Size())) - n7, err := m.DetachError.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 + i-- + if m.Attached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *VolumeError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -304,35 +553,48 @@ func (m *VolumeError) Marshal() (dAtA []byte, err error) { } func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeError) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) - n8, err := m.Time.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - dAtA[i] = 0x12 - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.Time.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *VolumeAttachment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -345,6 +607,9 @@ func (m *VolumeAttachment) Size() (n int) { } func (m *VolumeAttachmentList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -359,16 +624,26 @@ func (m *VolumeAttachmentList) Size() (n int) { } func (m *VolumeAttachmentSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.PersistentVolumeName != nil { l = len(*m.PersistentVolumeName) n += 1 + l + sovGenerated(uint64(l)) } + if m.InlineVolumeSpec != nil { + l = m.InlineVolumeSpec.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } func (m *VolumeAttachmentSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Attacher) @@ -381,6 +656,9 @@ func (m *VolumeAttachmentSpec) Size() (n int) { } func (m *VolumeAttachmentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -404,6 +682,9 @@ func (m *VolumeAttachmentStatus) Size() (n int) { } func (m *VolumeError) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Time.Size() @@ -414,14 +695,7 @@ func (m *VolumeError) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -431,7 +705,7 @@ func (this 
*VolumeAttachment) String() string { return "nil" } s := strings.Join([]string{`&VolumeAttachment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -442,9 +716,14 @@ func (this *VolumeAttachmentList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]VolumeAttachment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&VolumeAttachmentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -455,6 +734,7 @@ func (this *VolumeAttachmentSource) String() string { } s := strings.Join([]string{`&VolumeAttachmentSource{`, `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, + `InlineVolumeSpec:` + strings.Replace(fmt.Sprintf("%v", this.InlineVolumeSpec), "PersistentVolumeSpec", "v11.PersistentVolumeSpec", 1) + `,`, `}`, }, "") return s @@ -488,8 +768,8 @@ func (this *VolumeAttachmentStatus) String() string { s := strings.Join([]string{`&VolumeAttachmentStatus{`, `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, - `AttachError:` + strings.Replace(fmt.Sprintf("%v", this.AttachError), "VolumeError", "VolumeError", 1) + `,`, - `DetachError:` + strings.Replace(fmt.Sprintf("%v", this.DetachError), "VolumeError", "VolumeError", 1) + `,`, + `AttachError:` + strings.Replace(this.AttachError.String(), "VolumeError", "VolumeError", 1) + `,`, + `DetachError:` + strings.Replace(this.DetachError.String(), "VolumeError", "VolumeError", 1) + `,`, `}`, }, "") return s @@ -499,7 +779,7 @@ func (this *VolumeError) String() string { return "nil" } s := strings.Join([]string{`&VolumeError{`, - `Time:` + strings.Replace(strings.Replace(this.Time.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Time:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, }, "") @@ -528,7 +808,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -556,7 +836,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -565,6 +845,9 @@ func (m *VolumeAttachment) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -586,7 +869,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -595,6 +878,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -616,7 +902,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -625,6 +911,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -641,6 +930,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -668,7 +960,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -696,7 +988,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -705,6 +997,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -726,7 +1021,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -735,6 +1030,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -752,6 +1050,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -779,7 +1080,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -807,7 +1108,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -817,12 +1118,51 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) m.PersistentVolumeName = &s iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType 
= %d for field InlineVolumeSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.InlineVolumeSpec == nil { + m.InlineVolumeSpec = &v11.PersistentVolumeSpec{} + } + if err := m.InlineVolumeSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -832,6 +1172,9 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -859,7 +1202,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -887,7 +1230,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -897,6 +1240,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -916,7 +1262,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -925,6 +1271,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -946,7 +1295,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -956,6 +1305,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -970,6 +1322,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -997,7 +1352,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1025,7 +1380,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1045,7 +1400,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1054,6 +1409,9 @@ func (m 
*VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1074,7 +1432,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1091,7 +1449,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1101,6 +1459,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -1117,7 +1478,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1127,6 +1488,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -1163,7 +1527,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1172,6 +1536,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1196,7 +1563,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1205,6 +1572,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1224,6 +1594,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1251,7 +1624,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1279,7 +1652,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1288,6 +1661,9 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1309,7 +1685,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1319,6 
+1695,9 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1333,6 +1712,9 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1399,10 +1781,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1431,6 +1816,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1449,55 +1837,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 704 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x4d, 0x6f, 0xd3, 0x4c, - 0x10, 0xc7, 0xe3, 0x24, 0x6d, 0xd3, 0xcd, 0xf3, 0x52, 0xad, 0xa2, 0xe7, 0x89, 0x82, 0xe4, 0x54, - 0x39, 0x15, 0x44, 0xd7, 0xa4, 0x20, 0x54, 0x71, 0x8b, 0xd5, 0x1e, 0x10, 0x6d, 0x41, 0x5b, 0xc4, - 0x01, 0x38, 0xb0, 0xb1, 0xa7, 0x8e, 0x9b, 0xfa, 0x45, 0xbb, 0xeb, 0x48, 0xbd, 0x71, 0xe2, 0xcc, - 0x8d, 0x6f, 0xc0, 0x67, 0xc9, 0x8d, 0x1e, 0x7b, 0x8a, 0xa8, 0xf9, 0x16, 0x5c, 0x40, 0x5e, 0x6f, - 0x5e, 0x68, 0x52, 0x68, 0x7b, 0xf3, 0xcc, 0xce, 0xfc, 0x66, 0xe6, 0xbf, 0xb3, 0x46, 0x3b, 0xfd, - 0x6d, 0x41, 0xfc, 0xc8, 0xea, 0x27, 0x5d, 0xe0, 0x21, 0x48, 0x10, 0xd6, 0x00, 0x42, 0x37, 0xe2, - 0x96, 0x3e, 0x60, 0xb1, 0x6f, 0x09, 0x19, 0x71, 0xe6, 0x81, 0x35, 0x68, 0xb3, 0x93, 0xb8, 0xc7, - 0xda, 0x96, 0x07, 0x21, 0x70, 0x26, 0xc1, 0x25, 0x31, 0x8f, 0x64, 0x84, 0xef, 0xe4, 0xc1, 0x84, - 0xc5, 0x3e, 0xd1, 0xc1, 0x64, 0x1c, 0xdc, 0xd8, 0xf4, 0x7c, 0xd9, 0x4b, 0xba, 0xc4, 0x89, 0x02, - 0xcb, 0x8b, 0xbc, 0xc8, 0x52, 0x39, 0xdd, 0xe4, 0x48, 0x59, 0xca, 0x50, 0x5f, 0x39, 0xab, 0xf1, - 0x68, 0x5a, 0x38, 0x60, 0x4e, 0xcf, 0x0f, 0x81, 0x9f, 0x5a, 0x71, 0xdf, 0xcb, 0x1c, 0xc2, 0x0a, - 0x40, 0x32, 0x6b, 0x30, 0xd7, 0x41, 0xc3, 0xba, 0x2a, 0x8b, 0x27, 0xa1, 0xf4, 0x03, 0x98, 0x4b, - 0x78, 0xfc, 0xa7, 0x04, 0xe1, 0xf4, 0x20, 0x60, 0x97, 0xf3, 0x5a, 0x9f, 0x8b, 0x68, 0xed, 0x55, - 0x74, 0x92, 0x04, 0xd0, 0x91, 0x92, 0x39, 0xbd, 0x00, 0x42, 0x89, 0xdf, 0xa1, 0x4a, 0xd6, 0x98, - 0xcb, 0x24, 0xab, 0x1b, 0xeb, 0xc6, 0x46, 0x75, 0xeb, 0x01, 0x99, 0x4a, 0x32, 0xe1, 0x93, 0xb8, - 0xef, 0x65, 0x0e, 0x41, 0xb2, 0x68, 0x32, 0x68, 0x93, 0xe7, 0xdd, 0x63, 0x70, 0xe4, 0x3e, 0x48, - 0x66, 0xe3, 0xe1, 0xa8, 0x59, 0x48, 0x47, 0x4d, 0x34, 0xf5, 0xd1, 0x09, 0x15, 0x1f, 0xa2, 0xb2, - 0x88, 0xc1, 0xa9, 0x17, 0x15, 0xbd, 0x4d, 0x7e, 0x23, 0x38, 0xb9, 0xdc, 0xde, 0x61, 0x0c, 0x8e, - 0xfd, 0x97, 0xc6, 0x97, 0x33, 0x8b, 0x2a, 0x18, 0x7e, 0x83, 0x96, 0x85, 0x64, 0x32, 0x11, 0xf5, - 0x92, 0xc2, 0x3e, 0xbc, 0x19, 0x56, 0xa5, 0xda, 0xff, 0x68, 0xf0, 0x72, 0x6e, 0x53, 0x8d, 0x6c, - 0x0d, 0x0d, 0x54, 0xbb, 0x9c, 0xb2, 0xe7, 0x0b, 0x89, 0xdf, 0xce, 0x89, 0x45, 0xae, 0x27, 0x56, - 0x96, 0xad, 0xa4, 0x5a, 
0xd3, 0x25, 0x2b, 0x63, 0xcf, 0x8c, 0x50, 0x14, 0x2d, 0xf9, 0x12, 0x02, - 0x51, 0x2f, 0xae, 0x97, 0x36, 0xaa, 0x5b, 0x9b, 0x37, 0x1a, 0xc9, 0xfe, 0x5b, 0x93, 0x97, 0x9e, - 0x66, 0x0c, 0x9a, 0xa3, 0x5a, 0x47, 0xe8, 0xbf, 0xb9, 0xe1, 0xa3, 0x84, 0x3b, 0x80, 0xf7, 0x50, - 0x2d, 0x06, 0x2e, 0x7c, 0x21, 0x21, 0x94, 0x79, 0xcc, 0x01, 0x0b, 0x40, 0xcd, 0xb5, 0x6a, 0xd7, - 0xd3, 0x51, 0xb3, 0xf6, 0x62, 0xc1, 0x39, 0x5d, 0x98, 0xd5, 0xfa, 0xb2, 0x40, 0xb2, 0xec, 0xba, - 0xf0, 0x7d, 0x54, 0x61, 0xca, 0x03, 0x5c, 0xa3, 0x27, 0x12, 0x74, 0xb4, 0x9f, 0x4e, 0x22, 0xd4, - 0xb5, 0xaa, 0xf6, 0xf4, 0xb6, 0xdc, 0xf0, 0x5a, 0x55, 0xea, 0xcc, 0xb5, 0x2a, 0x9b, 0x6a, 0x64, - 0xd6, 0x4a, 0x18, 0xb9, 0xf9, 0x94, 0xa5, 0x5f, 0x5b, 0x39, 0xd0, 0x7e, 0x3a, 0x89, 0x68, 0xfd, - 0x28, 0x2d, 0x90, 0x4e, 0xed, 0xc7, 0xcc, 0x4c, 0xae, 0x9a, 0xa9, 0x32, 0x37, 0x93, 0x3b, 0x99, - 0xc9, 0xc5, 0x9f, 0x0c, 0x84, 0xd9, 0x04, 0xb1, 0x3f, 0xde, 0x9f, 0xfc, 0x92, 0x9f, 0xdd, 0x62, - 0x6f, 0x49, 0x67, 0x8e, 0xb6, 0x1b, 0x4a, 0x7e, 0x6a, 0x37, 0x74, 0x17, 0x78, 0x3e, 0x80, 0x2e, - 0x68, 0x01, 0x1f, 0xa3, 0x6a, 0xee, 0xdd, 0xe5, 0x3c, 0xe2, 0xfa, 0x25, 0x6d, 0x5c, 0xa3, 0x23, - 0x15, 0x6f, 0x9b, 0xe9, 0xa8, 0x59, 0xed, 0x4c, 0x01, 0xdf, 0x47, 0xcd, 0xea, 0xcc, 0x39, 0x9d, - 0x85, 0x67, 0xb5, 0x5c, 0x98, 0xd6, 0x2a, 0xdf, 0xa6, 0xd6, 0x0e, 0x5c, 0x5d, 0x6b, 0x06, 0xde, - 0xd8, 0x45, 0xff, 0x5f, 0x21, 0x11, 0x5e, 0x43, 0xa5, 0x3e, 0x9c, 0xe6, 0x9b, 0x48, 0xb3, 0x4f, - 0x5c, 0x43, 0x4b, 0x03, 0x76, 0x92, 0xe4, 0x1b, 0xb7, 0x4a, 0x73, 0xe3, 0x49, 0x71, 0xdb, 0x68, - 0x7d, 0x30, 0xd0, 0x6c, 0x0d, 0xbc, 0x87, 0xca, 0xd9, 0xef, 0x55, 0xbf, 0xfc, 0x7b, 0xd7, 0x7b, - 0xf9, 0x2f, 0xfd, 0x00, 0xa6, 0x7f, 0xb0, 0xcc, 0xa2, 0x8a, 0x82, 0xef, 0xa2, 0x95, 0x00, 0x84, - 0x60, 0x9e, 0xae, 0x6c, 0xff, 0xab, 0x83, 0x56, 0xf6, 0x73, 0x37, 0x1d, 0x9f, 0xdb, 0x64, 0x78, - 0x61, 0x16, 0xce, 0x2e, 0xcc, 0xc2, 0xf9, 0x85, 0x59, 0x78, 0x9f, 0x9a, 0xc6, 0x30, 0x35, 0x8d, - 0xb3, 0xd4, 0x34, 0xce, 0x53, 0xd3, 0xf8, 0x9a, 0x9a, 0xc6, 0xc7, 0x6f, 0x66, 0xe1, 0x75, 0x65, - 0x2c, 0xdc, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x42, 0xba, 0xdb, 0x12, 0x1a, 0x07, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto index fdc4ad257..760196392 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.proto @@ -21,6 +21,7 @@ syntax = 'proto2'; package k8s.io.api.storage.v1alpha1; +import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -34,7 +35,7 @@ option go_package = "v1alpha1"; // VolumeAttachment objects are non-namespaced. message VolumeAttachment { // Standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -52,7 +53,7 @@ message VolumeAttachment { // VolumeAttachmentList is a collection of VolumeAttachment objects. 
message VolumeAttachmentList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -68,6 +69,15 @@ message VolumeAttachmentSource { // Name of the persistent volume to attach. // +optional optional string persistentVolumeName = 1; + + // inlineVolumeSpec contains all the information necessary to attach + // a persistent volume defined by a pod's inline VolumeSource. This field + // is populated only for the CSIMigration feature. It contains + // translated fields from a pod's inline VolumeSource to a + // PersistentVolumeSpec. This field is alpha-level and is only + // honored by servers that enabled the CSIMigration feature. + // +optional + optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. diff --git a/vendor/k8s.io/api/storage/v1alpha1/types.go b/vendor/k8s.io/api/storage/v1alpha1/types.go index 964bb5f7b..39408857c 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types.go @@ -16,7 +16,10 @@ limitations under the License. package v1alpha1 -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) // +genclient // +genclient:nonNamespaced @@ -30,7 +33,7 @@ type VolumeAttachment struct { metav1.TypeMeta `json:",inline"` // Standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -51,7 +54,7 @@ type VolumeAttachment struct { type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -81,7 +84,14 @@ type VolumeAttachmentSource struct { // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` - // Placeholder for *VolumeSource to accommodate inline volumes in pods. + // inlineVolumeSpec contains all the information necessary to attach + // a persistent volume defined by a pod's inline VolumeSource. This field + // is populated only for the CSIMigration feature. It contains + // translated fields from a pod's inline VolumeSource to a + // PersistentVolumeSpec. This field is alpha-level and is only + // honored by servers that enabled the CSIMigration feature. + // +optional + InlineVolumeSpec *v1.PersistentVolumeSpec `json:"inlineVolumeSpec,omitempty" protobuf:"bytes,2,opt,name=inlineVolumeSpec"` } // VolumeAttachmentStatus is the status of a VolumeAttachment request. 
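The hunks above add an optional inlineVolumeSpec field to VolumeAttachmentSource in the vendored storage/v1alpha1 API, carrying a PersistentVolumeSpec translated from a pod's in-tree inline volume when the CSIMigration feature is enabled. The following is a minimal sketch of how an attachment object using that field might be constructed from these vendored types; the driver name, volume handle, and node name are hypothetical placeholders, not values taken from this change.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	storagev1alpha1 "k8s.io/api/storage/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Translated spec that a CSIMigration-aware component might produce from a
	// pod's in-tree inline volume; driver and volume handle are placeholders.
	inline := &corev1.PersistentVolumeSpec{
		PersistentVolumeSource: corev1.PersistentVolumeSource{
			CSI: &corev1.CSIPersistentVolumeSource{
				Driver:       "example.csi.driver.io",
				VolumeHandle: "vol-0123456789abcdef0",
			},
		},
	}

	va := storagev1alpha1.VolumeAttachment{
		ObjectMeta: metav1.ObjectMeta{Name: "example-attachment"},
		Spec: storagev1alpha1.VolumeAttachmentSpec{
			Attacher: "example.csi.driver.io",
			NodeName: "node-1",
			// With CSIMigration enabled, the translated inline volume is carried
			// here; otherwise Source would reference an existing PersistentVolume
			// by name via PersistentVolumeName, as before this change.
			Source: storagev1alpha1.VolumeAttachmentSource{
				InlineVolumeSpec: inline,
			},
		},
	}

	fmt.Printf("attach %s to node %s via %s\n", va.Name, va.Spec.NodeName, va.Spec.Attacher)
}

Without CSIMigration, only the pre-existing PersistentVolumeName path is used; the new field is declared optional in both the proto and the Go type above, so older clients remain unaffected.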
diff --git a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go index 3701b0864..2e8216166 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1alpha1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", - "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", } @@ -40,7 +40,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of VolumeAttachments", } diff --git a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go index e27c6ff3f..3debf9df1 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go @@ -21,6 +21,7 @@ limitations under the License. package v1alpha1 import ( + v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -56,7 +57,7 @@ func (in *VolumeAttachment) DeepCopyObject() runtime.Object { func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]VolumeAttachment, len(*in)) @@ -93,6 +94,11 @@ func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) { *out = new(string) **out = **in } + if in.InlineVolumeSpec != nil { + in, out := &in.InlineVolumeSpec, &out.InlineVolumeSpec + *out = new(v1.PersistentVolumeSpec) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go index 0cde6ec08..cd35af34f 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go @@ -17,43 +17,23 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto - - It has these top-level messages: - CSIDriver - CSIDriverList - CSIDriverSpec - CSINode - CSINodeDriver - CSINodeList - CSINodeSpec - StorageClass - StorageClassList - VolumeAttachment - VolumeAttachmentList - VolumeAttachmentSource - VolumeAttachmentSpec - VolumeAttachmentStatus - VolumeError -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_api_core_v1 "k8s.io/api/core/v1" +import ( + fmt "fmt" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -66,65 +46,453 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *CSIDriver) Reset() { *m = CSIDriver{} } -func (*CSIDriver) ProtoMessage() {} -func (*CSIDriver) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *CSIDriver) Reset() { *m = CSIDriver{} } +func (*CSIDriver) ProtoMessage() {} +func (*CSIDriver) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{0} +} +func (m *CSIDriver) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSIDriver) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSIDriver.Merge(m, src) +} +func (m *CSIDriver) XXX_Size() int { + return m.Size() +} +func (m *CSIDriver) XXX_DiscardUnknown() { + xxx_messageInfo_CSIDriver.DiscardUnknown(m) +} + +var xxx_messageInfo_CSIDriver proto.InternalMessageInfo + +func (m *CSIDriverList) Reset() { *m = CSIDriverList{} } +func (*CSIDriverList) ProtoMessage() {} +func (*CSIDriverList) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{1} +} +func (m *CSIDriverList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIDriverList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSIDriverList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSIDriverList.Merge(m, src) +} +func (m *CSIDriverList) XXX_Size() int { + return m.Size() +} +func (m *CSIDriverList) XXX_DiscardUnknown() { + xxx_messageInfo_CSIDriverList.DiscardUnknown(m) +} + +var xxx_messageInfo_CSIDriverList proto.InternalMessageInfo + +func (m *CSIDriverSpec) Reset() { *m = CSIDriverSpec{} } +func (*CSIDriverSpec) ProtoMessage() {} +func (*CSIDriverSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{2} +} +func (m *CSIDriverSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIDriverSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m 
*CSIDriverSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSIDriverSpec.Merge(m, src) +} +func (m *CSIDriverSpec) XXX_Size() int { + return m.Size() +} +func (m *CSIDriverSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CSIDriverSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CSIDriverSpec proto.InternalMessageInfo + +func (m *CSINode) Reset() { *m = CSINode{} } +func (*CSINode) ProtoMessage() {} +func (*CSINode) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{3} +} +func (m *CSINode) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSINode) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINode.Merge(m, src) +} +func (m *CSINode) XXX_Size() int { + return m.Size() +} +func (m *CSINode) XXX_DiscardUnknown() { + xxx_messageInfo_CSINode.DiscardUnknown(m) +} + +var xxx_messageInfo_CSINode proto.InternalMessageInfo + +func (m *CSINodeDriver) Reset() { *m = CSINodeDriver{} } +func (*CSINodeDriver) ProtoMessage() {} +func (*CSINodeDriver) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{4} +} +func (m *CSINodeDriver) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINodeDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSINodeDriver) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINodeDriver.Merge(m, src) +} +func (m *CSINodeDriver) XXX_Size() int { + return m.Size() +} +func (m *CSINodeDriver) XXX_DiscardUnknown() { + xxx_messageInfo_CSINodeDriver.DiscardUnknown(m) +} + +var xxx_messageInfo_CSINodeDriver proto.InternalMessageInfo + +func (m *CSINodeList) Reset() { *m = CSINodeList{} } +func (*CSINodeList) ProtoMessage() {} +func (*CSINodeList) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{5} +} +func (m *CSINodeList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINodeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSINodeList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINodeList.Merge(m, src) +} +func (m *CSINodeList) XXX_Size() int { + return m.Size() +} +func (m *CSINodeList) XXX_DiscardUnknown() { + xxx_messageInfo_CSINodeList.DiscardUnknown(m) +} + +var xxx_messageInfo_CSINodeList proto.InternalMessageInfo + +func (m *CSINodeSpec) Reset() { *m = CSINodeSpec{} } +func (*CSINodeSpec) ProtoMessage() {} +func (*CSINodeSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{6} +} +func (m *CSINodeSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINodeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSINodeSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINodeSpec.Merge(m, src) +} +func (m *CSINodeSpec) XXX_Size() int { + return m.Size() +} +func (m *CSINodeSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CSINodeSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CSINodeSpec proto.InternalMessageInfo + +func (m *StorageClass) Reset() { *m 
= StorageClass{} } +func (*StorageClass) ProtoMessage() {} +func (*StorageClass) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{7} +} +func (m *StorageClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageClass.Merge(m, src) +} +func (m *StorageClass) XXX_Size() int { + return m.Size() +} +func (m *StorageClass) XXX_DiscardUnknown() { + xxx_messageInfo_StorageClass.DiscardUnknown(m) +} -func (m *CSIDriverList) Reset() { *m = CSIDriverList{} } -func (*CSIDriverList) ProtoMessage() {} -func (*CSIDriverList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_StorageClass proto.InternalMessageInfo -func (m *CSIDriverSpec) Reset() { *m = CSIDriverSpec{} } -func (*CSIDriverSpec) ProtoMessage() {} -func (*CSIDriverSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *StorageClassList) Reset() { *m = StorageClassList{} } +func (*StorageClassList) ProtoMessage() {} +func (*StorageClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{8} +} +func (m *StorageClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageClassList.Merge(m, src) +} +func (m *StorageClassList) XXX_Size() int { + return m.Size() +} +func (m *StorageClassList) XXX_DiscardUnknown() { + xxx_messageInfo_StorageClassList.DiscardUnknown(m) +} -func (m *CSINode) Reset() { *m = CSINode{} } -func (*CSINode) ProtoMessage() {} -func (*CSINode) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_StorageClassList proto.InternalMessageInfo -func (m *CSINodeDriver) Reset() { *m = CSINodeDriver{} } -func (*CSINodeDriver) ProtoMessage() {} -func (*CSINodeDriver) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } +func (*VolumeAttachment) ProtoMessage() {} +func (*VolumeAttachment) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{9} +} +func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachment) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachment.Merge(m, src) +} +func (m *VolumeAttachment) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachment) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachment.DiscardUnknown(m) +} -func (m *CSINodeList) Reset() { *m = CSINodeList{} } -func (*CSINodeList) ProtoMessage() {} -func (*CSINodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo -func (m *CSINodeSpec) Reset() { *m = CSINodeSpec{} } -func (*CSINodeSpec) ProtoMessage() {} -func 
(*CSINodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } +func (*VolumeAttachmentList) ProtoMessage() {} +func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{10} +} +func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentList.Merge(m, src) +} +func (m *VolumeAttachmentList) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentList) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentList.DiscardUnknown(m) +} -func (m *StorageClass) Reset() { *m = StorageClass{} } -func (*StorageClass) ProtoMessage() {} -func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo -func (m *StorageClassList) Reset() { *m = StorageClassList{} } -func (*StorageClassList) ProtoMessage() {} -func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } +func (*VolumeAttachmentSource) ProtoMessage() {} +func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{11} +} +func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSource.Merge(m, src) +} +func (m *VolumeAttachmentSource) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSource.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo -func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } -func (*VolumeAttachment) ProtoMessage() {} -func (*VolumeAttachment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } +func (*VolumeAttachmentSpec) ProtoMessage() {} +func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{12} +} +func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSpec.Merge(m, src) +} +func (m *VolumeAttachmentSpec) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSpec.DiscardUnknown(m) +} -func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } -func (*VolumeAttachmentList) ProtoMessage() {} -func (*VolumeAttachmentList) Descriptor() ([]byte, 
[]int) { return fileDescriptorGenerated, []int{10} } +var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo -func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } -func (*VolumeAttachmentSource) ProtoMessage() {} -func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } +func (*VolumeAttachmentStatus) ProtoMessage() {} +func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{13} +} +func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentStatus.Merge(m, src) +} +func (m *VolumeAttachmentStatus) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentStatus.DiscardUnknown(m) +} -func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } -func (*VolumeAttachmentSpec) ProtoMessage() {} -func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo -func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } -func (*VolumeAttachmentStatus) ProtoMessage() {} -func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (m *VolumeError) Reset() { *m = VolumeError{} } +func (*VolumeError) ProtoMessage() {} +func (*VolumeError) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{14} +} +func (m *VolumeError) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeError) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeError.Merge(m, src) +} +func (m *VolumeError) XXX_Size() int { + return m.Size() +} +func (m *VolumeError) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeError.DiscardUnknown(m) +} -func (m *VolumeError) Reset() { *m = VolumeError{} } -func (*VolumeError) ProtoMessage() {} -func (*VolumeError) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +var xxx_messageInfo_VolumeError proto.InternalMessageInfo + +func (m *VolumeNodeResources) Reset() { *m = VolumeNodeResources{} } +func (*VolumeNodeResources) ProtoMessage() {} +func (*VolumeNodeResources) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{15} +} +func (m *VolumeNodeResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeNodeResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeNodeResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeNodeResources.Merge(m, src) +} +func (m *VolumeNodeResources) XXX_Size() int { + return m.Size() +} +func (m *VolumeNodeResources) XXX_DiscardUnknown() { + 
xxx_messageInfo_VolumeNodeResources.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeNodeResources proto.InternalMessageInfo func init() { proto.RegisterType((*CSIDriver)(nil), "k8s.io.api.storage.v1beta1.CSIDriver") @@ -135,18 +503,114 @@ func init() { proto.RegisterType((*CSINodeList)(nil), "k8s.io.api.storage.v1beta1.CSINodeList") proto.RegisterType((*CSINodeSpec)(nil), "k8s.io.api.storage.v1beta1.CSINodeSpec") proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1beta1.StorageClass") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1beta1.StorageClass.ParametersEntry") proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1beta1.StorageClassList") proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachment") proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentList") proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSource") proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSpec") proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus.AttachmentMetadataEntry") proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1beta1.VolumeError") + proto.RegisterType((*VolumeNodeResources)(nil), "k8s.io.api.storage.v1beta1.VolumeNodeResources") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto", fileDescriptor_7d2980599fd0de80) } + +var fileDescriptor_7d2980599fd0de80 = []byte{ + // 1344 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xbd, 0x6f, 0xdb, 0x46, + 0x1b, 0x37, 0x2d, 0x7f, 0x9e, 0xec, 0x44, 0xbe, 0x18, 0xef, 0xab, 0x57, 0x83, 0x64, 0xe8, 0x45, + 0x1b, 0x27, 0x48, 0xc8, 0x24, 0x48, 0x83, 0x20, 0x40, 0x07, 0xd3, 0x31, 0x50, 0x25, 0x96, 0xe3, + 0x9e, 0x8d, 0xa0, 0x08, 0x3a, 0xf4, 0x44, 0x3e, 0x91, 0x19, 0x93, 0x3c, 0x86, 0x3c, 0xa9, 0xd5, + 0xd6, 0xa9, 0x73, 0xd1, 0xa1, 0x7f, 0x41, 0xff, 0x85, 0x16, 0x68, 0x97, 0x8e, 0xcd, 0x54, 0x04, + 0x9d, 0x32, 0x09, 0x0d, 0xbb, 0x76, 0xeb, 0x66, 0x74, 0x28, 0xee, 0x78, 0x12, 0x29, 0x89, 0x8a, + 0xed, 0x0e, 0xde, 0x78, 0xcf, 0xc7, 0xef, 0xf9, 0x7e, 0xee, 0x88, 0xb6, 0x8f, 0xef, 0x47, 0xba, + 0xc3, 0x8c, 0xe3, 0x4e, 0x0b, 0x42, 0x1f, 0x38, 0x44, 0x46, 0x17, 0x7c, 0x9b, 0x85, 0x86, 0x62, + 0xd0, 0xc0, 0x31, 0x22, 0xce, 0x42, 0xda, 0x06, 0xa3, 0x7b, 0xbb, 0x05, 0x9c, 0xde, 0x36, 0xda, + 0xe0, 0x43, 0x48, 0x39, 0xd8, 0x7a, 0x10, 0x32, 0xce, 0x70, 0x25, 0x91, 0xd5, 0x69, 0xe0, 0xe8, + 0x4a, 0x56, 0x57, 0xb2, 0x95, 0x9b, 0x6d, 0x87, 0x1f, 0x75, 0x5a, 0xba, 0xc5, 0x3c, 0xa3, 0xcd, + 0xda, 0xcc, 0x90, 0x2a, 0xad, 0xce, 0x73, 0x79, 0x92, 0x07, 0xf9, 0x95, 0x40, 0x55, 0xea, 0x19, + 0xb3, 0x16, 0x0b, 0x85, 0xcd, 0x71, 0x73, 0x95, 0xbb, 0xa9, 0x8c, 0x47, 0xad, 0x23, 0xc7, 0x87, + 0xb0, 0x67, 0x04, 0xc7, 0x6d, 0x41, 0x88, 0x0c, 0x0f, 0x38, 0xcd, 0xd3, 0x32, 0xa6, 0x69, 0x85, + 0x1d, 0x9f, 0x3b, 0x1e, 0x4c, 0x28, 0xdc, 0x3b, 0x4d, 0x21, 0xb2, 0x8e, 0xc0, 0xa3, 0xe3, 0x7a, + 0xf5, 0x9f, 0x34, 0xb4, 0xbc, 0x7d, 0xd0, 0x78, 0x18, 0x3a, 0x5d, 0x08, 0xf1, 0x67, 0x68, 0x49, + 0x78, 0x64, 0x53, 0x4e, 0xcb, 0xda, 0x86, 0xb6, 0x59, 0xbc, 0x73, 0x4b, 0x4f, 0xd3, 0x35, 0x04, + 0xd6, 0x83, 0xe3, 0xb6, 0x20, 0x44, 0xba, 0x90, 0xd6, 0xbb, 0xb7, 0xf5, 0x27, 0xad, 0x17, 0x60, + 0xf1, 0x26, 0x70, 0x6a, 0xe2, 
0x57, 0xfd, 0xda, 0x4c, 0xdc, 0xaf, 0xa1, 0x94, 0x46, 0x86, 0xa8, + 0xf8, 0x31, 0x9a, 0x8b, 0x02, 0xb0, 0xca, 0xb3, 0x12, 0xfd, 0x9a, 0x3e, 0xbd, 0x18, 0xfa, 0xd0, + 0xad, 0x83, 0x00, 0x2c, 0x73, 0x45, 0xc1, 0xce, 0x89, 0x13, 0x91, 0x20, 0xf5, 0x1f, 0x35, 0xb4, + 0x3a, 0x94, 0xda, 0x75, 0x22, 0x8e, 0x3f, 0x9d, 0x08, 0x40, 0x3f, 0x5b, 0x00, 0x42, 0x5b, 0xba, + 0x5f, 0x52, 0x76, 0x96, 0x06, 0x94, 0x8c, 0xf3, 0x8f, 0xd0, 0xbc, 0xc3, 0xc1, 0x8b, 0xca, 0xb3, + 0x1b, 0x85, 0xcd, 0xe2, 0x9d, 0xf7, 0xce, 0xe4, 0xbd, 0xb9, 0xaa, 0x10, 0xe7, 0x1b, 0x42, 0x97, + 0x24, 0x10, 0xf5, 0x3f, 0xb3, 0xbe, 0x8b, 0x98, 0xf0, 0x03, 0x74, 0x89, 0x72, 0x4e, 0xad, 0x23, + 0x02, 0x2f, 0x3b, 0x4e, 0x08, 0xb6, 0x8c, 0x60, 0xc9, 0xc4, 0x71, 0xbf, 0x76, 0x69, 0x6b, 0x84, + 0x43, 0xc6, 0x24, 0x85, 0x6e, 0xc0, 0xec, 0x86, 0xff, 0x9c, 0x3d, 0xf1, 0x9b, 0xac, 0xe3, 0x73, + 0x99, 0x60, 0xa5, 0xbb, 0x3f, 0xc2, 0x21, 0x63, 0x92, 0xd8, 0x42, 0xeb, 0x5d, 0xe6, 0x76, 0x3c, + 0xd8, 0x75, 0x9e, 0x83, 0xd5, 0xb3, 0x5c, 0x68, 0x32, 0x1b, 0xa2, 0x72, 0x61, 0xa3, 0xb0, 0xb9, + 0x6c, 0x1a, 0x71, 0xbf, 0xb6, 0xfe, 0x34, 0x87, 0x7f, 0xd2, 0xaf, 0x5d, 0xc9, 0xa1, 0x93, 0x5c, + 0xb0, 0xfa, 0x0f, 0x1a, 0x5a, 0xdc, 0x3e, 0x68, 0xec, 0x31, 0x1b, 0x2e, 0xa0, 0xcb, 0x1a, 0x23, + 0x5d, 0x76, 0xf5, 0x94, 0x3a, 0x09, 0xa7, 0xa6, 0xf6, 0xd8, 0x5f, 0x49, 0x9d, 0x84, 0x8c, 0x1a, + 0x92, 0x0d, 0x34, 0xe7, 0x53, 0x0f, 0xa4, 0xeb, 0xcb, 0xa9, 0xce, 0x1e, 0xf5, 0x80, 0x48, 0x0e, + 0x7e, 0x1f, 0x2d, 0xf8, 0xcc, 0x86, 0xc6, 0x43, 0xe9, 0xc0, 0xb2, 0x79, 0x49, 0xc9, 0x2c, 0xec, + 0x49, 0x2a, 0x51, 0x5c, 0x7c, 0x17, 0xad, 0x70, 0x16, 0x30, 0x97, 0xb5, 0x7b, 0x8f, 0xa1, 0x37, + 0xc8, 0x78, 0x29, 0xee, 0xd7, 0x56, 0x0e, 0x33, 0x74, 0x32, 0x22, 0x85, 0x5b, 0xa8, 0x48, 0x5d, + 0x97, 0x59, 0x94, 0xd3, 0x96, 0x0b, 0xe5, 0x39, 0x19, 0xa3, 0xf1, 0xae, 0x18, 0x93, 0x32, 0x09, + 0xe3, 0x04, 0x22, 0xd6, 0x09, 0x2d, 0x88, 0xcc, 0xcb, 0x71, 0xbf, 0x56, 0xdc, 0x4a, 0x71, 0x48, + 0x16, 0xb4, 0xfe, 0xbd, 0x86, 0x8a, 0x2a, 0xea, 0x0b, 0x98, 0xab, 0x8f, 0x46, 0xe7, 0xea, 0xff, + 0x67, 0xa8, 0xd7, 0x94, 0xa9, 0xb2, 0x86, 0x6e, 0xcb, 0x91, 0x3a, 0x44, 0x8b, 0xb6, 0x2c, 0x5a, + 0x54, 0xd6, 0x24, 0xf4, 0xb5, 0x33, 0x40, 0xab, 0xb1, 0xbd, 0xac, 0x0c, 0x2c, 0x26, 0xe7, 0x88, + 0x0c, 0xa0, 0xea, 0xdf, 0x2c, 0xa0, 0x95, 0x83, 0x44, 0x77, 0xdb, 0xa5, 0x51, 0x74, 0x01, 0x0d, + 0xfd, 0x01, 0x2a, 0x06, 0x21, 0xeb, 0x3a, 0x91, 0xc3, 0x7c, 0x08, 0x55, 0x5b, 0x5d, 0x51, 0x2a, + 0xc5, 0xfd, 0x94, 0x45, 0xb2, 0x72, 0xd8, 0x45, 0x28, 0xa0, 0x21, 0xf5, 0x80, 0x8b, 0x14, 0x14, + 0x64, 0x0a, 0xee, 0xbf, 0x2b, 0x05, 0xd9, 0xb0, 0xf4, 0xfd, 0xa1, 0xea, 0x8e, 0xcf, 0xc3, 0x5e, + 0xea, 0x62, 0xca, 0x20, 0x19, 0x7c, 0x7c, 0x8c, 0x56, 0x43, 0xb0, 0x5c, 0xea, 0x78, 0xfb, 0xcc, + 0x75, 0xac, 0x9e, 0x6c, 0xcd, 0x65, 0x73, 0x27, 0xee, 0xd7, 0x56, 0x49, 0x96, 0x71, 0xd2, 0xaf, + 0xdd, 0x9a, 0xbc, 0x3a, 0xf5, 0x7d, 0x08, 0x23, 0x27, 0xe2, 0xe0, 0xf3, 0xa4, 0x61, 0x47, 0x74, + 0xc8, 0x28, 0xb6, 0x98, 0x1d, 0x4f, 0xac, 0xaf, 0x27, 0x01, 0x77, 0x98, 0x1f, 0x95, 0xe7, 0xd3, + 0xd9, 0x69, 0x66, 0xe8, 0x64, 0x44, 0x0a, 0xef, 0xa2, 0x75, 0xd1, 0xe6, 0x9f, 0x27, 0x06, 0x76, + 0xbe, 0x08, 0xa8, 0x2f, 0x52, 0x55, 0x5e, 0x90, 0xdb, 0xb2, 0x2c, 0x76, 0xdd, 0x56, 0x0e, 0x9f, + 0xe4, 0x6a, 0xe1, 0x4f, 0xd0, 0x5a, 0xb2, 0xec, 0x4c, 0xc7, 0xb7, 0x1d, 0xbf, 0x2d, 0x56, 0x5d, + 0x79, 0x51, 0x06, 0x7d, 0x3d, 0xee, 0xd7, 0xd6, 0x9e, 0x8e, 0x33, 0x4f, 0xf2, 0x88, 0x64, 0x12, + 0x04, 0xbf, 0x44, 0x6b, 0xd2, 0x22, 0xd8, 0x6a, 0x11, 0x38, 0x10, 0x95, 0x97, 0x64, 0xfd, 0x36, + 0xb3, 0xf5, 0x13, 0xa9, 0x13, 0x8d, 0x34, 0x58, 0x17, 
0x07, 0xe0, 0x82, 0xc5, 0x59, 0x78, 0x08, + 0xa1, 0x67, 0xfe, 0x4f, 0xd5, 0x6b, 0x6d, 0x6b, 0x1c, 0x8a, 0x4c, 0xa2, 0x57, 0x3e, 0x44, 0x97, + 0xc7, 0x0a, 0x8e, 0x4b, 0xa8, 0x70, 0x0c, 0xbd, 0x64, 0xd1, 0x11, 0xf1, 0x89, 0xd7, 0xd1, 0x7c, + 0x97, 0xba, 0x1d, 0x48, 0x3a, 0x90, 0x24, 0x87, 0x07, 0xb3, 0xf7, 0xb5, 0xfa, 0xcf, 0x1a, 0x2a, + 0x65, 0xbb, 0xe7, 0x02, 0xd6, 0x46, 0x73, 0x74, 0x6d, 0x6c, 0x9e, 0xb5, 0xb1, 0xa7, 0xec, 0x8e, + 0xef, 0x66, 0x51, 0x29, 0x29, 0x4e, 0x72, 0xd9, 0x7a, 0xe0, 0xf3, 0x0b, 0x18, 0x6d, 0x32, 0x72, + 0x57, 0xdd, 0x3a, 0x7d, 0x8f, 0xa7, 0xde, 0x4d, 0xbb, 0xb4, 0xf0, 0x33, 0xb4, 0x10, 0x71, 0xca, + 0x3b, 0x62, 0xe6, 0x05, 0xea, 0x9d, 0x73, 0xa1, 0x4a, 0xcd, 0xf4, 0xd2, 0x4a, 0xce, 0x44, 0x21, + 0xd6, 0x7f, 0xd1, 0xd0, 0xfa, 0xb8, 0xca, 0x05, 0x14, 0xfb, 0xe3, 0xd1, 0x62, 0xdf, 0x38, 0x4f, + 0x44, 0x53, 0x0a, 0xfe, 0x9b, 0x86, 0xfe, 0x33, 0x11, 0xbc, 0xbc, 0x1e, 0xc5, 0x9e, 0x08, 0xc6, + 0xb6, 0xd1, 0x5e, 0x7a, 0xe7, 0xcb, 0x3d, 0xb1, 0x9f, 0xc3, 0x27, 0xb9, 0x5a, 0xf8, 0x05, 0x2a, + 0x39, 0xbe, 0xeb, 0xf8, 0x90, 0xd0, 0x0e, 0xd2, 0x72, 0xe7, 0x0e, 0xf3, 0x38, 0xb2, 0x2c, 0xf3, + 0x7a, 0xdc, 0xaf, 0x95, 0x1a, 0x63, 0x28, 0x64, 0x02, 0xb7, 0xfe, 0x6b, 0x4e, 0x79, 0xe4, 0x5d, + 0x78, 0x03, 0x2d, 0x25, 0x8f, 0x46, 0x08, 0x55, 0x18, 0xc3, 0x74, 0x6f, 0x29, 0x3a, 0x19, 0x4a, + 0xc8, 0x0e, 0x92, 0xa9, 0x50, 0x8e, 0x9e, 0xaf, 0x83, 0xa4, 0x66, 0xa6, 0x83, 0xe4, 0x99, 0x28, + 0x44, 0xe1, 0x89, 0x78, 0x00, 0xc9, 0x84, 0x16, 0x46, 0x3d, 0xd9, 0x53, 0x74, 0x32, 0x94, 0xa8, + 0xff, 0x5d, 0xc8, 0xa9, 0x92, 0x6c, 0xc5, 0x4c, 0x48, 0x83, 0xb7, 0xf2, 0x78, 0x48, 0xf6, 0x30, + 0x24, 0x1b, 0x7f, 0xab, 0x21, 0x4c, 0x87, 0x10, 0xcd, 0x41, 0xab, 0x26, 0xfd, 0xf4, 0xe8, 0xfc, + 0x13, 0xa2, 0x6f, 0x4d, 0x80, 0x25, 0xf7, 0x64, 0x45, 0x39, 0x81, 0x27, 0x05, 0x48, 0x8e, 0x07, + 0xd8, 0x41, 0xc5, 0x84, 0xba, 0x13, 0x86, 0x2c, 0x54, 0x23, 0x7b, 0xf5, 0x74, 0x87, 0xa4, 0xb8, + 0x59, 0x95, 0x0f, 0xb9, 0x54, 0xff, 0xa4, 0x5f, 0x2b, 0x66, 0xf8, 0x24, 0x8b, 0x2d, 0x4c, 0xd9, + 0x90, 0x9a, 0x9a, 0xfb, 0x17, 0xa6, 0x1e, 0xc2, 0x74, 0x53, 0x19, 0xec, 0xca, 0x0e, 0xfa, 0xef, + 0x94, 0x04, 0x9d, 0xeb, 0x5e, 0xf9, 0x4a, 0x43, 0x59, 0x1b, 0x78, 0x17, 0xcd, 0x89, 0xff, 0x59, + 0xb5, 0x61, 0xae, 0x9f, 0x6d, 0xc3, 0x1c, 0x3a, 0x1e, 0xa4, 0x8b, 0x52, 0x9c, 0x88, 0x44, 0xc1, + 0xd7, 0xd0, 0xa2, 0x07, 0x51, 0x44, 0xdb, 0xca, 0x72, 0xfa, 0xea, 0x6b, 0x26, 0x64, 0x32, 0xe0, + 0xd7, 0xef, 0xa1, 0x2b, 0x39, 0xef, 0x68, 0x5c, 0x43, 0xf3, 0x96, 0xfc, 0xe1, 0x12, 0x0e, 0xcd, + 0x9b, 0xcb, 0x62, 0xcb, 0x6c, 0xcb, 0xff, 0xac, 0x84, 0x6e, 0xde, 0x7c, 0xf5, 0xb6, 0x3a, 0xf3, + 0xfa, 0x6d, 0x75, 0xe6, 0xcd, 0xdb, 0xea, 0xcc, 0x97, 0x71, 0x55, 0x7b, 0x15, 0x57, 0xb5, 0xd7, + 0x71, 0x55, 0x7b, 0x13, 0x57, 0xb5, 0xdf, 0xe3, 0xaa, 0xf6, 0xf5, 0x1f, 0xd5, 0x99, 0x67, 0x8b, + 0x2a, 0xdf, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x72, 0xff, 0xde, 0x2e, 0xe4, 0x10, 0x00, 0x00, +} + func (m *CSIDriver) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -154,33 +618,42 @@ func (m *CSIDriver) Marshal() (dAtA []byte, err error) { } func (m *CSIDriver) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := 
m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CSIDriverList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -188,37 +661,46 @@ func (m *CSIDriverList) Marshal() (dAtA []byte, err error) { } func (m *CSIDriverList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSIDriverList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CSIDriverSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -226,37 +708,51 @@ func (m *CSIDriverSpec) Marshal() (dAtA []byte, err error) { } func (m *CSIDriverSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSIDriverSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.AttachRequired != nil { - dAtA[i] = 0x8 - i++ - if *m.AttachRequired { + if len(m.VolumeLifecycleModes) > 0 { + for iNdEx := len(m.VolumeLifecycleModes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.VolumeLifecycleModes[iNdEx]) + copy(dAtA[i:], m.VolumeLifecycleModes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeLifecycleModes[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.PodInfoOnMount != nil { + i-- + if *m.PodInfoOnMount { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - } - if m.PodInfoOnMount != nil { + i-- dAtA[i] = 0x10 - i++ - if *m.PodInfoOnMount { + } + if m.AttachRequired != nil { + i-- + if *m.AttachRequired { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *CSINode) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -264,33 +760,42 @@ func (m *CSINode) Marshal() (dAtA []byte, err error) { } func (m *CSINode) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINode) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CSINodeDriver) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -298,40 +803,53 @@ func (m *CSINodeDriver) Marshal() (dAtA []byte, err error) { } func (m *CSINodeDriver) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINodeDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeID))) - i += copy(dAtA[i:], m.NodeID) + if m.Allocatable != nil { + { + size, err := m.Allocatable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } if len(m.TopologyKeys) > 0 { - for _, s := range m.TopologyKeys { + for iNdEx := len(m.TopologyKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TopologyKeys[iNdEx]) + copy(dAtA[i:], m.TopologyKeys[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKeys[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + i -= len(m.NodeID) + copy(dAtA[i:], m.NodeID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeID))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CSINodeList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -339,37 +857,46 @@ func (m *CSINodeList) Marshal() (dAtA []byte, err error) { } func (m *CSINodeList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINodeList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := 
m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CSINodeSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -377,29 +904,36 @@ func (m *CSINodeSpec) Marshal() (dAtA []byte, err error) { } func (m *CSINodeSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINodeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Drivers) > 0 { - for _, msg := range m.Drivers { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Drivers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Drivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *StorageClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -407,100 +941,108 @@ func (m *StorageClass) Marshal() (dAtA []byte, err error) { } func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provisioner))) - i += copy(dAtA[i:], m.Provisioner) - if len(m.Parameters) > 0 { - keysForParameters := make([]string, 0, len(m.Parameters)) - for k := range m.Parameters { - keysForParameters = append(keysForParameters, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) - for _, k := range keysForParameters { - dAtA[i] = 0x1a - i++ - v := m.Parameters[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if m.ReclaimPolicy != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReclaimPolicy))) - i += copy(dAtA[i:], 
*m.ReclaimPolicy) - } - if len(m.MountOptions) > 0 { - for _, s := range m.MountOptions { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if len(m.AllowedTopologies) > 0 { + for iNdEx := len(m.AllowedTopologies) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedTopologies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x42 } } + if m.VolumeBindingMode != nil { + i -= len(*m.VolumeBindingMode) + copy(dAtA[i:], *m.VolumeBindingMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) + i-- + dAtA[i] = 0x3a + } if m.AllowVolumeExpansion != nil { - dAtA[i] = 0x30 - i++ + i-- if *m.AllowVolumeExpansion { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x30 + } + if len(m.MountOptions) > 0 { + for iNdEx := len(m.MountOptions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MountOptions[iNdEx]) + copy(dAtA[i:], m.MountOptions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountOptions[iNdEx]))) + i-- + dAtA[i] = 0x2a + } } - if m.VolumeBindingMode != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) - i += copy(dAtA[i:], *m.VolumeBindingMode) + if m.ReclaimPolicy != nil { + i -= len(*m.ReclaimPolicy) + copy(dAtA[i:], *m.ReclaimPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReclaimPolicy))) + i-- + dAtA[i] = 0x22 } - if len(m.AllowedTopologies) > 0 { - for _, msg := range m.AllowedTopologies { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.Parameters) > 0 { + keysForParameters := make([]string, 0, len(m.Parameters)) + for k := range m.Parameters { + keysForParameters = append(keysForParameters, string(k)) } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + for iNdEx := len(keysForParameters) - 1; iNdEx >= 0; iNdEx-- { + v := m.Parameters[string(keysForParameters[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForParameters[iNdEx]) + copy(dAtA[i:], keysForParameters[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForParameters[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Provisioner) + copy(dAtA[i:], m.Provisioner) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provisioner))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StorageClassList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -508,37 +1050,46 @@ func (m *StorageClassList) Marshal() (dAtA []byte, err error) { } func (m *StorageClassList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n8, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -546,41 +1097,52 @@ func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n10, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n11, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -588,37 +1150,46 @@ func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n12, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { 
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -626,23 +1197,41 @@ func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if m.InlineVolumeSpec != nil { + { + size, err := m.InlineVolumeSpec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } if m.PersistentVolumeName != nil { - dAtA[i] = 0xa - i++ + i -= len(*m.PersistentVolumeName) + copy(dAtA[i:], *m.PersistentVolumeName) i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) - i += copy(dAtA[i:], *m.PersistentVolumeName) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -650,33 +1239,42 @@ func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) - i += copy(dAtA[i:], m.Attacher) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) - n13, err := m.Source.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - dAtA[i] = 0x1a - i++ + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) - i += copy(dAtA[i:], m.NodeName) - return i, nil + i-- + dAtA[i] = 0x1a + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Attacher) + copy(dAtA[i:], m.Attacher) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -684,67 +1282,78 @@ func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentStatus) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Attached { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.DetachError != nil { + { + size, err := m.DetachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.AttachError != nil { + { + size, err := m.AttachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i++ if len(m.AttachmentMetadata) > 0 { keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) for k := range m.AttachmentMetadata { keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) - for _, k := range keysForAttachmentMetadata { + for iNdEx := len(keysForAttachmentMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.AttachmentMetadata[string(keysForAttachmentMetadata[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.AttachmentMetadata[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForAttachmentMetadata[iNdEx]) + copy(dAtA[i:], keysForAttachmentMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttachmentMetadata[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) } } - if m.AttachError != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AttachError.Size())) - n14, err := m.AttachError.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - } - if m.DetachError != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DetachError.Size())) - n15, err := m.DetachError.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n15 + i-- + if m.Attached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *VolumeError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -752,35 +1361,76 @@ func (m *VolumeError) Marshal() (dAtA []byte, err error) { } func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeError) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Time.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) - n16, err := m.Time.MarshalTo(dAtA[i:]) + return len(dAtA) - i, nil +} + +func (m *VolumeNodeResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) 
+ n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { - return 0, err + return nil, err } - i += n16 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + return dAtA[:n], nil +} + +func (m *VolumeNodeResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeNodeResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Count != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Count)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *CSIDriver) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -791,6 +1441,9 @@ func (m *CSIDriver) Size() (n int) { } func (m *CSIDriverList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -805,6 +1458,9 @@ func (m *CSIDriverList) Size() (n int) { } func (m *CSIDriverSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.AttachRequired != nil { @@ -813,10 +1469,19 @@ func (m *CSIDriverSpec) Size() (n int) { if m.PodInfoOnMount != nil { n += 2 } + if len(m.VolumeLifecycleModes) > 0 { + for _, s := range m.VolumeLifecycleModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } func (m *CSINode) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -827,6 +1492,9 @@ func (m *CSINode) Size() (n int) { } func (m *CSINodeDriver) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -839,10 +1507,17 @@ func (m *CSINodeDriver) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.Allocatable != nil { + l = m.Allocatable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } func (m *CSINodeList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -857,6 +1532,9 @@ func (m *CSINodeList) Size() (n int) { } func (m *CSINodeSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Drivers) > 0 { @@ -869,6 +1547,9 @@ func (m *CSINodeSpec) Size() (n int) { } func (m *StorageClass) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -910,6 +1591,9 @@ func (m *StorageClass) Size() (n int) { } func (m *StorageClassList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -924,6 +1608,9 @@ func (m *StorageClassList) Size() (n int) { } func (m *VolumeAttachment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -936,6 +1623,9 @@ func (m *VolumeAttachment) Size() (n int) { } func (m *VolumeAttachmentList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -950,16 +1640,26 @@ func (m *VolumeAttachmentList) Size() (n int) { } func (m *VolumeAttachmentSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.PersistentVolumeName != nil { l = len(*m.PersistentVolumeName) n += 1 + l + sovGenerated(uint64(l)) } + if m.InlineVolumeSpec != nil { + l = m.InlineVolumeSpec.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } func (m *VolumeAttachmentSpec) Size() (n int) { + if 
m == nil { + return 0 + } var l int _ = l l = len(m.Attacher) @@ -972,6 +1672,9 @@ func (m *VolumeAttachmentSpec) Size() (n int) { } func (m *VolumeAttachmentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -995,6 +1698,9 @@ func (m *VolumeAttachmentStatus) Size() (n int) { } func (m *VolumeError) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Time.Size() @@ -1004,16 +1710,21 @@ func (m *VolumeError) Size() (n int) { return n } -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } +func (m *VolumeNodeResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Count != nil { + n += 1 + sovGenerated(uint64(*m.Count)) } return n } + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } @@ -1022,7 +1733,7 @@ func (this *CSIDriver) String() string { return "nil" } s := strings.Join([]string{`&CSIDriver{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CSIDriverSpec", "CSIDriverSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -1032,9 +1743,14 @@ func (this *CSIDriverList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]CSIDriver{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CSIDriver", "CSIDriver", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&CSIDriverList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CSIDriver", "CSIDriver", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1046,6 +1762,7 @@ func (this *CSIDriverSpec) String() string { s := strings.Join([]string{`&CSIDriverSpec{`, `AttachRequired:` + valueToStringGenerated(this.AttachRequired) + `,`, `PodInfoOnMount:` + valueToStringGenerated(this.PodInfoOnMount) + `,`, + `VolumeLifecycleModes:` + fmt.Sprintf("%v", this.VolumeLifecycleModes) + `,`, `}`, }, "") return s @@ -1055,7 +1772,7 @@ func (this *CSINode) String() string { return "nil" } s := strings.Join([]string{`&CSINode{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CSINodeSpec", "CSINodeSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -1069,6 +1786,7 @@ func (this *CSINodeDriver) String() string { `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, `TopologyKeys:` + fmt.Sprintf("%v", this.TopologyKeys) + `,`, + `Allocatable:` + strings.Replace(this.Allocatable.String(), 
"VolumeNodeResources", "VolumeNodeResources", 1) + `,`, `}`, }, "") return s @@ -1077,9 +1795,14 @@ func (this *CSINodeList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]CSINode{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CSINode", "CSINode", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&CSINodeList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CSINode", "CSINode", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1088,8 +1811,13 @@ func (this *CSINodeSpec) String() string { if this == nil { return "nil" } + repeatedStringForDrivers := "[]CSINodeDriver{" + for _, f := range this.Drivers { + repeatedStringForDrivers += strings.Replace(strings.Replace(f.String(), "CSINodeDriver", "CSINodeDriver", 1), `&`, ``, 1) + "," + } + repeatedStringForDrivers += "}" s := strings.Join([]string{`&CSINodeSpec{`, - `Drivers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Drivers), "CSINodeDriver", "CSINodeDriver", 1), `&`, ``, 1) + `,`, + `Drivers:` + repeatedStringForDrivers + `,`, `}`, }, "") return s @@ -1098,6 +1826,11 @@ func (this *StorageClass) String() string { if this == nil { return "nil" } + repeatedStringForAllowedTopologies := "[]TopologySelectorTerm{" + for _, f := range this.AllowedTopologies { + repeatedStringForAllowedTopologies += fmt.Sprintf("%v", f) + "," + } + repeatedStringForAllowedTopologies += "}" keysForParameters := make([]string, 0, len(this.Parameters)) for k := range this.Parameters { keysForParameters = append(keysForParameters, k) @@ -1109,14 +1842,14 @@ func (this *StorageClass) String() string { } mapStringForParameters += "}" s := strings.Join([]string{`&StorageClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Provisioner:` + fmt.Sprintf("%v", this.Provisioner) + `,`, `Parameters:` + mapStringForParameters + `,`, `ReclaimPolicy:` + valueToStringGenerated(this.ReclaimPolicy) + `,`, `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, `AllowVolumeExpansion:` + valueToStringGenerated(this.AllowVolumeExpansion) + `,`, `VolumeBindingMode:` + valueToStringGenerated(this.VolumeBindingMode) + `,`, - `AllowedTopologies:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedTopologies), "TopologySelectorTerm", "k8s_io_api_core_v1.TopologySelectorTerm", 1), `&`, ``, 1) + `,`, + `AllowedTopologies:` + repeatedStringForAllowedTopologies + `,`, `}`, }, "") return s @@ -1125,9 +1858,14 @@ func (this *StorageClassList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]StorageClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StorageClass", "StorageClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&StorageClassList{`, - `ListMeta:` + 
strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1137,7 +1875,7 @@ func (this *VolumeAttachment) String() string { return "nil" } s := strings.Join([]string{`&VolumeAttachment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1148,9 +1886,14 @@ func (this *VolumeAttachmentList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]VolumeAttachment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&VolumeAttachmentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1161,6 +1904,7 @@ func (this *VolumeAttachmentSource) String() string { } s := strings.Join([]string{`&VolumeAttachmentSource{`, `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, + `InlineVolumeSpec:` + strings.Replace(fmt.Sprintf("%v", this.InlineVolumeSpec), "PersistentVolumeSpec", "v11.PersistentVolumeSpec", 1) + `,`, `}`, }, "") return s @@ -1194,8 +1938,8 @@ func (this *VolumeAttachmentStatus) String() string { s := strings.Join([]string{`&VolumeAttachmentStatus{`, `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, - `AttachError:` + strings.Replace(fmt.Sprintf("%v", this.AttachError), "VolumeError", "VolumeError", 1) + `,`, - `DetachError:` + strings.Replace(fmt.Sprintf("%v", this.DetachError), "VolumeError", "VolumeError", 1) + `,`, + `AttachError:` + strings.Replace(this.AttachError.String(), "VolumeError", "VolumeError", 1) + `,`, + `DetachError:` + strings.Replace(this.DetachError.String(), "VolumeError", "VolumeError", 1) + `,`, `}`, }, "") return s @@ -1205,12 +1949,22 @@ func (this *VolumeError) String() string { return "nil" } s := strings.Join([]string{`&VolumeError{`, - `Time:` + strings.Replace(strings.Replace(this.Time.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Time:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, 
`Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, }, "") return s } +func (this *VolumeNodeResources) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeNodeResources{`, + `Count:` + valueToStringGenerated(this.Count) + `,`, + `}`, + }, "") + return s +} func valueToStringGenerated(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -1234,7 +1988,7 @@ func (m *CSIDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1262,7 +2016,7 @@ func (m *CSIDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1271,6 +2025,9 @@ func (m *CSIDriver) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1292,7 +2049,7 @@ func (m *CSIDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1301,6 +2058,9 @@ func (m *CSIDriver) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1317,6 +2077,9 @@ func (m *CSIDriver) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1344,7 +2107,7 @@ func (m *CSIDriverList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1372,7 +2135,7 @@ func (m *CSIDriverList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1381,6 +2144,9 @@ func (m *CSIDriverList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1402,7 +2168,7 @@ func (m *CSIDriverList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1411,6 +2177,9 @@ func (m *CSIDriverList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1428,6 +2197,9 @@ func (m *CSIDriverList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1455,7 +2227,7 @@ func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1483,7 +2255,7 @@ func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1494,7 +2266,28 @@ func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { if wireType != 0 { return fmt.Errorf("proto: 
wrong wireType = %d for field PodInfoOnMount", wireType) } - var v int + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.PodInfoOnMount = &b + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeLifecycleModes", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1504,13 +2297,24 @@ func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.PodInfoOnMount = &b + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeLifecycleModes = append(m.VolumeLifecycleModes, VolumeLifecycleMode(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1520,6 +2324,9 @@ func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1547,7 +2354,7 @@ func (m *CSINode) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1575,7 +2382,7 @@ func (m *CSINode) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1584,6 +2391,9 @@ func (m *CSINode) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1605,7 +2415,7 @@ func (m *CSINode) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1614,6 +2424,9 @@ func (m *CSINode) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1630,6 +2443,9 @@ func (m *CSINode) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1657,7 +2473,7 @@ func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1685,7 +2501,7 @@ func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1695,6 +2511,9 @@ func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1714,7 +2533,7 @@ func (m *CSINodeDriver) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1724,6 +2543,9 @@ func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1743,7 +2565,7 @@ func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1753,11 +2575,50 @@ func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.TopologyKeys = append(m.TopologyKeys, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Allocatable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Allocatable == nil { + m.Allocatable = &VolumeNodeResources{} + } + if err := m.Allocatable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1767,6 +2628,9 @@ func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1794,7 +2658,7 @@ func (m *CSINodeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1822,7 +2686,7 @@ func (m *CSINodeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1831,6 +2695,9 @@ func (m *CSINodeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1852,7 +2719,7 @@ func (m *CSINodeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1861,6 +2728,9 @@ func (m *CSINodeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1878,6 +2748,9 @@ func (m *CSINodeList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1905,7 +2778,7 @@ func (m *CSINodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-1933,7 +2806,7 @@ func (m *CSINodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1942,6 +2815,9 @@ func (m *CSINodeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1959,6 +2835,9 @@ func (m *CSINodeSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1986,7 +2865,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2014,7 +2893,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2023,6 +2902,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2044,7 +2926,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2054,6 +2936,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2073,7 +2958,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2082,6 +2967,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2102,7 +2990,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2119,7 +3007,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2129,6 +3017,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -2145,7 +3036,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2155,6 +3046,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -2191,7 +3085,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2201,6 +3095,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2221,7 +3118,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2231,6 +3128,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2250,7 +3150,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2271,7 +3171,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2281,6 +3181,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2301,7 +3204,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2310,10 +3213,13 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedTopologies = append(m.AllowedTopologies, k8s_io_api_core_v1.TopologySelectorTerm{}) + m.AllowedTopologies = append(m.AllowedTopologies, v11.TopologySelectorTerm{}) if err := m.AllowedTopologies[len(m.AllowedTopologies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2327,6 +3233,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2354,7 +3263,7 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2382,7 +3291,7 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2391,6 +3300,9 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2412,7 +3324,7 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2421,6 +3333,9 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -2438,6 +3353,9 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2465,7 +3383,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2493,7 +3411,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2502,6 +3420,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2523,7 +3444,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2532,6 +3453,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2553,7 +3477,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2562,6 +3486,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2578,6 +3505,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2605,7 +3535,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2633,7 +3563,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2642,6 +3572,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2663,7 +3596,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2672,6 +3605,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2689,6 +3625,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2716,7 +3655,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2744,7 +3683,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2754,12 +3693,51 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) m.PersistentVolumeName = &s iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InlineVolumeSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.InlineVolumeSpec == nil { + m.InlineVolumeSpec = &v11.PersistentVolumeSpec{} + } + if err := m.InlineVolumeSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2769,6 +3747,9 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2796,7 +3777,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2824,7 +3805,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2834,6 +3815,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2853,7 +3837,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2862,6 +3846,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2883,7 +3870,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2893,6 +3880,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2907,6 +3897,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + 
skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2934,7 +3927,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2962,7 +3955,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2982,7 +3975,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2991,6 +3984,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3011,7 +4007,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3028,7 +4024,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3038,6 +4034,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -3054,7 +4053,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3064,6 +4063,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -3100,7 +4102,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3109,6 +4111,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3133,7 +4138,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3142,6 +4147,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3161,6 +4169,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3188,7 +4199,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= 
(uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3216,7 +4227,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3225,6 +4236,9 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3246,7 +4260,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3256,6 +4270,9 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3270,6 +4287,82 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeNodeResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeNodeResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeNodeResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Count = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3336,10 +4429,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -3368,6 +4464,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -3386,87 +4485,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1216 bytes of a 
gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xcf, 0xc6, 0x4e, 0x9c, 0x8c, 0x93, 0x36, 0x59, 0x22, 0x30, 0x3e, 0xd8, 0x91, 0x11, 0x34, - 0xad, 0xda, 0x75, 0x1b, 0x15, 0x14, 0x55, 0xe2, 0x60, 0x27, 0x91, 0x70, 0x1b, 0x27, 0x66, 0x12, - 0x55, 0xa8, 0xe2, 0xc0, 0x64, 0xf7, 0xc5, 0x59, 0xec, 0xdd, 0xd9, 0xce, 0x8e, 0x0d, 0xbe, 0x71, - 0x82, 0x23, 0x88, 0x03, 0x9f, 0x80, 0xaf, 0x00, 0x12, 0x5c, 0x38, 0x92, 0x13, 0xea, 0xb1, 0x27, - 0x8b, 0x98, 0x6f, 0x11, 0x71, 0x40, 0x33, 0x3b, 0xf6, 0xee, 0xfa, 0x4f, 0xe3, 0x70, 0xf0, 0xcd, - 0xf3, 0xde, 0xfb, 0xfd, 0xde, 0xdf, 0x79, 0xb3, 0x46, 0xbb, 0x8d, 0x1d, 0xdf, 0xb0, 0x69, 0xb1, - 0xd1, 0x3a, 0x05, 0xe6, 0x02, 0x07, 0xbf, 0xd8, 0x06, 0xd7, 0xa2, 0xac, 0xa8, 0x14, 0xc4, 0xb3, - 0x8b, 0x3e, 0xa7, 0x8c, 0xd4, 0xa1, 0xd8, 0x7e, 0x74, 0x0a, 0x9c, 0x3c, 0x2a, 0xd6, 0xc1, 0x05, - 0x46, 0x38, 0x58, 0x86, 0xc7, 0x28, 0xa7, 0x7a, 0x36, 0xb0, 0x35, 0x88, 0x67, 0x1b, 0xca, 0xd6, - 0x50, 0xb6, 0xd9, 0x07, 0x75, 0x9b, 0x9f, 0xb7, 0x4e, 0x0d, 0x93, 0x3a, 0xc5, 0x3a, 0xad, 0xd3, - 0xa2, 0x84, 0x9c, 0xb6, 0xce, 0xe4, 0x49, 0x1e, 0xe4, 0xaf, 0x80, 0x2a, 0x5b, 0x88, 0xb8, 0x35, - 0x29, 0x13, 0x3e, 0x87, 0xdd, 0x65, 0x1f, 0x87, 0x36, 0x0e, 0x31, 0xcf, 0x6d, 0x17, 0x58, 0xa7, - 0xe8, 0x35, 0xea, 0x42, 0xe0, 0x17, 0x1d, 0xe0, 0x64, 0x1c, 0xaa, 0x38, 0x09, 0xc5, 0x5a, 0x2e, - 0xb7, 0x1d, 0x18, 0x01, 0x7c, 0x74, 0x1d, 0xc0, 0x37, 0xcf, 0xc1, 0x21, 0xc3, 0xb8, 0xc2, 0xef, - 0x1a, 0x5a, 0xde, 0x3d, 0xae, 0xec, 0x31, 0xbb, 0x0d, 0x4c, 0xff, 0x02, 0x2d, 0x89, 0x88, 0x2c, - 0xc2, 0x49, 0x46, 0xdb, 0xd4, 0xb6, 0xd2, 0xdb, 0x0f, 0x8d, 0xb0, 0x5c, 0x03, 0x62, 0xc3, 0x6b, - 0xd4, 0x85, 0xc0, 0x37, 0x84, 0xb5, 0xd1, 0x7e, 0x64, 0x1c, 0x9d, 0x7e, 0x09, 0x26, 0xaf, 0x02, - 0x27, 0x65, 0xfd, 0xa2, 0x9b, 0x9f, 0xeb, 0x75, 0xf3, 0x28, 0x94, 0xe1, 0x01, 0xab, 0xfe, 0x0c, - 0x25, 0x7d, 0x0f, 0xcc, 0xcc, 0xbc, 0x64, 0xbf, 0x6b, 0x4c, 0x6e, 0x86, 0x31, 0x08, 0xeb, 0xd8, - 0x03, 0xb3, 0xbc, 0xa2, 0x68, 0x93, 0xe2, 0x84, 0x25, 0x49, 0xe1, 0x37, 0x0d, 0xad, 0x0e, 0xac, - 0x0e, 0x6c, 0x9f, 0xeb, 0x9f, 0x8f, 0x24, 0x60, 0x4c, 0x97, 0x80, 0x40, 0xcb, 0xf0, 0xd7, 0x94, - 0x9f, 0xa5, 0xbe, 0x24, 0x12, 0xfc, 0x53, 0xb4, 0x60, 0x73, 0x70, 0xfc, 0xcc, 0xfc, 0x66, 0x62, - 0x2b, 0xbd, 0xfd, 0xfe, 0x54, 0xd1, 0x97, 0x57, 0x15, 0xe3, 0x42, 0x45, 0x60, 0x71, 0x40, 0x51, - 0xf8, 0x2e, 0x1a, 0xbb, 0xc8, 0x49, 0x7f, 0x82, 0x6e, 0x11, 0xce, 0x89, 0x79, 0x8e, 0xe1, 0x65, - 0xcb, 0x66, 0x60, 0xc9, 0x0c, 0x96, 0xca, 0x7a, 0xaf, 0x9b, 0xbf, 0x55, 0x8a, 0x69, 0xf0, 0x90, - 0xa5, 0xc0, 0x7a, 0xd4, 0xaa, 0xb8, 0x67, 0xf4, 0xc8, 0xad, 0xd2, 0x96, 0xcb, 0x65, 0x81, 0x15, - 0xb6, 0x16, 0xd3, 0xe0, 0x21, 0xcb, 0xc2, 0xaf, 0x1a, 0x4a, 0xed, 0x1e, 0x57, 0x0e, 0xa9, 0x05, - 0x33, 0x18, 0x80, 0x4a, 0x6c, 0x00, 0xee, 0x5c, 0x53, 0x42, 0x11, 0xd4, 0xc4, 0xf6, 0x7f, 0x1f, - 0x94, 0x50, 0xd8, 0xa8, 0xf9, 0xdd, 0x44, 0x49, 0x97, 0x38, 0x20, 0x43, 0x5f, 0x0e, 0x31, 0x87, - 0xc4, 0x01, 0x2c, 0x35, 0xfa, 0x07, 0x68, 0xd1, 0xa5, 0x16, 0x54, 0xf6, 0x64, 0x00, 0xcb, 0xe5, - 0x5b, 0xca, 0x66, 0xf1, 0x50, 0x4a, 0xb1, 0xd2, 0xea, 0x8f, 0xd1, 0x0a, 0xa7, 0x1e, 0x6d, 0xd2, - 0x7a, 0xe7, 0x19, 0x74, 0xfc, 0x4c, 0x62, 0x33, 0xb1, 0xb5, 0x5c, 0x5e, 0xeb, 0x75, 0xf3, 0x2b, - 0x27, 0x11, 0x39, 0x8e, 0x59, 0x15, 0x7e, 0xd1, 0x50, 0x5a, 0x45, 0x34, 0x83, 0x71, 0xfc, 0x24, - 0x3e, 0x8e, 0xef, 0x4d, 0x51, 0xcb, 0x09, 0xc3, 0x68, 0x0e, 0xc2, 0x96, 0x93, 0x78, 0x82, 0x52, - 0x96, 0x2c, 0xa8, 0x9f, 0xd1, 0x24, 0xf5, 0xdd, 0x29, 0xa8, 0xd5, 0xb4, 0xdf, 0x56, 0x0e, 
0x52, - 0xc1, 0xd9, 0xc7, 0x7d, 0xaa, 0xc2, 0x8f, 0x8b, 0x68, 0xe5, 0x38, 0xc0, 0xee, 0x36, 0x89, 0xef, - 0xcf, 0x60, 0xd8, 0x3e, 0x44, 0x69, 0x8f, 0xd1, 0xb6, 0xed, 0xdb, 0xd4, 0x05, 0xa6, 0x5a, 0xfe, - 0x96, 0x82, 0xa4, 0x6b, 0xa1, 0x0a, 0x47, 0xed, 0xf4, 0x26, 0x42, 0x1e, 0x61, 0xc4, 0x01, 0x2e, - 0x4a, 0x90, 0x90, 0x25, 0xd8, 0x79, 0x53, 0x09, 0xa2, 0x69, 0x19, 0xb5, 0x01, 0x74, 0xdf, 0xe5, - 0xac, 0x13, 0x86, 0x18, 0x2a, 0x70, 0x84, 0x5f, 0x6f, 0xa0, 0x55, 0x06, 0x66, 0x93, 0xd8, 0x4e, - 0x8d, 0x36, 0x6d, 0xb3, 0x93, 0x49, 0xca, 0x30, 0xf7, 0x7b, 0xdd, 0xfc, 0x2a, 0x8e, 0x2a, 0xae, - 0xba, 0xf9, 0x87, 0xa3, 0x2f, 0x8e, 0x51, 0x03, 0xe6, 0xdb, 0x3e, 0x07, 0x97, 0x3f, 0xa7, 0xcd, - 0x96, 0x03, 0x31, 0x0c, 0x8e, 0x73, 0x8b, 0xb9, 0x76, 0xc4, 0xad, 0x3f, 0xf2, 0xb8, 0x4d, 0x5d, - 0x3f, 0xb3, 0x10, 0xce, 0x75, 0x35, 0x22, 0xc7, 0x31, 0x2b, 0xfd, 0x00, 0x6d, 0x90, 0x66, 0x93, - 0x7e, 0x15, 0x38, 0xd8, 0xff, 0xda, 0x23, 0xae, 0x28, 0x55, 0x66, 0x51, 0x2e, 0x99, 0x4c, 0xaf, - 0x9b, 0xdf, 0x28, 0x8d, 0xd1, 0xe3, 0xb1, 0x28, 0xfd, 0x33, 0xb4, 0xde, 0x96, 0xa2, 0xb2, 0xed, - 0x5a, 0xb6, 0x5b, 0xaf, 0x52, 0x0b, 0x32, 0x29, 0x99, 0xf4, 0xbd, 0x5e, 0x37, 0xbf, 0xfe, 0x7c, - 0x58, 0x79, 0x35, 0x4e, 0x88, 0x47, 0x49, 0xf4, 0x97, 0x68, 0x5d, 0x7a, 0x04, 0x4b, 0x5d, 0x52, - 0x1b, 0xfc, 0xcc, 0x92, 0xec, 0xdf, 0x56, 0xb4, 0x7f, 0xa2, 0x74, 0x62, 0x90, 0xfa, 0x57, 0xf9, - 0x18, 0x9a, 0x60, 0x72, 0xca, 0x4e, 0x80, 0x39, 0xe5, 0x77, 0x55, 0xbf, 0xd6, 0x4b, 0xc3, 0x54, - 0x78, 0x94, 0x3d, 0xfb, 0x31, 0xba, 0x3d, 0xd4, 0x70, 0x7d, 0x0d, 0x25, 0x1a, 0xd0, 0x09, 0x96, - 0x10, 0x16, 0x3f, 0xf5, 0x0d, 0xb4, 0xd0, 0x26, 0xcd, 0x16, 0x04, 0x13, 0x88, 0x83, 0xc3, 0x93, - 0xf9, 0x1d, 0xad, 0xf0, 0x87, 0x86, 0xd6, 0xa2, 0xd3, 0x33, 0x83, 0xb5, 0x51, 0x8d, 0xaf, 0x8d, - 0xad, 0x69, 0x07, 0x7b, 0xc2, 0xee, 0xf8, 0x79, 0x1e, 0xad, 0x05, 0xcd, 0x09, 0xde, 0x28, 0x07, - 0x5c, 0x3e, 0x83, 0xab, 0x8d, 0x63, 0xef, 0xc8, 0xc3, 0x37, 0x25, 0x31, 0x1c, 0xdd, 0xa4, 0x07, - 0x45, 0x7f, 0x81, 0x16, 0x7d, 0x4e, 0x78, 0x4b, 0xdc, 0x79, 0xc1, 0xba, 0x7d, 0x23, 0x56, 0x89, - 0x0c, 0x1f, 0x94, 0xe0, 0x8c, 0x15, 0x63, 0xe1, 0x4f, 0x0d, 0x6d, 0x0c, 0x43, 0x66, 0xd0, 0xec, - 0x4f, 0xe3, 0xcd, 0xbe, 0x7f, 0x93, 0x8c, 0x26, 0x34, 0xfc, 0x0c, 0xbd, 0x3d, 0x92, 0x3b, 0x6d, - 0x31, 0x13, 0xc4, 0x9a, 0xf0, 0x86, 0x96, 0xd1, 0x61, 0xf8, 0x1c, 0xcb, 0x35, 0x51, 0x1b, 0xa3, - 0xc7, 0x63, 0x51, 0x85, 0xbf, 0xc6, 0x54, 0x4c, 0x3e, 0x4f, 0xf7, 0xd1, 0x52, 0xf0, 0xf9, 0x03, - 0x4c, 0x51, 0x0f, 0x2a, 0x50, 0x52, 0x72, 0x3c, 0xb0, 0x90, 0x4d, 0x95, 0xe1, 0xa9, 0x51, 0xb9, - 0x59, 0x53, 0x25, 0x32, 0xd2, 0x54, 0x79, 0xc6, 0x8a, 0x51, 0x44, 0x22, 0xbe, 0x17, 0x64, 0x92, - 0x89, 0x78, 0x24, 0x87, 0x4a, 0x8e, 0x07, 0x16, 0x85, 0x7f, 0x13, 0x63, 0x2a, 0x27, 0xa7, 0x23, - 0x92, 0x52, 0xff, 0xab, 0x6f, 0x38, 0x25, 0x6b, 0x90, 0x92, 0xa5, 0xff, 0xa4, 0x21, 0x9d, 0x0c, - 0x28, 0xaa, 0xfd, 0xe9, 0x09, 0x5a, 0xfc, 0xf4, 0xe6, 0x43, 0x6b, 0x94, 0x46, 0xc8, 0x82, 0xa7, - 0x2b, 0xab, 0x82, 0xd0, 0x47, 0x0d, 0xf0, 0x98, 0x08, 0x74, 0x1b, 0xa5, 0x03, 0xe9, 0x3e, 0x63, - 0x94, 0xa9, 0x5b, 0x74, 0xe7, 0xfa, 0x80, 0xa4, 0x79, 0x39, 0x27, 0x1e, 0xe5, 0x52, 0x88, 0xbf, - 0xea, 0xe6, 0xd3, 0x11, 0x3d, 0x8e, 0x72, 0x0b, 0x57, 0x16, 0x84, 0xae, 0x92, 0xff, 0xc3, 0xd5, - 0x1e, 0x4c, 0x76, 0x15, 0xe1, 0xce, 0xee, 0xa3, 0x77, 0x26, 0x14, 0xe8, 0x46, 0xab, 0xfe, 0x5b, - 0x0d, 0x45, 0x7d, 0xe8, 0x07, 0x28, 0x29, 0xfe, 0x99, 0xa9, 0x4b, 0x7f, 0x6f, 0xba, 0x4b, 0x7f, - 0x62, 0x3b, 0x10, 0xee, 0x2e, 0x71, 0xc2, 0x92, 0x45, 0xbf, 0x8b, 0x52, 0x0e, 0xf8, 0x3e, 0xa9, - 0x2b, 0xcf, 0xe1, 
0x87, 0x58, 0x35, 0x10, 0xe3, 0xbe, 0xbe, 0xfc, 0xe0, 0xe2, 0x32, 0x37, 0xf7, - 0xea, 0x32, 0x37, 0xf7, 0xfa, 0x32, 0x37, 0xf7, 0x4d, 0x2f, 0xa7, 0x5d, 0xf4, 0x72, 0xda, 0xab, - 0x5e, 0x4e, 0x7b, 0xdd, 0xcb, 0x69, 0x7f, 0xf7, 0x72, 0xda, 0x0f, 0xff, 0xe4, 0xe6, 0x5e, 0xa4, - 0x54, 0xdd, 0xfe, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x78, 0xdc, 0x5e, 0x79, 0x76, 0x0f, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto index 6d9fe9049..bb2cf3450 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -45,7 +45,7 @@ message CSIDriver { // The driver name must be 63 characters or less, beginning and ending with // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and // alphanumerics between. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the CSI Driver. @@ -55,7 +55,7 @@ message CSIDriver { // CSIDriverList is a collection of CSIDriver objects. message CSIDriverList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -93,8 +93,32 @@ message CSIDriverSpec { // "csi.storage.k8s.io/pod.name": pod.Name // "csi.storage.k8s.io/pod.namespace": pod.Namespace // "csi.storage.k8s.io/pod.uid": string(pod.UID) + // "csi.storage.k8s.io/ephemeral": "true" iff the volume is an ephemeral inline volume + // defined by a CSIVolumeSource, otherwise "false" + // + // "csi.storage.k8s.io/ephemeral" is a new feature in Kubernetes 1.16. It is only + // required for drivers which support both the "Persistent" and "Ephemeral" VolumeLifecycleMode. + // Other drivers can leave pod info disabled and/or ignore this field. + // As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when + // deployed on such a cluster and the deployment determines which mode that is, for example + // via a command line parameter of the driver. // +optional optional bool podInfoOnMount = 2; + + // VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. + // The default if the list is empty is "Persistent", which is the usage + // defined by the CSI specification and implemented in Kubernetes via the usual + // PV/PVC mechanism. + // The other mode is "Ephemeral". In this mode, volumes are defined inline + // inside the pod spec with CSIVolumeSource and their lifecycle is tied to + // the lifecycle of that pod. A driver has to be aware of this + // because it is only going to get a NodePublishVolume call for such a volume. + // For more information about implementing this mode, see + // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html + // A driver can support one or more of these modes and + // more modes may be added in the future. + // +optional + repeated string volumeLifecycleModes = 3; } // CSINode holds information about all CSI drivers installed on a node. @@ -144,12 +168,16 @@ message CSINodeDriver { // This can be empty if driver does not support topology. 
// +optional repeated string topologyKeys = 3; + + // allocatable represents the volume resources of a node that are available for scheduling. + // +optional + optional VolumeNodeResources allocatable = 4; } // CSINodeList is a collection of CSINode objects. message CSINodeList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -173,7 +201,7 @@ message CSINodeSpec { // according to etcd is in ObjectMeta.Name. message StorageClass { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -217,7 +245,7 @@ message StorageClass { // StorageClassList is a collection of storage classes. message StorageClassList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -231,7 +259,7 @@ message StorageClassList { // VolumeAttachment objects are non-namespaced. message VolumeAttachment { // Standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -249,7 +277,7 @@ message VolumeAttachment { // VolumeAttachmentList is a collection of VolumeAttachment objects. message VolumeAttachmentList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -265,6 +293,15 @@ message VolumeAttachmentSource { // Name of the persistent volume to attach. // +optional optional string persistentVolumeName = 1; + + // inlineVolumeSpec contains all the information necessary to attach + // a persistent volume defined by a pod's inline VolumeSource. This field + // is populated only for the CSIMigration feature. It contains + // translated fields from a pod's inline VolumeSource to a + // PersistentVolumeSpec. This field is alpha-level and is only + // honored by servers that enabled the CSIMigration feature. + // +optional + optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. @@ -321,3 +358,13 @@ message VolumeError { optional string message = 2; } +// VolumeNodeResources is a set of resource limits for scheduling of volumes. +message VolumeNodeResources { + // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // A volume that is both attached and mounted on a node is considered to be used once, not twice. + // The same rule applies for a unique volume that is shared among multiple pods on the same node. 
+ // If this field is nil, then the supported number of volumes on this node is unbounded. + // +optional + optional int32 count = 1; +} + diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go index 490057912..fa1bae1d8 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/vendor/k8s.io/api/storage/v1beta1/types.go @@ -17,7 +17,7 @@ limitations under the License. package v1beta1 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -33,7 +33,7 @@ import ( type StorageClass struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -80,7 +80,7 @@ type StorageClass struct { type StorageClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -115,7 +115,7 @@ type VolumeAttachment struct { metav1.TypeMeta `json:",inline"` // Standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -136,7 +136,7 @@ type VolumeAttachment struct { type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -166,7 +166,14 @@ type VolumeAttachmentSource struct { // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` - // Placeholder for *VolumeSource to accommodate inline volumes in pods. + // inlineVolumeSpec contains all the information necessary to attach + // a persistent volume defined by a pod's inline VolumeSource. This field + // is populated only for the CSIMigration feature. It contains + // translated fields from a pod's inline VolumeSource to a + // PersistentVolumeSpec. This field is alpha-level and is only + // honored by servers that enabled the CSIMigration feature. + // +optional + InlineVolumeSpec *v1.PersistentVolumeSpec `json:"inlineVolumeSpec,omitempty" protobuf:"bytes,2,opt,name=inlineVolumeSpec"` } // VolumeAttachmentStatus is the status of a VolumeAttachment request. @@ -232,7 +239,7 @@ type CSIDriver struct { // The driver name must be 63 characters or less, beginning and ending with // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and // alphanumerics between. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the CSI Driver. @@ -246,7 +253,7 @@ type CSIDriverList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -284,10 +291,59 @@ type CSIDriverSpec struct { // "csi.storage.k8s.io/pod.name": pod.Name // "csi.storage.k8s.io/pod.namespace": pod.Namespace // "csi.storage.k8s.io/pod.uid": string(pod.UID) + // "csi.storage.k8s.io/ephemeral": "true" iff the volume is an ephemeral inline volume + // defined by a CSIVolumeSource, otherwise "false" + // + // "csi.storage.k8s.io/ephemeral" is a new feature in Kubernetes 1.16. It is only + // required for drivers which support both the "Persistent" and "Ephemeral" VolumeLifecycleMode. + // Other drivers can leave pod info disabled and/or ignore this field. + // As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when + // deployed on such a cluster and the deployment determines which mode that is, for example + // via a command line parameter of the driver. // +optional PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"` + + // VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. + // The default if the list is empty is "Persistent", which is the usage + // defined by the CSI specification and implemented in Kubernetes via the usual + // PV/PVC mechanism. + // The other mode is "Ephemeral". In this mode, volumes are defined inline + // inside the pod spec with CSIVolumeSource and their lifecycle is tied to + // the lifecycle of that pod. A driver has to be aware of this + // because it is only going to get a NodePublishVolume call for such a volume. + // For more information about implementing this mode, see + // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html + // A driver can support one or more of these modes and + // more modes may be added in the future. + // +optional + VolumeLifecycleModes []VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty" protobuf:"bytes,3,opt,name=volumeLifecycleModes"` } +// VolumeLifecycleMode is an enumeration of possible usage modes for a volume +// provided by a CSI driver. More modes may be added in the future. +type VolumeLifecycleMode string + +const ( + // VolumeLifecyclePersistent explicitly confirms that the driver implements + // the full CSI spec. It is the default when CSIDriverSpec.VolumeLifecycleModes is not + // set. Such volumes are managed in Kubernetes via the persistent volume + // claim mechanism and have a lifecycle that is independent of the pods which + // use them. + VolumeLifecyclePersistent VolumeLifecycleMode = "Persistent" + + // VolumeLifecycleEphemeral indicates that the driver can be used for + // ephemeral inline volumes. Such volumes are specified inside the pod + // spec with a CSIVolumeSource and, as far as Kubernetes is concerned, have + // a lifecycle that is tied to the lifecycle of the pod. 
For example, such + // a volume might contain data that gets created specifically for that pod, + // like secrets. + // But how the volume actually gets created and managed is entirely up to + // the driver. It might also use reference counting to share the same volume + // instance among different pods if the CSIVolumeSource of those pods is + // identical. + VolumeLifecycleEphemeral VolumeLifecycleMode = "Ephemeral" +) + // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -350,6 +406,20 @@ type CSINodeDriver struct { // This can be empty if driver does not support topology. // +optional TopologyKeys []string `json:"topologyKeys" protobuf:"bytes,3,rep,name=topologyKeys"` + + // allocatable represents the volume resources of a node that are available for scheduling. + // +optional + Allocatable *VolumeNodeResources `json:"allocatable,omitempty" protobuf:"bytes,4,opt,name=allocatable"` +} + +// VolumeNodeResources is a set of resource limits for scheduling of volumes. +type VolumeNodeResources struct { + // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // A volume that is both attached and mounted on a node is considered to be used once, not twice. + // The same rule applies for a unique volume that is shared among multiple pods on the same node. + // If this field is nil, then the supported number of volumes on this node is unbounded. + // +optional + Count *int32 `json:"count,omitempty" protobuf:"varint,1,opt,name=count"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -359,7 +429,7 @@ type CSINodeList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index ec741ecf7..fe80a8e50 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CSIDriver = map[string]string{ "": "CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. CSI drivers do not need to create the CSIDriver object directly. Instead they may use the cluster-driver-registrar sidecar container. When deployed with a CSI driver it automatically creates a CSIDriver object representing the driver. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.", - "metadata": "Standard object metadata. metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object metadata. 
metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Specification of the CSI Driver.", } @@ -39,7 +39,7 @@ func (CSIDriver) SwaggerDoc() map[string]string { var map_CSIDriverList = map[string]string{ "": "CSIDriverList is a collection of CSIDriver objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is the list of CSIDriver", } @@ -48,9 +48,10 @@ func (CSIDriverList) SwaggerDoc() map[string]string { } var map_CSIDriverSpec = map[string]string{ - "": "CSIDriverSpec is the specification of a CSIDriver.", - "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.", - "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID)", + "": "CSIDriverSpec is the specification of a CSIDriver.", + "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.", + "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. 
If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" iff the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.", + "volumeLifecycleModes": "VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.", } func (CSIDriverSpec) SwaggerDoc() map[string]string { @@ -72,6 +73,7 @@ var map_CSINodeDriver = map[string]string{ "name": "This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", "nodeID": "nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". This field is required.", "topologyKeys": "topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. 
This can be empty if driver does not support topology.", + "allocatable": "allocatable represents the volume resources of a node that are available for scheduling.", } func (CSINodeDriver) SwaggerDoc() map[string]string { @@ -80,7 +82,7 @@ func (CSINodeDriver) SwaggerDoc() map[string]string { var map_CSINodeList = map[string]string{ "": "CSINodeList is a collection of CSINode objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is the list of CSINode", } @@ -99,7 +101,7 @@ func (CSINodeSpec) SwaggerDoc() map[string]string { var map_StorageClass = map[string]string{ "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "provisioner": "Provisioner indicates the type of the provisioner.", "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.", @@ -115,7 +117,7 @@ func (StorageClass) SwaggerDoc() map[string]string { var map_StorageClassList = map[string]string{ "": "StorageClassList is a collection of storage classes.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of StorageClasses", } @@ -125,7 +127,7 @@ func (StorageClassList) SwaggerDoc() map[string]string { var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", - "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. 
the external-attacher.", } @@ -136,7 +138,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of VolumeAttachments", } @@ -186,4 +188,13 @@ func (VolumeError) SwaggerDoc() map[string]string { return map_VolumeError } +var map_VolumeNodeResources = map[string]string{ + "": "VolumeNodeResources is a set of resource limits for scheduling of volumes.", + "count": "Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is nil, then the supported number of volumes on this node is unbounded.", +} + +func (VolumeNodeResources) SwaggerDoc() map[string]string { + return map_VolumeNodeResources +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go index 022815f18..52433fcdf 100644 --- a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go @@ -56,7 +56,7 @@ func (in *CSIDriver) DeepCopyObject() runtime.Object { func (in *CSIDriverList) DeepCopyInto(out *CSIDriverList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CSIDriver, len(*in)) @@ -98,6 +98,11 @@ func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) { *out = new(bool) **out = **in } + if in.VolumeLifecycleModes != nil { + in, out := &in.VolumeLifecycleModes, &out.VolumeLifecycleModes + *out = make([]VolumeLifecycleMode, len(*in)) + copy(*out, *in) + } return } @@ -146,6 +151,11 @@ func (in *CSINodeDriver) DeepCopyInto(out *CSINodeDriver) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.Allocatable != nil { + in, out := &in.Allocatable, &out.Allocatable + *out = new(VolumeNodeResources) + (*in).DeepCopyInto(*out) + } return } @@ -163,7 +173,7 @@ func (in *CSINodeDriver) DeepCopy() *CSINodeDriver { func (in *CSINodeList) DeepCopyInto(out *CSINodeList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CSINode, len(*in)) @@ -279,7 +289,7 @@ func (in *StorageClass) DeepCopyObject() runtime.Object { func (in *StorageClassList) DeepCopyInto(out *StorageClassList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]StorageClass, len(*in)) @@ -340,7 +350,7 @@ func (in *VolumeAttachment) DeepCopyObject() runtime.Object { func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]VolumeAttachment, len(*in)) @@ -377,6 +387,11 @@ 
func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) { *out = new(string) **out = **in } + if in.InlineVolumeSpec != nil { + in, out := &in.InlineVolumeSpec, &out.InlineVolumeSpec + *out = new(v1.PersistentVolumeSpec) + (*in).DeepCopyInto(*out) + } return } @@ -456,3 +471,24 @@ func (in *VolumeError) DeepCopy() *VolumeError { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeNodeResources) DeepCopyInto(out *VolumeNodeResources) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeResources. +func (in *VolumeNodeResources) DeepCopy() *VolumeNodeResources { + if in == nil { + return nil + } + out := new(VolumeNodeResources) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go index 37b4d1df9..3c7ac0060 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go @@ -258,5 +258,31 @@ func (in *JSONSchemaProps) DeepCopy() *JSONSchemaProps { } } + if in.XPreserveUnknownFields != nil { + in, out := &in.XPreserveUnknownFields, &out.XPreserveUnknownFields + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + + if in.XListMapKeys != nil { + in, out := &in.XListMapKeys, &out.XListMapKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } + + if in.XListType != nil { + in, out := &in.XListType, &out.XListType + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return out } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go index 964a6190b..52d6ea866 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go @@ -27,18 +27,19 @@ var swaggerMetadataDescriptions = metav1.ObjectMeta{}.SwaggerDoc() // SetCRDCondition sets the status condition. It either overwrites the existing one or creates a new one. 
func SetCRDCondition(crd *CustomResourceDefinition, newCondition CustomResourceDefinitionCondition) { + newCondition.LastTransitionTime = metav1.NewTime(time.Now()) + existingCondition := FindCRDCondition(crd, newCondition.Type) if existingCondition == nil { - newCondition.LastTransitionTime = metav1.NewTime(time.Now()) crd.Status.Conditions = append(crd.Status.Conditions, newCondition) return } - if existingCondition.Status != newCondition.Status { - existingCondition.Status = newCondition.Status + if existingCondition.Status != newCondition.Status || existingCondition.LastTransitionTime.IsZero() { existingCondition.LastTransitionTime = newCondition.LastTransitionTime } + existingCondition.Status = newCondition.Status existingCondition.Reason = newCondition.Reason existingCondition.Message = newCondition.Message } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go index 21873d460..8f502e8b1 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go @@ -73,13 +73,20 @@ type CustomResourceDefinitionSpec struct { // `conversion` defines conversion settings for the CRD. Conversion *CustomResourceConversion + + // preserveUnknownFields disables pruning of object fields which are not + // specified in the OpenAPI schema. apiVersion, kind, metadata and known + // fields inside metadata are always preserved. + // Defaults to true in v1beta and will default to false in v1. + PreserveUnknownFields *bool } // CustomResourceConversion describes how to convert different versions of a CR. type CustomResourceConversion struct { // `strategy` specifies the conversion strategy. Allowed values are: // - `None`: The converter only change the apiVersion and would not touch any other field in the CR. - // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information is needed for this option. + // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information + // is needed for this option. This requires spec.preserveUnknownFields to be false. Strategy ConversionStrategyType // `webhookClientConfig` is the instructions for how to call the webhook if strategy is `Webhook`. @@ -132,8 +139,6 @@ type WebhookClientConfig struct { // // If the webhook is running within the cluster, then you should use `service`. // - // Port 443 will be used if it is open, otherwise it is an error. - // // +optional Service *ServiceReference @@ -156,6 +161,11 @@ type ServiceReference struct { // this service. // +optional Path *string + + // If specified, the port on the service that hosting webhook. + // `port` should be a valid port number (1-65535, inclusive). + // +optional + Port int32 } // CustomResourceDefinitionVersion describes a version for CRD. @@ -261,13 +271,34 @@ const ( // NamesAccepted means the names chosen for this CustomResourceDefinition do not conflict with others in // the group and are therefore accepted. NamesAccepted CustomResourceDefinitionConditionType = "NamesAccepted" + // NonStructuralSchema means that one or more OpenAPI schema is not structural. 
+ // + // A schema is structural if it specifies types for all values, with the only exceptions of those with + // - x-kubernetes-int-or-string: true — for fields which can be integer or string + // - x-kubernetes-preserve-unknown-fields: true — for raw, unspecified JSON values + // and there is no type, additionalProperties, default, nullable or x-kubernetes-* vendor extenions + // specified under allOf, anyOf, oneOf or not. + // + // Non-structural schemas will not be allowed anymore in v1 API groups. Moreover, new features will not be + // available for non-structural CRDs: + // - pruning + // - defaulting + // - read-only + // - OpenAPI publishing + // - webhook conversion + NonStructuralSchema CustomResourceDefinitionConditionType = "NonStructuralSchema" // Terminating means that the CustomResourceDefinition has been deleted and is cleaning up. Terminating CustomResourceDefinitionConditionType = "Terminating" + // KubernetesAPIApprovalPolicyConformant indicates that an API in *.k8s.io or *.kubernetes.io is or is not approved. For CRDs + // outside those groups, this condition will not be set. For CRDs inside those groups, the condition will + // be true if .metadata.annotations["api-approved.kubernetes.io"] is set to a URL, otherwise it will be false. + // See https://github.com/kubernetes/enhancements/pull/1111 for more details. + KubernetesAPIApprovalPolicyConformant CustomResourceDefinitionConditionType = "KubernetesAPIApprovalPolicyConformant" ) // CustomResourceDefinitionCondition contains details for the current condition of this pod. type CustomResourceDefinitionCondition struct { - // Type is the type of the condition. + // Type is the type of the condition. Types include Established, NamesAccepted and Terminating. Type CustomResourceDefinitionConditionType // Status is the status of the condition. // Can be True, False, Unknown. @@ -368,8 +399,11 @@ type CustomResourceSubresourceScale struct { StatusReplicasPath string // LabelSelectorPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Selector. // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under .status. + // Must be a JSON Path under .status or .spec. // Must be set to work with HPA. + // The field pointed by this JSON path must be a string field (not a complex selector struct) + // which contains a serialized label selector in string form. + // More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource // If there is no value under the given path in the CustomResource, the status label selector value in the /scale // subresource will default to the empty string. // +optional diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go index af78c34fb..77abe9e36 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go @@ -55,6 +55,60 @@ type JSONSchemaProps struct { Definitions JSONSchemaDefinitions ExternalDocs *ExternalDocumentation Example *JSON + + // x-kubernetes-preserve-unknown-fields stops the API server + // decoding step from pruning fields which are not specified + // in the validation schema. 
This affects fields recursively, + // but switches back to normal pruning behaviour if nested + // properties or additionalProperties are specified in the schema. + // This can either be true or undefined. False is forbidden. + XPreserveUnknownFields *bool + + // x-kubernetes-embedded-resource defines that the value is an + // embedded Kubernetes runtime.Object, with TypeMeta and + // ObjectMeta. The type must be object. It is allowed to further + // restrict the embedded object. Both ObjectMeta and TypeMeta + // are validated automatically. x-kubernetes-preserve-unknown-fields + // must be true. + XEmbeddedResource bool + + // x-kubernetes-int-or-string specifies that this value is + // either an integer or a string. If this is true, an empty + // type is allowed and type as child of anyOf is permitted + // if following one of the following patterns: + // + // 1) anyOf: + // - type: integer + // - type: string + // 2) allOf: + // - anyOf: + // - type: integer + // - type: string + // - ... zero or more + XIntOrString bool + + // x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used + // as the index of the map. + // + // This tag MUST only be used on lists that have the "x-kubernetes-list-type" + // extension set to "map". Also, the values specified for this attribute must + // be a scalar typed field of the child structure (no nesting is supported). + XListMapKeys []string + + // x-kubernetes-list-type annotates an array to further describe its topology. + // This extension must only be used on lists and may have 3 possible values: + // + // 1) `atomic`: the list is treated as a single entity, like a scalar. + // Atomic lists will be entirely replaced when updated. This extension + // may be used on any type of list (struct, scalar, ...). + // 2) `set`: + // Sets are lists that must not have multiple items with the same value. Each + // value must be a scalar (or another atomic type). + // 3) `map`: + // These lists are like maps in that their elements have a non-index key + // used to identify them. Order is preserved upon merge. The map tag + // must only be used on a list with elements of type object. + XListType *string } // JSON represents any valid JSON value. diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/conversion.go new file mode 100644 index 000000000..70a2265c8 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/conversion.go @@ -0,0 +1,212 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/json" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions + err := scheme.AddConversionFuncs( + Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps, + Convert_apiextensions_JSON_To_v1_JSON, + Convert_v1_JSON_To_apiextensions_JSON, + ) + if err != nil { + return err + } + return nil +} + +func Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(in *apiextensions.JSONSchemaProps, out *JSONSchemaProps, s conversion.Scope) error { + if err := autoConvert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(in, out, s); err != nil { + return err + } + if in.Default != nil && *(in.Default) == nil { + out.Default = nil + } + if in.Example != nil && *(in.Example) == nil { + out.Example = nil + } + return nil +} + +func Convert_apiextensions_JSON_To_v1_JSON(in *apiextensions.JSON, out *JSON, s conversion.Scope) error { + raw, err := json.Marshal(*in) + if err != nil { + return err + } + out.Raw = raw + return nil +} + +func Convert_v1_JSON_To_apiextensions_JSON(in *JSON, out *apiextensions.JSON, s conversion.Scope) error { + if in != nil { + var i interface{} + if err := json.Unmarshal(in.Raw, &i); err != nil { + return err + } + *out = i + } else { + out = nil + } + return nil +} + +func Convert_apiextensions_CustomResourceDefinitionSpec_To_v1_CustomResourceDefinitionSpec(in *apiextensions.CustomResourceDefinitionSpec, out *CustomResourceDefinitionSpec, s conversion.Scope) error { + if err := autoConvert_apiextensions_CustomResourceDefinitionSpec_To_v1_CustomResourceDefinitionSpec(in, out, s); err != nil { + return err + } + + if len(out.Versions) == 0 && len(in.Version) > 0 { + // no versions were specified, and a version name was specified + out.Versions = []CustomResourceDefinitionVersion{{Name: in.Version, Served: true, Storage: true}} + } + + // If spec.{subresources,validation,additionalPrinterColumns} exists, move to versions + if in.Subresources != nil { + subresources := &CustomResourceSubresources{} + if err := Convert_apiextensions_CustomResourceSubresources_To_v1_CustomResourceSubresources(in.Subresources, subresources, s); err != nil { + return err + } + for i := range out.Versions { + out.Versions[i].Subresources = subresources + } + } + if in.Validation != nil { + schema := &CustomResourceValidation{} + if err := Convert_apiextensions_CustomResourceValidation_To_v1_CustomResourceValidation(in.Validation, schema, s); err != nil { + return err + } + for i := range out.Versions { + out.Versions[i].Schema = schema + } + } + if in.AdditionalPrinterColumns != nil { + additionalPrinterColumns := make([]CustomResourceColumnDefinition, len(in.AdditionalPrinterColumns)) + for i := range in.AdditionalPrinterColumns { + if err := Convert_apiextensions_CustomResourceColumnDefinition_To_v1_CustomResourceColumnDefinition(&in.AdditionalPrinterColumns[i], &additionalPrinterColumns[i], s); err != nil { + return err + } + } + for i := range out.Versions { + out.Versions[i].AdditionalPrinterColumns = additionalPrinterColumns + } + } + return nil +} + +func Convert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(in *CustomResourceDefinitionSpec, out *apiextensions.CustomResourceDefinitionSpec, s conversion.Scope) error { + if err := 
autoConvert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(in, out, s); err != nil { + return err + } + + if len(out.Versions) == 0 { + return nil + } + + // Copy versions[0] to version + out.Version = out.Versions[0].Name + + // If versions[*].{subresources,schema,additionalPrinterColumns} are identical, move to spec + subresources := out.Versions[0].Subresources + subresourcesIdentical := true + validation := out.Versions[0].Schema + validationIdentical := true + additionalPrinterColumns := out.Versions[0].AdditionalPrinterColumns + additionalPrinterColumnsIdentical := true + + // Detect if per-version fields are identical + for _, v := range out.Versions { + if subresourcesIdentical && !apiequality.Semantic.DeepEqual(v.Subresources, subresources) { + subresourcesIdentical = false + } + if validationIdentical && !apiequality.Semantic.DeepEqual(v.Schema, validation) { + validationIdentical = false + } + if additionalPrinterColumnsIdentical && !apiequality.Semantic.DeepEqual(v.AdditionalPrinterColumns, additionalPrinterColumns) { + additionalPrinterColumnsIdentical = false + } + } + + // If they are, set the top-level fields and clear the per-version fields + if subresourcesIdentical { + out.Subresources = subresources + } + if validationIdentical { + out.Validation = validation + } + if additionalPrinterColumnsIdentical { + out.AdditionalPrinterColumns = additionalPrinterColumns + } + for i := range out.Versions { + if subresourcesIdentical { + out.Versions[i].Subresources = nil + } + if validationIdentical { + out.Versions[i].Schema = nil + } + if additionalPrinterColumnsIdentical { + out.Versions[i].AdditionalPrinterColumns = nil + } + } + + return nil +} + +func Convert_v1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(in *CustomResourceConversion, out *apiextensions.CustomResourceConversion, s conversion.Scope) error { + if err := autoConvert_v1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(in, out, s); err != nil { + return err + } + + out.WebhookClientConfig = nil + out.ConversionReviewVersions = nil + if in.Webhook != nil { + out.ConversionReviewVersions = in.Webhook.ConversionReviewVersions + if in.Webhook.ClientConfig != nil { + out.WebhookClientConfig = &apiextensions.WebhookClientConfig{} + if err := Convert_v1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in.Webhook.ClientConfig, out.WebhookClientConfig, s); err != nil { + return err + } + } + } + return nil +} + +func Convert_apiextensions_CustomResourceConversion_To_v1_CustomResourceConversion(in *apiextensions.CustomResourceConversion, out *CustomResourceConversion, s conversion.Scope) error { + if err := autoConvert_apiextensions_CustomResourceConversion_To_v1_CustomResourceConversion(in, out, s); err != nil { + return err + } + + out.Webhook = nil + if in.WebhookClientConfig != nil || in.ConversionReviewVersions != nil { + out.Webhook = &WebhookConversion{} + out.Webhook.ConversionReviewVersions = in.ConversionReviewVersions + if in.WebhookClientConfig != nil { + out.Webhook.ClientConfig = &WebhookClientConfig{} + if err := Convert_apiextensions_WebhookClientConfig_To_v1_WebhookClientConfig(in.WebhookClientConfig, out.Webhook.ClientConfig, s); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/deepcopy.go new file mode 100644 index 000000000..b8c44c696 --- /dev/null +++
b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/deepcopy.go @@ -0,0 +1,248 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// TODO: Update this after a tag is created for interface fields in DeepCopy +func (in *JSONSchemaProps) DeepCopy() *JSONSchemaProps { + if in == nil { + return nil + } + out := new(JSONSchemaProps) + *out = *in + + if in.Ref != nil { + in, out := &in.Ref, &out.Ref + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + + if in.Maximum != nil { + in, out := &in.Maximum, &out.Maximum + if *in == nil { + *out = nil + } else { + *out = new(float64) + **out = **in + } + } + + if in.Minimum != nil { + in, out := &in.Minimum, &out.Minimum + if *in == nil { + *out = nil + } else { + *out = new(float64) + **out = **in + } + } + + if in.MaxLength != nil { + in, out := &in.MaxLength, &out.MaxLength + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.MinLength != nil { + in, out := &in.MinLength, &out.MinLength + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.MaxItems != nil { + in, out := &in.MaxItems, &out.MaxItems + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.MinItems != nil { + in, out := &in.MinItems, &out.MinItems + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.MultipleOf != nil { + in, out := &in.MultipleOf, &out.MultipleOf + if *in == nil { + *out = nil + } else { + *out = new(float64) + **out = **in + } + } + + if in.MaxProperties != nil { + in, out := &in.MaxProperties, &out.MaxProperties + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.MinProperties != nil { + in, out := &in.MinProperties, &out.MinProperties + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = make([]string, len(*in)) + copy(*out, *in) + } + + if in.Items != nil { + in, out := &in.Items, &out.Items + if *in == nil { + *out = nil + } else { + *out = new(JSONSchemaPropsOrArray) + (*in).DeepCopyInto(*out) + } + } + + if in.AllOf != nil { + in, out := &in.AllOf, &out.AllOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + + if in.OneOf != nil { + in, out := &in.OneOf, &out.OneOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AnyOf != nil { + in, out := &in.AnyOf, &out.AnyOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + + if in.Not != nil { + in, out := &in.Not, &out.Not + if *in == nil { + *out = nil + } else { + *out = new(JSONSchemaProps) + (*in).DeepCopyInto(*out) + } + } + + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = 
make(map[string]JSONSchemaProps, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + if *in == nil { + *out = nil + } else { + *out = new(JSONSchemaPropsOrBool) + (*in).DeepCopyInto(*out) + } + } + + if in.PatternProperties != nil { + in, out := &in.PatternProperties, &out.PatternProperties + *out = make(map[string]JSONSchemaProps, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + + if in.Dependencies != nil { + in, out := &in.Dependencies, &out.Dependencies + *out = make(JSONSchemaDependencies, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + + if in.AdditionalItems != nil { + in, out := &in.AdditionalItems, &out.AdditionalItems + if *in == nil { + *out = nil + } else { + *out = new(JSONSchemaPropsOrBool) + (*in).DeepCopyInto(*out) + } + } + + if in.Definitions != nil { + in, out := &in.Definitions, &out.Definitions + *out = make(JSONSchemaDefinitions, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + + if in.ExternalDocs != nil { + in, out := &in.ExternalDocs, &out.ExternalDocs + if *in == nil { + *out = nil + } else { + *out = new(ExternalDocumentation) + (*in).DeepCopyInto(*out) + } + } + + if in.XPreserveUnknownFields != nil { + in, out := &in.XPreserveUnknownFields, &out.XPreserveUnknownFields + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + + return out +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/defaults.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/defaults.go new file mode 100644 index 000000000..5cebec927 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/defaults.go @@ -0,0 +1,61 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "strings" + + "k8s.io/apimachinery/pkg/runtime" + utilpointer "k8s.io/utils/pointer" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return RegisterDefaults(scheme) +} + +func SetDefaults_CustomResourceDefinition(obj *CustomResourceDefinition) { + SetDefaults_CustomResourceDefinitionSpec(&obj.Spec) + if len(obj.Status.StoredVersions) == 0 { + for _, v := range obj.Spec.Versions { + if v.Storage { + obj.Status.StoredVersions = append(obj.Status.StoredVersions, v.Name) + break + } + } + } +} + +func SetDefaults_CustomResourceDefinitionSpec(obj *CustomResourceDefinitionSpec) { + if len(obj.Names.Singular) == 0 { + obj.Names.Singular = strings.ToLower(obj.Names.Kind) + } + if len(obj.Names.ListKind) == 0 && len(obj.Names.Kind) > 0 { + obj.Names.ListKind = obj.Names.Kind + "List" + } + if obj.Conversion == nil { + obj.Conversion = &CustomResourceConversion{ + Strategy: NoneConverter, + } + } +} + +// SetDefaults_ServiceReference sets defaults for Webhook's ServiceReference +func SetDefaults_ServiceReference(obj *ServiceReference) { + if obj.Port == nil { + obj.Port = utilpointer.Int32Ptr(443) + } +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/doc.go new file mode 100644 index 000000000..09d4872f8 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:conversion-gen=k8s.io/apiextensions-apiserver/pkg/apis/apiextensions +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true +// +groupName=apiextensions.k8s.io + +// Package v1 is the v1 version of the API. +package v1 // import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go new file mode 100644 index 000000000..1eba9e372 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go @@ -0,0 +1,8966 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto + +package v1 + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + runtime "k8s.io/apimachinery/pkg/runtime" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *ConversionRequest) Reset() { *m = ConversionRequest{} } +func (*ConversionRequest) ProtoMessage() {} +func (*ConversionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{0} +} +func (m *ConversionRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConversionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConversionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConversionRequest.Merge(m, src) +} +func (m *ConversionRequest) XXX_Size() int { + return m.Size() +} +func (m *ConversionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ConversionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ConversionRequest proto.InternalMessageInfo + +func (m *ConversionResponse) Reset() { *m = ConversionResponse{} } +func (*ConversionResponse) ProtoMessage() {} +func (*ConversionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{1} +} +func (m *ConversionResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConversionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConversionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConversionResponse.Merge(m, src) +} +func (m *ConversionResponse) XXX_Size() int { + return m.Size() +} +func (m *ConversionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ConversionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ConversionResponse proto.InternalMessageInfo + +func (m *ConversionReview) Reset() { *m = ConversionReview{} } +func (*ConversionReview) ProtoMessage() {} +func (*ConversionReview) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{2} +} +func (m *ConversionReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConversionReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConversionReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConversionReview.Merge(m, src) +} +func (m *ConversionReview) XXX_Size() int { + return m.Size() +} +func (m *ConversionReview) XXX_DiscardUnknown() { + xxx_messageInfo_ConversionReview.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ConversionReview proto.InternalMessageInfo + +func (m *CustomResourceColumnDefinition) Reset() { *m = CustomResourceColumnDefinition{} } +func (*CustomResourceColumnDefinition) ProtoMessage() {} +func (*CustomResourceColumnDefinition) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{3} +} +func (m *CustomResourceColumnDefinition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceColumnDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceColumnDefinition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceColumnDefinition.Merge(m, src) +} +func (m *CustomResourceColumnDefinition) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceColumnDefinition) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceColumnDefinition.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceColumnDefinition proto.InternalMessageInfo + +func (m *CustomResourceConversion) Reset() { *m = CustomResourceConversion{} } +func (*CustomResourceConversion) ProtoMessage() {} +func (*CustomResourceConversion) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{4} +} +func (m *CustomResourceConversion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceConversion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceConversion) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceConversion.Merge(m, src) +} +func (m *CustomResourceConversion) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceConversion) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceConversion.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceConversion proto.InternalMessageInfo + +func (m *CustomResourceDefinition) Reset() { *m = CustomResourceDefinition{} } +func (*CustomResourceDefinition) ProtoMessage() {} +func (*CustomResourceDefinition) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{5} +} +func (m *CustomResourceDefinition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinition.Merge(m, src) +} +func (m *CustomResourceDefinition) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinition) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinition.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinition proto.InternalMessageInfo + +func (m *CustomResourceDefinitionCondition) Reset() { *m = CustomResourceDefinitionCondition{} } +func (*CustomResourceDefinitionCondition) ProtoMessage() {} +func (*CustomResourceDefinitionCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{6} +} +func (m *CustomResourceDefinitionCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionCondition.Merge(m, src) +} +func (m *CustomResourceDefinitionCondition) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionCondition) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionCondition proto.InternalMessageInfo + +func (m *CustomResourceDefinitionList) Reset() { *m = CustomResourceDefinitionList{} } +func (*CustomResourceDefinitionList) ProtoMessage() {} +func (*CustomResourceDefinitionList) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{7} +} +func (m *CustomResourceDefinitionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionList.Merge(m, src) +} +func (m *CustomResourceDefinitionList) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionList) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionList.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionList proto.InternalMessageInfo + +func (m *CustomResourceDefinitionNames) Reset() { *m = CustomResourceDefinitionNames{} } +func (*CustomResourceDefinitionNames) ProtoMessage() {} +func (*CustomResourceDefinitionNames) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{8} +} +func (m *CustomResourceDefinitionNames) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionNames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionNames) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionNames.Merge(m, src) +} +func (m *CustomResourceDefinitionNames) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionNames) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionNames.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionNames proto.InternalMessageInfo + +func (m *CustomResourceDefinitionSpec) Reset() { *m = CustomResourceDefinitionSpec{} } +func (*CustomResourceDefinitionSpec) ProtoMessage() {} +func (*CustomResourceDefinitionSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{9} +} +func (m *CustomResourceDefinitionSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionSpec.Merge(m, src) +} +func (m *CustomResourceDefinitionSpec) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionSpec 
proto.InternalMessageInfo + +func (m *CustomResourceDefinitionStatus) Reset() { *m = CustomResourceDefinitionStatus{} } +func (*CustomResourceDefinitionStatus) ProtoMessage() {} +func (*CustomResourceDefinitionStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{10} +} +func (m *CustomResourceDefinitionStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionStatus.Merge(m, src) +} +func (m *CustomResourceDefinitionStatus) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionStatus proto.InternalMessageInfo + +func (m *CustomResourceDefinitionVersion) Reset() { *m = CustomResourceDefinitionVersion{} } +func (*CustomResourceDefinitionVersion) ProtoMessage() {} +func (*CustomResourceDefinitionVersion) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{11} +} +func (m *CustomResourceDefinitionVersion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionVersion.Merge(m, src) +} +func (m *CustomResourceDefinitionVersion) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionVersion) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionVersion.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionVersion proto.InternalMessageInfo + +func (m *CustomResourceSubresourceScale) Reset() { *m = CustomResourceSubresourceScale{} } +func (*CustomResourceSubresourceScale) ProtoMessage() {} +func (*CustomResourceSubresourceScale) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{12} +} +func (m *CustomResourceSubresourceScale) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceSubresourceScale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceSubresourceScale) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceSubresourceScale.Merge(m, src) +} +func (m *CustomResourceSubresourceScale) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceSubresourceScale) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceSubresourceScale.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceSubresourceScale proto.InternalMessageInfo + +func (m *CustomResourceSubresourceStatus) Reset() { *m = CustomResourceSubresourceStatus{} } +func (*CustomResourceSubresourceStatus) ProtoMessage() {} +func (*CustomResourceSubresourceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{13} +} +func (m *CustomResourceSubresourceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*CustomResourceSubresourceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceSubresourceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceSubresourceStatus.Merge(m, src) +} +func (m *CustomResourceSubresourceStatus) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceSubresourceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceSubresourceStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceSubresourceStatus proto.InternalMessageInfo + +func (m *CustomResourceSubresources) Reset() { *m = CustomResourceSubresources{} } +func (*CustomResourceSubresources) ProtoMessage() {} +func (*CustomResourceSubresources) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{14} +} +func (m *CustomResourceSubresources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceSubresources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceSubresources) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceSubresources.Merge(m, src) +} +func (m *CustomResourceSubresources) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceSubresources) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceSubresources.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceSubresources proto.InternalMessageInfo + +func (m *CustomResourceValidation) Reset() { *m = CustomResourceValidation{} } +func (*CustomResourceValidation) ProtoMessage() {} +func (*CustomResourceValidation) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{15} +} +func (m *CustomResourceValidation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceValidation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceValidation) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceValidation.Merge(m, src) +} +func (m *CustomResourceValidation) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceValidation) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceValidation.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceValidation proto.InternalMessageInfo + +func (m *ExternalDocumentation) Reset() { *m = ExternalDocumentation{} } +func (*ExternalDocumentation) ProtoMessage() {} +func (*ExternalDocumentation) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{16} +} +func (m *ExternalDocumentation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalDocumentation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalDocumentation) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalDocumentation.Merge(m, src) +} +func (m *ExternalDocumentation) XXX_Size() int { + return m.Size() +} +func (m *ExternalDocumentation) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalDocumentation.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalDocumentation proto.InternalMessageInfo + +func (m *JSON) Reset() { *m 
= JSON{} } +func (*JSON) ProtoMessage() {} +func (*JSON) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{17} +} +func (m *JSON) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSON) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSON) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSON.Merge(m, src) +} +func (m *JSON) XXX_Size() int { + return m.Size() +} +func (m *JSON) XXX_DiscardUnknown() { + xxx_messageInfo_JSON.DiscardUnknown(m) +} + +var xxx_messageInfo_JSON proto.InternalMessageInfo + +func (m *JSONSchemaProps) Reset() { *m = JSONSchemaProps{} } +func (*JSONSchemaProps) ProtoMessage() {} +func (*JSONSchemaProps) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{18} +} +func (m *JSONSchemaProps) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaProps) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaProps) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaProps.Merge(m, src) +} +func (m *JSONSchemaProps) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaProps) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaProps.DiscardUnknown(m) +} + +var xxx_messageInfo_JSONSchemaProps proto.InternalMessageInfo + +func (m *JSONSchemaPropsOrArray) Reset() { *m = JSONSchemaPropsOrArray{} } +func (*JSONSchemaPropsOrArray) ProtoMessage() {} +func (*JSONSchemaPropsOrArray) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{19} +} +func (m *JSONSchemaPropsOrArray) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaPropsOrArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaPropsOrArray) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaPropsOrArray.Merge(m, src) +} +func (m *JSONSchemaPropsOrArray) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaPropsOrArray) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaPropsOrArray.DiscardUnknown(m) +} + +var xxx_messageInfo_JSONSchemaPropsOrArray proto.InternalMessageInfo + +func (m *JSONSchemaPropsOrBool) Reset() { *m = JSONSchemaPropsOrBool{} } +func (*JSONSchemaPropsOrBool) ProtoMessage() {} +func (*JSONSchemaPropsOrBool) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{20} +} +func (m *JSONSchemaPropsOrBool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaPropsOrBool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaPropsOrBool) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaPropsOrBool.Merge(m, src) +} +func (m *JSONSchemaPropsOrBool) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaPropsOrBool) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaPropsOrBool.DiscardUnknown(m) +} + +var xxx_messageInfo_JSONSchemaPropsOrBool proto.InternalMessageInfo + +func (m *JSONSchemaPropsOrStringArray) Reset() { *m = JSONSchemaPropsOrStringArray{} } +func (*JSONSchemaPropsOrStringArray) ProtoMessage() {} 
+func (*JSONSchemaPropsOrStringArray) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{21} +} +func (m *JSONSchemaPropsOrStringArray) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaPropsOrStringArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaPropsOrStringArray) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaPropsOrStringArray.Merge(m, src) +} +func (m *JSONSchemaPropsOrStringArray) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaPropsOrStringArray) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaPropsOrStringArray.DiscardUnknown(m) +} + +var xxx_messageInfo_JSONSchemaPropsOrStringArray proto.InternalMessageInfo + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{22} +} +func (m *ServiceReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceReference.Merge(m, src) +} +func (m *ServiceReference) XXX_Size() int { + return m.Size() +} +func (m *ServiceReference) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceReference proto.InternalMessageInfo + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{23} +} +func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookClientConfig.Merge(m, src) +} +func (m *WebhookClientConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookClientConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo + +func (m *WebhookConversion) Reset() { *m = WebhookConversion{} } +func (*WebhookConversion) ProtoMessage() {} +func (*WebhookConversion) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{24} +} +func (m *WebhookConversion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookConversion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookConversion) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookConversion.Merge(m, src) +} +func (m *WebhookConversion) XXX_Size() int { + return m.Size() +} +func (m *WebhookConversion) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookConversion.DiscardUnknown(m) +} + +var xxx_messageInfo_WebhookConversion proto.InternalMessageInfo + +func init() { + 
proto.RegisterType((*ConversionRequest)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.ConversionRequest") + proto.RegisterType((*ConversionResponse)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.ConversionResponse") + proto.RegisterType((*ConversionReview)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.ConversionReview") + proto.RegisterType((*CustomResourceColumnDefinition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceColumnDefinition") + proto.RegisterType((*CustomResourceConversion)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceConversion") + proto.RegisterType((*CustomResourceDefinition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinition") + proto.RegisterType((*CustomResourceDefinitionCondition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionCondition") + proto.RegisterType((*CustomResourceDefinitionList)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionList") + proto.RegisterType((*CustomResourceDefinitionNames)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionNames") + proto.RegisterType((*CustomResourceDefinitionSpec)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionSpec") + proto.RegisterType((*CustomResourceDefinitionStatus)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionStatus") + proto.RegisterType((*CustomResourceDefinitionVersion)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionVersion") + proto.RegisterType((*CustomResourceSubresourceScale)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceSubresourceScale") + proto.RegisterType((*CustomResourceSubresourceStatus)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceSubresourceStatus") + proto.RegisterType((*CustomResourceSubresources)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceSubresources") + proto.RegisterType((*CustomResourceValidation)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceValidation") + proto.RegisterType((*ExternalDocumentation)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.ExternalDocumentation") + proto.RegisterType((*JSON)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSON") + proto.RegisterType((*JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps") + proto.RegisterMapType((JSONSchemaDefinitions)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps.DefinitionsEntry") + proto.RegisterMapType((JSONSchemaDependencies)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps.DependenciesEntry") + proto.RegisterMapType((map[string]JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps.PatternPropertiesEntry") + proto.RegisterMapType((map[string]JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps.PropertiesEntry") + proto.RegisterType((*JSONSchemaPropsOrArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrArray") + proto.RegisterType((*JSONSchemaPropsOrBool)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrBool") + 
proto.RegisterType((*JSONSchemaPropsOrStringArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrStringArray") + proto.RegisterType((*ServiceReference)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.ServiceReference") + proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.WebhookClientConfig") + proto.RegisterType((*WebhookConversion)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.WebhookConversion") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto", fileDescriptor_f5a35c9667703937) +} + +var fileDescriptor_f5a35c9667703937 = []byte{ + // 2919 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x5a, 0xcd, 0x6f, 0x24, 0x47, + 0x15, 0xdf, 0x1e, 0x7f, 0x8d, 0xcb, 0xf6, 0xda, 0xae, 0x5d, 0x9b, 0x5e, 0x67, 0xd7, 0xe3, 0x9d, + 0x90, 0xe0, 0x84, 0xcd, 0x38, 0xbb, 0x24, 0x24, 0xe4, 0x00, 0xf2, 0xd8, 0x4e, 0x70, 0xd6, 0x5e, + 0x5b, 0x35, 0xbb, 0x1b, 0x27, 0x41, 0x4a, 0xca, 0xdd, 0xe5, 0x71, 0xc7, 0xfd, 0xb5, 0x5d, 0xdd, + 0x63, 0x5b, 0x02, 0x29, 0x02, 0x45, 0x40, 0x24, 0x08, 0x07, 0x04, 0xe2, 0x80, 0x10, 0x42, 0x39, + 0xc0, 0x01, 0x6e, 0xf0, 0x2f, 0xe4, 0x82, 0x94, 0x03, 0x42, 0x91, 0x90, 0x46, 0x64, 0xf8, 0x13, + 0x00, 0x21, 0x7c, 0x40, 0xa8, 0x3e, 0xba, 0xba, 0xa6, 0x67, 0x66, 0x77, 0xb5, 0x1e, 0x27, 0xb7, + 0x99, 0xf7, 0xf5, 0x7b, 0xf5, 0xea, 0xd5, 0xab, 0xf7, 0x6a, 0x06, 0xe0, 0x83, 0x17, 0x69, 0xc5, + 0x09, 0x96, 0x0e, 0x92, 0x5d, 0x12, 0xf9, 0x24, 0x26, 0x74, 0xa9, 0x41, 0x7c, 0x3b, 0x88, 0x96, + 0x24, 0x03, 0x87, 0x0e, 0x39, 0x8a, 0x89, 0x4f, 0x9d, 0xc0, 0xa7, 0xcf, 0xe0, 0xd0, 0xa1, 0x24, + 0x6a, 0x90, 0x68, 0x29, 0x3c, 0xa8, 0x33, 0x1e, 0x6d, 0x17, 0x58, 0x6a, 0x5c, 0x5f, 0xaa, 0x13, + 0x9f, 0x44, 0x38, 0x26, 0x76, 0x25, 0x8c, 0x82, 0x38, 0x80, 0x2f, 0x0a, 0x4b, 0x95, 0x36, 0xc1, + 0xb7, 0x94, 0xa5, 0x4a, 0x78, 0x50, 0x67, 0x3c, 0xda, 0x2e, 0x50, 0x69, 0x5c, 0x9f, 0x7b, 0xa6, + 0xee, 0xc4, 0xfb, 0xc9, 0x6e, 0xc5, 0x0a, 0xbc, 0xa5, 0x7a, 0x50, 0x0f, 0x96, 0xb8, 0xc1, 0xdd, + 0x64, 0x8f, 0x7f, 0xe3, 0x5f, 0xf8, 0x27, 0x01, 0x34, 0xf7, 0x5c, 0xe6, 0xb2, 0x87, 0xad, 0x7d, + 0xc7, 0x27, 0xd1, 0x71, 0xe6, 0xa7, 0x47, 0x62, 0xdc, 0xc5, 0xbd, 0xb9, 0xa5, 0x5e, 0x5a, 0x51, + 0xe2, 0xc7, 0x8e, 0x47, 0x3a, 0x14, 0xbe, 0xfa, 0x20, 0x05, 0x6a, 0xed, 0x13, 0x0f, 0xe7, 0xf5, + 0xca, 0x27, 0x06, 0x98, 0x5e, 0x09, 0xfc, 0x06, 0x89, 0xd8, 0x02, 0x11, 0xb9, 0x97, 0x10, 0x1a, + 0xc3, 0x2a, 0x18, 0x48, 0x1c, 0xdb, 0x34, 0x16, 0x8c, 0xc5, 0xd1, 0xea, 0xb3, 0x1f, 0x35, 0x4b, + 0xe7, 0x5a, 0xcd, 0xd2, 0xc0, 0x9d, 0xf5, 0xd5, 0x93, 0x66, 0xe9, 0x6a, 0x2f, 0xa4, 0xf8, 0x38, + 0x24, 0xb4, 0x72, 0x67, 0x7d, 0x15, 0x31, 0x65, 0xf8, 0x0a, 0x98, 0xb6, 0x09, 0x75, 0x22, 0x62, + 0x2f, 0x6f, 0xaf, 0xdf, 0x15, 0xf6, 0xcd, 0x02, 0xb7, 0x78, 0x49, 0x5a, 0x9c, 0x5e, 0xcd, 0x0b, + 0xa0, 0x4e, 0x1d, 0xb8, 0x03, 0x46, 0x82, 0xdd, 0x77, 0x88, 0x15, 0x53, 0x73, 0x60, 0x61, 0x60, + 0x71, 0xec, 0xc6, 0x33, 0x95, 0x6c, 0xf3, 0x94, 0x0b, 0x7c, 0xc7, 0xe4, 0x62, 0x2b, 0x08, 0x1f, + 0xae, 0xa5, 0x9b, 0x56, 0x9d, 0x94, 0x68, 0x23, 0x5b, 0xc2, 0x0a, 0x4a, 0xcd, 0x95, 0x7f, 0x53, + 0x00, 0x50, 0x5f, 0x3c, 0x0d, 0x03, 0x9f, 0x92, 0xbe, 0xac, 0x9e, 0x82, 0x29, 0x8b, 0x5b, 0x8e, + 0x89, 0x2d, 0x71, 0xcd, 0xc2, 0xa3, 0x78, 0x6f, 0x4a, 0xfc, 0xa9, 0x95, 0x9c, 0x39, 0xd4, 0x01, + 0x00, 0x6f, 0x83, 0xe1, 0x88, 0xd0, 0xc4, 0x8d, 0xcd, 0x81, 0x05, 0x63, 0x71, 0xec, 0xc6, 0xb5, + 0x9e, 
0x50, 0x3c, 0xb5, 0x59, 0xf2, 0x55, 0x1a, 0xd7, 0x2b, 0xb5, 0x18, 0xc7, 0x09, 0xad, 0x9e, + 0x97, 0x48, 0xc3, 0x88, 0xdb, 0x40, 0xd2, 0x56, 0xf9, 0x7f, 0x06, 0x98, 0xd2, 0xa3, 0xd4, 0x70, + 0xc8, 0x21, 0x8c, 0xc0, 0x48, 0x24, 0x92, 0x85, 0xc7, 0x69, 0xec, 0xc6, 0xcd, 0xca, 0xa3, 0x9e, + 0xa8, 0x4a, 0x47, 0xfe, 0x55, 0xc7, 0xd8, 0x76, 0xc9, 0x2f, 0x28, 0x05, 0x82, 0x0d, 0x50, 0x8c, + 0xe4, 0x1e, 0xf1, 0x44, 0x1a, 0xbb, 0xb1, 0xd1, 0x1f, 0x50, 0x61, 0xb3, 0x3a, 0xde, 0x6a, 0x96, + 0x8a, 0xe9, 0x37, 0xa4, 0xb0, 0xca, 0xbf, 0x2a, 0x80, 0xf9, 0x95, 0x84, 0xc6, 0x81, 0x87, 0x08, + 0x0d, 0x92, 0xc8, 0x22, 0x2b, 0x81, 0x9b, 0x78, 0xfe, 0x2a, 0xd9, 0x73, 0x7c, 0x27, 0x66, 0x39, + 0xba, 0x00, 0x06, 0x7d, 0xec, 0x11, 0x99, 0x33, 0xe3, 0x32, 0x92, 0x83, 0xb7, 0xb0, 0x47, 0x10, + 0xe7, 0x30, 0x09, 0x96, 0x22, 0xf2, 0x04, 0x28, 0x89, 0xdb, 0xc7, 0x21, 0x41, 0x9c, 0x03, 0x9f, + 0x04, 0xc3, 0x7b, 0x41, 0xe4, 0x61, 0xb1, 0x7b, 0xa3, 0xd9, 0x7e, 0xbc, 0xcc, 0xa9, 0x48, 0x72, + 0xe1, 0xf3, 0x60, 0xcc, 0x26, 0xd4, 0x8a, 0x9c, 0x90, 0x41, 0x9b, 0x83, 0x5c, 0xf8, 0x82, 0x14, + 0x1e, 0x5b, 0xcd, 0x58, 0x48, 0x97, 0x83, 0xd7, 0x40, 0x31, 0x8c, 0x9c, 0x20, 0x72, 0xe2, 0x63, + 0x73, 0x68, 0xc1, 0x58, 0x1c, 0xaa, 0x4e, 0x49, 0x9d, 0xe2, 0xb6, 0xa4, 0x23, 0x25, 0xc1, 0xa4, + 0xdf, 0xa1, 0x81, 0xbf, 0x8d, 0xe3, 0x7d, 0x73, 0x98, 0x23, 0x28, 0xe9, 0x57, 0x6b, 0x5b, 0xb7, + 0x18, 0x1d, 0x29, 0x89, 0xf2, 0x5f, 0x0d, 0x60, 0xe6, 0x23, 0x94, 0x86, 0x17, 0xbe, 0x0c, 0x8a, + 0x34, 0x66, 0x35, 0xa7, 0x7e, 0x2c, 0xe3, 0xf3, 0x74, 0x6a, 0xaa, 0x26, 0xe9, 0x27, 0xcd, 0xd2, + 0x6c, 0xa6, 0x91, 0x52, 0x79, 0x6c, 0x94, 0x2e, 0x4b, 0xb9, 0x43, 0xb2, 0xbb, 0x1f, 0x04, 0x07, + 0x72, 0xf7, 0x4f, 0x91, 0x72, 0xaf, 0x09, 0x43, 0x19, 0xa6, 0x48, 0x39, 0x49, 0x46, 0x29, 0x50, + 0xf9, 0xbf, 0x85, 0xfc, 0xc2, 0xb4, 0x4d, 0x7f, 0x1b, 0x14, 0xd9, 0x11, 0xb2, 0x71, 0x8c, 0xe5, + 0x21, 0x78, 0xf6, 0xe1, 0x0e, 0x9c, 0x38, 0xaf, 0x9b, 0x24, 0xc6, 0x55, 0x28, 0x43, 0x01, 0x32, + 0x1a, 0x52, 0x56, 0xe1, 0x11, 0x18, 0xa4, 0x21, 0xb1, 0xe4, 0x7a, 0xef, 0x9e, 0x22, 0xdb, 0x7b, + 0xac, 0xa1, 0x16, 0x12, 0x2b, 0x4b, 0x46, 0xf6, 0x0d, 0x71, 0x44, 0xf8, 0xae, 0x01, 0x86, 0x29, + 0xaf, 0x0b, 0xb2, 0x96, 0xec, 0x9c, 0x01, 0x78, 0xae, 0xee, 0x88, 0xef, 0x48, 0xe2, 0x96, 0xff, + 0x55, 0x00, 0x57, 0x7b, 0xa9, 0xae, 0x04, 0xbe, 0x2d, 0x36, 0x61, 0x5d, 0x9e, 0x2b, 0x91, 0x59, + 0xcf, 0xeb, 0xe7, 0xea, 0xa4, 0x59, 0x7a, 0xe2, 0x81, 0x06, 0xb4, 0x03, 0xf8, 0x35, 0xb5, 0x64, + 0x71, 0x48, 0xaf, 0xb6, 0x3b, 0x76, 0xd2, 0x2c, 0x4d, 0x2a, 0xb5, 0x76, 0x5f, 0x61, 0x03, 0x40, + 0x17, 0xd3, 0xf8, 0x76, 0x84, 0x7d, 0x2a, 0xcc, 0x3a, 0x1e, 0x91, 0x91, 0x7b, 0xfa, 0xe1, 0x92, + 0x82, 0x69, 0x54, 0xe7, 0x24, 0x24, 0xdc, 0xe8, 0xb0, 0x86, 0xba, 0x20, 0xb0, 0x9a, 0x11, 0x11, + 0x4c, 0x55, 0x19, 0xd0, 0x6a, 0x38, 0xa3, 0x22, 0xc9, 0x85, 0x4f, 0x81, 0x11, 0x8f, 0x50, 0x8a, + 0xeb, 0x84, 0x9f, 0xfd, 0xd1, 0xec, 0x52, 0xdc, 0x14, 0x64, 0x94, 0xf2, 0xcb, 0xff, 0x36, 0xc0, + 0xe5, 0x5e, 0x51, 0xdb, 0x70, 0x68, 0x0c, 0xbf, 0xd5, 0x91, 0xf6, 0x95, 0x87, 0x5b, 0x21, 0xd3, + 0xe6, 0x49, 0xaf, 0x4a, 0x49, 0x4a, 0xd1, 0x52, 0xfe, 0x10, 0x0c, 0x39, 0x31, 0xf1, 0xd2, 0xdb, + 0x12, 0xf5, 0x3f, 0xed, 0xaa, 0x13, 0x12, 0x7e, 0x68, 0x9d, 0x01, 0x21, 0x81, 0x57, 0xfe, 0xb0, + 0x00, 0xae, 0xf4, 0x52, 0x61, 0x75, 0x9c, 0xb2, 0x60, 0x87, 0x6e, 0x12, 0x61, 0x57, 0x26, 0x9b, + 0x0a, 0xf6, 0x36, 0xa7, 0x22, 0xc9, 0x65, 0xb5, 0x93, 0x3a, 0x7e, 0x3d, 0x71, 0x71, 0x24, 0x33, + 0x49, 0x2d, 0xb8, 0x26, 0xe9, 0x48, 0x49, 0xc0, 0x0a, 0x00, 0x74, 0x3f, 0x88, 0x62, 0x8e, 0xc1, + 0x3b, 0x9c, 0xd1, 0xea, 0x79, 
0x56, 0x11, 0x6a, 0x8a, 0x8a, 0x34, 0x09, 0x76, 0x91, 0x1c, 0x38, + 0xbe, 0x2d, 0x37, 0x5c, 0x9d, 0xdd, 0x9b, 0x8e, 0x6f, 0x23, 0xce, 0x61, 0xf8, 0xae, 0x43, 0x63, + 0x46, 0x91, 0xbb, 0xdd, 0x16, 0x70, 0x2e, 0xa9, 0x24, 0x18, 0xbe, 0xc5, 0x0a, 0x6c, 0x10, 0x39, + 0x84, 0x9a, 0xc3, 0x19, 0xfe, 0x8a, 0xa2, 0x22, 0x4d, 0xa2, 0xfc, 0xb7, 0xc1, 0xde, 0xf9, 0xc1, + 0x0a, 0x08, 0x7c, 0x1c, 0x0c, 0xd5, 0xa3, 0x20, 0x09, 0x65, 0x94, 0x54, 0xb4, 0x5f, 0x61, 0x44, + 0x24, 0x78, 0xf0, 0xdb, 0x60, 0xc8, 0x97, 0x0b, 0x66, 0x19, 0xf4, 0x5a, 0xff, 0xb7, 0x99, 0x47, + 0x2b, 0x43, 0x17, 0x81, 0x14, 0xa0, 0xf0, 0x39, 0x30, 0x44, 0xad, 0x20, 0x24, 0x32, 0x88, 0xf3, + 0xa9, 0x50, 0x8d, 0x11, 0x4f, 0x9a, 0xa5, 0x89, 0xd4, 0x1c, 0x27, 0x20, 0x21, 0x0c, 0xbf, 0x6f, + 0x80, 0xa2, 0xbc, 0x2e, 0xa8, 0x39, 0xc2, 0xd3, 0xf3, 0xf5, 0xfe, 0xfb, 0x2d, 0xdb, 0xde, 0x6c, + 0xcf, 0x24, 0x81, 0x22, 0x05, 0x0e, 0xbf, 0x6b, 0x00, 0x60, 0xa9, 0xbb, 0xcb, 0x1c, 0xe5, 0x31, + 0xec, 0xdb, 0x51, 0xd1, 0x6e, 0x45, 0x91, 0x08, 0x59, 0xab, 0xa4, 0xa1, 0xc2, 0x1a, 0x98, 0x09, + 0x23, 0xc2, 0x6d, 0xdf, 0xf1, 0x0f, 0xfc, 0xe0, 0xd0, 0x7f, 0xd9, 0x21, 0xae, 0x4d, 0x4d, 0xb0, + 0x60, 0x2c, 0x16, 0xab, 0x57, 0xa4, 0xff, 0x33, 0xdb, 0xdd, 0x84, 0x50, 0x77, 0xdd, 0xf2, 0x7b, + 0x03, 0xf9, 0x5e, 0x2b, 0x7f, 0x5f, 0xc0, 0x0f, 0xc4, 0xe2, 0x45, 0x1d, 0xa6, 0xa6, 0xc1, 0x37, + 0xe2, 0xcd, 0xfe, 0x6f, 0x84, 0xaa, 0xf5, 0xd9, 0x25, 0xad, 0x48, 0x14, 0x69, 0x2e, 0xc0, 0x9f, + 0x1a, 0x60, 0x02, 0x5b, 0x16, 0x09, 0x63, 0x62, 0x8b, 0x63, 0x5c, 0x38, 0xdb, 0xac, 0x9e, 0x91, + 0x0e, 0x4d, 0x2c, 0xeb, 0xa8, 0xa8, 0xdd, 0x09, 0xf8, 0x12, 0x38, 0x4f, 0xe3, 0x20, 0x22, 0x76, + 0x9a, 0x41, 0xb2, 0xba, 0xc0, 0x56, 0xb3, 0x74, 0xbe, 0xd6, 0xc6, 0x41, 0x39, 0xc9, 0xf2, 0x5f, + 0x06, 0x41, 0xe9, 0x01, 0x19, 0xfa, 0x10, 0x4d, 0xef, 0x93, 0x60, 0x98, 0xaf, 0xd4, 0xe6, 0x01, + 0x29, 0x6a, 0x57, 0x3d, 0xa7, 0x22, 0xc9, 0x65, 0xd7, 0x13, 0xc3, 0x67, 0xd7, 0xd3, 0x00, 0x17, + 0x54, 0xd7, 0x53, 0x4d, 0x90, 0x51, 0xca, 0x87, 0x0d, 0x30, 0x2c, 0x46, 0x59, 0x7e, 0x76, 0xfb, + 0x98, 0xf5, 0x77, 0xb1, 0xeb, 0xd8, 0x98, 0xef, 0x37, 0xe0, 0x2e, 0x72, 0x14, 0x24, 0xd1, 0xe0, + 0xfb, 0x06, 0x18, 0xa7, 0xc9, 0x6e, 0x24, 0xa5, 0x29, 0xaf, 0xac, 0x63, 0x37, 0x6e, 0xf7, 0x0b, + 0xbe, 0xa6, 0xd9, 0xae, 0x4e, 0xb5, 0x9a, 0xa5, 0x71, 0x9d, 0x82, 0xda, 0xb0, 0xe1, 0x1f, 0x0d, + 0x60, 0x62, 0x5b, 0xa4, 0x1f, 0x76, 0xb7, 0x23, 0xc7, 0x8f, 0x49, 0x24, 0x86, 0x12, 0x51, 0xc2, + 0xfb, 0xd8, 0xaf, 0xe5, 0x67, 0x9d, 0xea, 0x82, 0xdc, 0x1b, 0x73, 0xb9, 0x87, 0x07, 0xa8, 0xa7, + 0x6f, 0xe5, 0xff, 0x18, 0xf9, 0xe3, 0xad, 0xad, 0xb2, 0x66, 0x61, 0x97, 0xc0, 0x55, 0x30, 0xc5, + 0x3a, 0x50, 0x44, 0x42, 0xd7, 0xb1, 0x30, 0xe5, 0x13, 0x88, 0xc8, 0x30, 0x35, 0x0a, 0xd7, 0x72, + 0x7c, 0xd4, 0xa1, 0x01, 0x5f, 0x05, 0x50, 0xb4, 0x66, 0x6d, 0x76, 0xc4, 0x6d, 0xac, 0x9a, 0xac, + 0x5a, 0x87, 0x04, 0xea, 0xa2, 0x05, 0x57, 0xc0, 0xb4, 0x8b, 0x77, 0x89, 0x5b, 0x23, 0x2e, 0xb1, + 0xe2, 0x20, 0xe2, 0xa6, 0xc4, 0x8c, 0x36, 0xd3, 0x6a, 0x96, 0xa6, 0x37, 0xf2, 0x4c, 0xd4, 0x29, + 0x5f, 0xbe, 0x9a, 0x3f, 0x4f, 0xfa, 0xc2, 0x45, 0xc3, 0xfb, 0xb3, 0x02, 0x98, 0xeb, 0x9d, 0x14, + 0xf0, 0x3b, 0xaa, 0x3d, 0x15, 0x5d, 0xd7, 0xeb, 0x67, 0x90, 0x7a, 0xb2, 0x25, 0x07, 0x9d, 0xed, + 0x38, 0x3c, 0x66, 0x77, 0x26, 0x76, 0xd3, 0xd1, 0x7b, 0xe7, 0x2c, 0xd0, 0x99, 0xfd, 0xea, 0xa8, + 0xb8, 0x89, 0xb1, 0xcb, 0x2f, 0x5e, 0xec, 0x92, 0xf2, 0x87, 0x1d, 0xe3, 0x65, 0x76, 0x58, 0xe1, + 0x0f, 0x0c, 0x30, 0x19, 0x84, 0xc4, 0x5f, 0xde, 0x5e, 0xbf, 0xfb, 0x15, 0x71, 0x68, 0x65, 0x80, + 0xd6, 0x1f, 0xdd, 0x45, 0x36, 0xe3, 0x0a, 0x5b, 0xdb, 
0x51, 0x10, 0xd2, 0xea, 0x85, 0x56, 0xb3, + 0x34, 0xb9, 0xd5, 0x8e, 0x82, 0xf2, 0xb0, 0x65, 0x0f, 0xcc, 0xac, 0x1d, 0xc5, 0x24, 0xf2, 0xb1, + 0xbb, 0x1a, 0x58, 0x89, 0x47, 0xfc, 0x58, 0xf8, 0x98, 0x1b, 0xd9, 0x8d, 0x87, 0x1c, 0xd9, 0xaf, + 0x80, 0x81, 0x24, 0x72, 0x65, 0xd6, 0x8e, 0xa9, 0x87, 0x28, 0xb4, 0x81, 0x18, 0xbd, 0x7c, 0x15, + 0x0c, 0x32, 0x3f, 0xe1, 0x25, 0x30, 0x10, 0xe1, 0x43, 0x6e, 0x75, 0xbc, 0x3a, 0xc2, 0x44, 0x10, + 0x3e, 0x44, 0x8c, 0x56, 0xfe, 0x45, 0x09, 0x4c, 0xe6, 0xd6, 0x02, 0xe7, 0x40, 0x41, 0xbd, 0x6e, + 0x01, 0x69, 0xb4, 0xb0, 0xbe, 0x8a, 0x0a, 0x8e, 0x0d, 0x5f, 0x50, 0xd5, 0x55, 0x80, 0x96, 0x54, + 0xc1, 0xe6, 0x54, 0xd6, 0x1a, 0x65, 0xe6, 0x98, 0x23, 0x69, 0x79, 0x64, 0x3e, 0x90, 0x3d, 0x79, + 0x2a, 0x84, 0x0f, 0x64, 0x0f, 0x31, 0xda, 0xa3, 0xbe, 0x57, 0xa4, 0x0f, 0x26, 0x43, 0x0f, 0xf1, + 0x60, 0x32, 0x7c, 0xdf, 0x07, 0x93, 0xc7, 0xc1, 0x50, 0xec, 0xc4, 0x2e, 0x31, 0x47, 0xda, 0x1b, + 0xd2, 0xdb, 0x8c, 0x88, 0x04, 0x0f, 0x12, 0x30, 0x62, 0x93, 0x3d, 0x9c, 0xb8, 0xb1, 0x59, 0xe4, + 0xd9, 0xf3, 0xf5, 0xd3, 0x65, 0x8f, 0x78, 0x50, 0x58, 0x15, 0x26, 0x51, 0x6a, 0x1b, 0x3e, 0x01, + 0x46, 0x3c, 0x7c, 0xe4, 0x78, 0x89, 0xc7, 0xbb, 0x36, 0x43, 0x88, 0x6d, 0x0a, 0x12, 0x4a, 0x79, + 0xac, 0x08, 0x92, 0x23, 0xcb, 0x4d, 0xa8, 0xd3, 0x20, 0x92, 0x29, 0xdb, 0x2a, 0x55, 0x04, 0xd7, + 0x72, 0x7c, 0xd4, 0xa1, 0xc1, 0xc1, 0x1c, 0x9f, 0x2b, 0x8f, 0x69, 0x60, 0x82, 0x84, 0x52, 0x5e, + 0x3b, 0x98, 0x94, 0x1f, 0xef, 0x05, 0x26, 0x95, 0x3b, 0x34, 0xe0, 0x97, 0xc1, 0xa8, 0x87, 0x8f, + 0x36, 0x88, 0x5f, 0x8f, 0xf7, 0xcd, 0x89, 0x05, 0x63, 0x71, 0xa0, 0x3a, 0xd1, 0x6a, 0x96, 0x46, + 0x37, 0x53, 0x22, 0xca, 0xf8, 0x5c, 0xd8, 0xf1, 0xa5, 0xf0, 0x79, 0x4d, 0x38, 0x25, 0xa2, 0x8c, + 0xcf, 0xba, 0x83, 0x10, 0xc7, 0xec, 0x5c, 0x99, 0x93, 0xed, 0xc3, 0xeb, 0xb6, 0x20, 0xa3, 0x94, + 0x0f, 0x17, 0x41, 0xd1, 0xc3, 0x47, 0x7c, 0xae, 0x33, 0xa7, 0xb8, 0x59, 0xfe, 0xa8, 0xb7, 0x29, + 0x69, 0x48, 0x71, 0xb9, 0xa4, 0xe3, 0x0b, 0xc9, 0x69, 0x4d, 0x52, 0xd2, 0x90, 0xe2, 0xb2, 0xfc, + 0x4d, 0x7c, 0xe7, 0x5e, 0x42, 0x84, 0x30, 0xe4, 0x91, 0x51, 0xf9, 0x7b, 0x27, 0x63, 0x21, 0x5d, + 0x8e, 0xcd, 0x55, 0x5e, 0xe2, 0xc6, 0x4e, 0xe8, 0x92, 0xad, 0x3d, 0xf3, 0x02, 0x8f, 0x3f, 0x6f, + 0xa7, 0x37, 0x15, 0x15, 0x69, 0x12, 0xf0, 0x6d, 0x30, 0x48, 0xfc, 0xc4, 0x33, 0x2f, 0xf2, 0xeb, + 0xfb, 0xb4, 0xd9, 0xa7, 0xce, 0xcb, 0x9a, 0x9f, 0x78, 0x88, 0x5b, 0x86, 0x2f, 0x80, 0x09, 0x0f, + 0x1f, 0xb1, 0x22, 0x40, 0xa2, 0x98, 0x0d, 0x7b, 0x33, 0x7c, 0xdd, 0xd3, 0xac, 0x91, 0xdc, 0xd4, + 0x19, 0xa8, 0x5d, 0x8e, 0x2b, 0x3a, 0xbe, 0xa6, 0x38, 0xab, 0x29, 0xea, 0x0c, 0xd4, 0x2e, 0xc7, + 0x82, 0x1c, 0x91, 0x7b, 0x89, 0x13, 0x11, 0xdb, 0xfc, 0x02, 0xef, 0x3d, 0xe5, 0x1b, 0xab, 0xa0, + 0x21, 0xc5, 0x85, 0xf7, 0xd2, 0xb1, 0xdf, 0xe4, 0x87, 0x6f, 0xbb, 0x6f, 0xa5, 0x7b, 0x2b, 0x5a, + 0x8e, 0x22, 0x7c, 0x2c, 0x6e, 0x15, 0x7d, 0xe0, 0x87, 0x3e, 0x18, 0xc2, 0xae, 0xbb, 0xb5, 0x67, + 0x5e, 0xe2, 0x11, 0xef, 0xe3, 0x6d, 0xa1, 0x2a, 0xcc, 0x32, 0xb3, 0x8f, 0x04, 0x0c, 0xc3, 0x0b, + 0x7c, 0x96, 0x0b, 0x73, 0x67, 0x86, 0xb7, 0xc5, 0xec, 0x23, 0x01, 0xc3, 0xd7, 0xe7, 0x1f, 0x6f, + 0xed, 0x99, 0x8f, 0x9d, 0xdd, 0xfa, 0x98, 0x7d, 0x24, 0x60, 0xa0, 0x0d, 0x06, 0xfc, 0x20, 0x36, + 0x2f, 0xf7, 0xfb, 0xee, 0xe5, 0xb7, 0xc9, 0xad, 0x20, 0x46, 0xcc, 0x3c, 0xfc, 0x91, 0x01, 0x40, + 0x98, 0x65, 0xe2, 0x95, 0xd3, 0x8e, 0xe1, 0x39, 0xb4, 0x4a, 0x96, 0xbd, 0x6b, 0x7e, 0x1c, 0x1d, + 0x67, 0xb3, 0x9f, 0x96, 0xe5, 0x9a, 0x03, 0xf0, 0x97, 0x06, 0xb8, 0xa8, 0xb7, 0xbb, 0xca, 0xb3, + 0x79, 0x1e, 0x87, 0xad, 0x3e, 0x26, 0x72, 0x35, 0x08, 0xdc, 0xaa, 0xd9, 0x6a, 
0x96, 0x2e, 0x2e, + 0x77, 0x01, 0x44, 0x5d, 0xdd, 0x80, 0xbf, 0x35, 0xc0, 0xb4, 0xac, 0x8e, 0x9a, 0x73, 0x25, 0x1e, + 0xb6, 0xb7, 0xfb, 0x18, 0xb6, 0x3c, 0x84, 0x88, 0x9e, 0xfa, 0xa5, 0xaf, 0x83, 0x8f, 0x3a, 0xbd, + 0x82, 0x7f, 0x30, 0xc0, 0xb8, 0x4d, 0x42, 0xe2, 0xdb, 0xc4, 0xb7, 0x98, 0x9b, 0x0b, 0xa7, 0x9d, + 0xed, 0xf3, 0x6e, 0xae, 0x6a, 0xd6, 0x85, 0x87, 0x15, 0xe9, 0xe1, 0xb8, 0xce, 0x3a, 0x69, 0x96, + 0x66, 0x33, 0x55, 0x9d, 0x83, 0xda, 0x1c, 0x84, 0x3f, 0x36, 0xc0, 0x64, 0x16, 0x76, 0x71, 0x41, + 0x5c, 0x3d, 0x9b, 0x8d, 0xe7, 0x2d, 0xe8, 0x72, 0x3b, 0x16, 0xca, 0x83, 0xc3, 0xdf, 0x19, 0xac, + 0xdb, 0x4a, 0x67, 0x35, 0x6a, 0x96, 0x79, 0x04, 0xdf, 0xe8, 0x67, 0x04, 0x95, 0x71, 0x11, 0xc0, + 0x6b, 0x59, 0x27, 0xa7, 0x38, 0x27, 0xcd, 0xd2, 0x8c, 0x1e, 0x3f, 0xc5, 0x40, 0xba, 0x73, 0xf0, + 0x3d, 0x03, 0x8c, 0x93, 0xac, 0x61, 0xa6, 0xe6, 0xe3, 0xa7, 0x0d, 0x5d, 0xd7, 0xf6, 0x5b, 0x8c, + 0xd3, 0x1a, 0x8b, 0xa2, 0x36, 0x58, 0xd6, 0xfb, 0x91, 0x23, 0xec, 0x85, 0x2e, 0x31, 0xbf, 0xd8, + 0xbf, 0xde, 0x6f, 0x4d, 0x98, 0x44, 0xa9, 0x6d, 0x78, 0x0d, 0x14, 0xfd, 0xc4, 0x75, 0xf1, 0xae, + 0x4b, 0xcc, 0x27, 0x78, 0x17, 0xa1, 0xde, 0xf8, 0x6e, 0x49, 0x3a, 0x52, 0x12, 0x70, 0x0f, 0x2c, + 0x1c, 0xdd, 0x54, 0x7f, 0x80, 0xe8, 0xfa, 0x88, 0x66, 0x3e, 0xc9, 0xad, 0xcc, 0xb5, 0x9a, 0xa5, + 0xd9, 0x9d, 0xee, 0xcf, 0x6c, 0x0f, 0xb4, 0x01, 0xdf, 0x04, 0x8f, 0x69, 0x32, 0x6b, 0xde, 0x2e, + 0xb1, 0x6d, 0x62, 0xa7, 0x83, 0x96, 0xf9, 0x25, 0x0e, 0xa1, 0xce, 0xf1, 0x4e, 0x5e, 0x00, 0xdd, + 0x4f, 0x1b, 0x6e, 0x80, 0x59, 0x8d, 0xbd, 0xee, 0xc7, 0x5b, 0x51, 0x2d, 0x8e, 0x1c, 0xbf, 0x6e, + 0x2e, 0x72, 0xbb, 0x17, 0xd3, 0xd3, 0xb7, 0xa3, 0xf1, 0x50, 0x0f, 0x1d, 0xf8, 0xcd, 0x36, 0x6b, + 0xfc, 0xc7, 0x03, 0x1c, 0xde, 0x24, 0xc7, 0xd4, 0x7c, 0x8a, 0x37, 0x17, 0x7c, 0x9f, 0x77, 0x34, + 0x3a, 0xea, 0x21, 0x0f, 0xbf, 0x01, 0x2e, 0xe4, 0x38, 0x6c, 0xae, 0x30, 0x9f, 0x16, 0x03, 0x02, + 0xeb, 0x44, 0x77, 0x52, 0x22, 0xea, 0x26, 0x39, 0xc7, 0xa6, 0xce, 0x5c, 0xb1, 0x83, 0x53, 0x60, + 0xe0, 0x80, 0xc8, 0xdf, 0x38, 0x11, 0xfb, 0x08, 0xdf, 0x02, 0x43, 0x0d, 0xec, 0x26, 0xe9, 0xcc, + 0xdc, 0xbf, 0x4b, 0x11, 0x09, 0xbb, 0x2f, 0x15, 0x5e, 0x34, 0xe6, 0x3e, 0x30, 0xc0, 0x6c, 0xf7, + 0xf2, 0xfb, 0x79, 0x79, 0xf4, 0x73, 0x03, 0x4c, 0x77, 0x54, 0xda, 0x2e, 0xce, 0xb8, 0xed, 0xce, + 0xdc, 0xed, 0x63, 0xc9, 0x14, 0x19, 0xc3, 0x5b, 0x3f, 0xdd, 0xb3, 0x1f, 0x1a, 0x60, 0x2a, 0x5f, + 0xc1, 0x3e, 0xa7, 0x28, 0x95, 0xdf, 0x2f, 0x80, 0xd9, 0xee, 0xcd, 0x2a, 0xf4, 0xd4, 0x18, 0xde, + 0xf7, 0x97, 0x8c, 0x6e, 0x6f, 0x9b, 0xef, 0x1a, 0x60, 0xec, 0x1d, 0x25, 0x97, 0xfe, 0xf4, 0xd6, + 0xcf, 0xe7, 0x93, 0xf4, 0x8e, 0xc8, 0x18, 0x14, 0xe9, 0x90, 0xe5, 0xdf, 0x1b, 0x60, 0xa6, 0xeb, + 0xbd, 0xc7, 0xa6, 0x7c, 0xec, 0xba, 0xc1, 0xa1, 0x78, 0xf6, 0xd2, 0xde, 0x90, 0x97, 0x39, 0x15, + 0x49, 0xae, 0x16, 0xb3, 0xc2, 0x67, 0x10, 0xb3, 0xf2, 0x9f, 0x0c, 0x70, 0xf9, 0x7e, 0x59, 0xf7, + 0x59, 0xef, 0xe1, 0x22, 0x28, 0xca, 0xae, 0xf4, 0x98, 0xef, 0x9f, 0x1c, 0xb5, 0x64, 0x45, 0xe0, + 0x7f, 0xed, 0x10, 0x9f, 0xca, 0xbf, 0x36, 0xc0, 0x54, 0x8d, 0x44, 0x0d, 0xc7, 0x22, 0x88, 0xec, + 0x91, 0x88, 0xf8, 0x16, 0x81, 0x4b, 0x60, 0x94, 0xff, 0x34, 0x16, 0x62, 0x2b, 0x7d, 0xd0, 0x9f, + 0x96, 0x81, 0x1e, 0xbd, 0x95, 0x32, 0x50, 0x26, 0xa3, 0x1e, 0xff, 0x0b, 0x3d, 0x1f, 0xff, 0x2f, + 0x83, 0xc1, 0x30, 0x7b, 0x29, 0x2d, 0x32, 0x2e, 0x7f, 0x1c, 0xe5, 0x54, 0xce, 0x0d, 0xa2, 0x98, + 0x3f, 0x07, 0x0d, 0x49, 0x6e, 0x10, 0xc5, 0x88, 0x53, 0xcb, 0x7f, 0x36, 0xc0, 0x85, 0xf4, 0x3f, + 0x1a, 0xae, 0x43, 0xfc, 0x78, 0x25, 0xf0, 0xf7, 0x9c, 0x3a, 0xbc, 0x24, 0x5e, 0xc4, 0xb4, 0x67, + 0xa6, 
0xf4, 0x35, 0x0c, 0xde, 0x03, 0x23, 0x54, 0xac, 0x4a, 0x06, 0xfc, 0xd5, 0x47, 0x0f, 0x78, + 0x3e, 0x3c, 0xe2, 0x42, 0x4f, 0xa9, 0x29, 0x0e, 0x8b, 0xb9, 0x85, 0xab, 0x89, 0x6f, 0xcb, 0x57, + 0xd1, 0x71, 0x11, 0xf3, 0x95, 0x65, 0x41, 0x43, 0x8a, 0x5b, 0xfe, 0xa7, 0x01, 0xa6, 0x3b, 0xfe, + 0x73, 0x02, 0xbf, 0x67, 0x80, 0x71, 0x4b, 0x5b, 0x9e, 0xcc, 0xdc, 0xcd, 0xd3, 0xff, 0xaf, 0x45, + 0x33, 0x2a, 0x6e, 0x45, 0x9d, 0x82, 0xda, 0x40, 0xe1, 0x0e, 0x30, 0xad, 0xdc, 0xdf, 0xbb, 0x72, + 0x3f, 0x18, 0x5d, 0x6e, 0x35, 0x4b, 0xe6, 0x4a, 0x0f, 0x19, 0xd4, 0x53, 0xbb, 0xba, 0xf8, 0xd1, + 0xa7, 0xf3, 0xe7, 0x3e, 0xfe, 0x74, 0xfe, 0xdc, 0x27, 0x9f, 0xce, 0x9f, 0x7b, 0xb7, 0x35, 0x6f, + 0x7c, 0xd4, 0x9a, 0x37, 0x3e, 0x6e, 0xcd, 0x1b, 0x9f, 0xb4, 0xe6, 0x8d, 0xbf, 0xb7, 0xe6, 0x8d, + 0x9f, 0xfc, 0x63, 0xfe, 0xdc, 0x1b, 0x85, 0xc6, 0xf5, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0xcd, + 0xae, 0x89, 0xe9, 0xf2, 0x29, 0x00, 0x00, +} + +func (m *ConversionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConversionRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConversionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Objects) > 0 { + for iNdEx := len(m.Objects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Objects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.DesiredAPIVersion) + copy(dAtA[i:], m.DesiredAPIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DesiredAPIVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConversionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConversionResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConversionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.ConvertedObjects) > 0 { + for iNdEx := len(m.ConvertedObjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ConvertedObjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConversionReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConversionReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConversionReview) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Response != nil { + { + size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Request != nil { + { + size, err := m.Request.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CustomResourceColumnDefinition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceColumnDefinition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceColumnDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.JSONPath) + copy(dAtA[i:], m.JSONPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.JSONPath))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x28 + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + i -= len(m.Format) + copy(dAtA[i:], m.Format) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) + i-- + dAtA[i] = 0x1a + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceConversion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceConversion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceConversion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Webhook != nil { + { + size, err := m.Webhook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Strategy) + copy(dAtA[i:], m.Strategy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strategy))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionNames) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionNames) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionNames) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Categories) > 0 { + for iNdEx := len(m.Categories) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Categories[iNdEx]) + copy(dAtA[i:], m.Categories[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Categories[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + i -= len(m.ListKind) + copy(dAtA[i:], m.ListKind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ListKind))) + i-- + dAtA[i] = 0x2a + i -= 
len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x22 + if len(m.ShortNames) > 0 { + for iNdEx := len(m.ShortNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ShortNames[iNdEx]) + copy(dAtA[i:], m.ShortNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShortNames[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Singular) + copy(dAtA[i:], m.Singular) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Singular))) + i-- + dAtA[i] = 0x12 + i -= len(m.Plural) + copy(dAtA[i:], m.Plural) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Plural))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.PreserveUnknownFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + if m.Conversion != nil { + { + size, err := m.Conversion.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if len(m.Versions) > 0 { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + i -= len(m.Scope) + copy(dAtA[i:], m.Scope) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scope))) + i-- + dAtA[i] = 0x22 + { + size, err := m.Names.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.StoredVersions) > 0 { + for iNdEx := len(m.StoredVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.StoredVersions[iNdEx]) + copy(dAtA[i:], m.StoredVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoredVersions[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.AcceptedNames.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } 
+ i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionVersion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionVersion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AdditionalPrinterColumns) > 0 { + for iNdEx := len(m.AdditionalPrinterColumns) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AdditionalPrinterColumns[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.Subresources != nil { + { + size, err := m.Subresources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i-- + if m.Storage { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i-- + if m.Served { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceSubresourceScale) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceSubresourceScale) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceSubresourceScale) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LabelSelectorPath != nil { + i -= len(*m.LabelSelectorPath) + copy(dAtA[i:], *m.LabelSelectorPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.LabelSelectorPath))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.StatusReplicasPath) + copy(dAtA[i:], m.StatusReplicasPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StatusReplicasPath))) + i-- + dAtA[i] = 0x12 + i -= len(m.SpecReplicasPath) + copy(dAtA[i:], m.SpecReplicasPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SpecReplicasPath))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceSubresourceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceSubresourceStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceSubresourceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *CustomResourceSubresources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceSubresources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceSubresources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Scale != nil { + { + size, err := m.Scale.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Status != nil { + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CustomResourceValidation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceValidation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceValidation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.OpenAPIV3Schema != nil { + { + size, err := m.OpenAPIV3Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExternalDocumentation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExternalDocumentation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalDocumentation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.URL) + copy(dAtA[i:], m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) + i-- + dAtA[i] = 0x12 + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *JSON) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSON) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSON) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Raw != nil { + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *JSONSchemaProps) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaProps) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XListType != nil { + i -= len(*m.XListType) + copy(dAtA[i:], *m.XListType) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(*m.XListType))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xd2 + } + if len(m.XListMapKeys) > 0 { + for iNdEx := len(m.XListMapKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.XListMapKeys[iNdEx]) + copy(dAtA[i:], m.XListMapKeys[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.XListMapKeys[iNdEx]))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xca + } + } + i-- + if m.XIntOrString { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc0 + i-- + if m.XEmbeddedResource { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb8 + if m.XPreserveUnknownFields != nil { + i-- + if *m.XPreserveUnknownFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb0 + } + i-- + if m.Nullable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa8 + if m.Example != nil { + { + size, err := m.Example.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 + } + if m.ExternalDocs != nil { + { + size, err := m.ExternalDocs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } + if len(m.Definitions) > 0 { + keysForDefinitions := make([]string, 0, len(m.Definitions)) + for k := range m.Definitions { + keysForDefinitions = append(keysForDefinitions, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefinitions) + for iNdEx := len(keysForDefinitions) - 1; iNdEx >= 0; iNdEx-- { + v := m.Definitions[string(keysForDefinitions[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForDefinitions[iNdEx]) + copy(dAtA[i:], keysForDefinitions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDefinitions[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } + } + if m.AdditionalItems != nil { + { + size, err := m.AdditionalItems.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a + } + if len(m.Dependencies) > 0 { + keysForDependencies := make([]string, 0, len(m.Dependencies)) + for k := range m.Dependencies { + keysForDependencies = append(keysForDependencies, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDependencies) + for iNdEx := len(keysForDependencies) - 1; iNdEx >= 0; iNdEx-- { + v := m.Dependencies[string(keysForDependencies[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForDependencies[iNdEx]) + copy(dAtA[i:], keysForDependencies[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDependencies[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 + } + } + if len(m.PatternProperties) > 0 { + keysForPatternProperties := make([]string, 0, len(m.PatternProperties)) + for k := range m.PatternProperties { + 
keysForPatternProperties = append(keysForPatternProperties, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPatternProperties) + for iNdEx := len(keysForPatternProperties) - 1; iNdEx >= 0; iNdEx-- { + v := m.PatternProperties[string(keysForPatternProperties[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForPatternProperties[iNdEx]) + copy(dAtA[i:], keysForPatternProperties[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPatternProperties[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xfa + } + } + if m.AdditionalProperties != nil { + { + size, err := m.AdditionalProperties.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf2 + } + if len(m.Properties) > 0 { + keysForProperties := make([]string, 0, len(m.Properties)) + for k := range m.Properties { + keysForProperties = append(keysForProperties, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForProperties) + for iNdEx := len(keysForProperties) - 1; iNdEx >= 0; iNdEx-- { + v := m.Properties[string(keysForProperties[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForProperties[iNdEx]) + copy(dAtA[i:], keysForProperties[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForProperties[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea + } + } + if m.Not != nil { + { + size, err := m.Not.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + if len(m.AnyOf) > 0 { + for iNdEx := len(m.AnyOf) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AnyOf[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } + } + if len(m.OneOf) > 0 { + for iNdEx := len(m.OneOf) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.OneOf[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + } + } + if len(m.AllOf) > 0 { + for iNdEx := len(m.AllOf) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllOf[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + } + if m.Items != nil { + { + size, err := m.Items.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if len(m.Required) > 0 { + for iNdEx := len(m.Required) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Required[iNdEx]) + copy(dAtA[i:], m.Required[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Required[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + } + if 
m.MinProperties != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinProperties)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb0 + } + if m.MaxProperties != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxProperties)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if len(m.Enum) > 0 { + for iNdEx := len(m.Enum) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Enum[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + } + if m.MultipleOf != nil { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.MultipleOf)))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x99 + } + i-- + if m.UniqueItems { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + if m.MinItems != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinItems)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + } + if m.MaxItems != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxItems)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + i -= len(m.Pattern) + copy(dAtA[i:], m.Pattern) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pattern))) + i-- + dAtA[i] = 0x7a + if m.MinLength != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinLength)) + i-- + dAtA[i] = 0x70 + } + if m.MaxLength != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxLength)) + i-- + dAtA[i] = 0x68 + } + i-- + if m.ExclusiveMinimum { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + if m.Minimum != nil { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Minimum)))) + i-- + dAtA[i] = 0x59 + } + i-- + if m.ExclusiveMaximum { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + if m.Maximum != nil { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Maximum)))) + i-- + dAtA[i] = 0x49 + } + if m.Default != nil { + { + size, err := m.Default.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + i -= len(m.Title) + copy(dAtA[i:], m.Title) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Title))) + i-- + dAtA[i] = 0x3a + i -= len(m.Format) + copy(dAtA[i:], m.Format) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) + i-- + dAtA[i] = 0x32 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x2a + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + if m.Ref != nil { + i -= len(*m.Ref) + copy(dAtA[i:], *m.Ref) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Ref))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Schema) + copy(dAtA[i:], m.Schema) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schema))) + i-- + dAtA[i] = 0x12 + i -= len(m.ID) + copy(dAtA[i:], m.ID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *JSONSchemaPropsOrArray) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSONSchemaPropsOrArray) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaPropsOrArray) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.JSONSchemas) > 0 { + for iNdEx := len(m.JSONSchemas) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.JSONSchemas[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *JSONSchemaPropsOrBool) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSONSchemaPropsOrBool) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaPropsOrBool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i-- + if m.Allows { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *JSONSchemaPropsOrStringArray) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSONSchemaPropsOrStringArray) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaPropsOrStringArray) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Property) > 0 { + for iNdEx := len(m.Property) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Property[iNdEx]) + copy(dAtA[i:], m.Property[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Property[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ServiceReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x20 + } + if m.Path != nil { + i -= len(*m.Path) + copy(dAtA[i:], *m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + 
dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.URL != nil { + i -= len(*m.URL) + copy(dAtA[i:], *m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i-- + dAtA[i] = 0x1a + } + if m.CABundle != nil { + i -= len(m.CABundle) + copy(dAtA[i:], m.CABundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i-- + dAtA[i] = 0x12 + } + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WebhookConversion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookConversion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookConversion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ConversionReviewVersions) > 0 { + for iNdEx := len(m.ConversionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ConversionReviewVersions[iNdEx]) + copy(dAtA[i:], m.ConversionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConversionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.ClientConfig != nil { + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ConversionRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DesiredAPIVersion) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Objects) > 0 { + for _, e := range m.Objects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ConversionResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ConvertedObjects) > 0 { + for _, e := range m.ConvertedObjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Result.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ConversionReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Request != nil { + l = m.Request.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Response != nil { + l = m.Response.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CustomResourceColumnDefinition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + 
l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Format) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Priority)) + l = len(m.JSONPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CustomResourceConversion) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Strategy) + n += 1 + l + sovGenerated(uint64(l)) + if m.Webhook != nil { + l = m.Webhook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CustomResourceDefinition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CustomResourceDefinitionCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CustomResourceDefinitionList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CustomResourceDefinitionNames) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Plural) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Singular) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ShortNames) > 0 { + for _, s := range m.ShortNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ListKind) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Categories) > 0 { + for _, s := range m.Categories { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CustomResourceDefinitionSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Names.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Scope) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Versions) > 0 { + for _, e := range m.Versions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Conversion != nil { + l = m.Conversion.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *CustomResourceDefinitionStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.AcceptedNames.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.StoredVersions) > 0 { + for _, s := range m.StoredVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CustomResourceDefinitionVersion) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + if m.Schema != nil { + l = m.Schema.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if 
m.Subresources != nil { + l = m.Subresources.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AdditionalPrinterColumns) > 0 { + for _, e := range m.AdditionalPrinterColumns { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CustomResourceSubresourceScale) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SpecReplicasPath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StatusReplicasPath) + n += 1 + l + sovGenerated(uint64(l)) + if m.LabelSelectorPath != nil { + l = len(*m.LabelSelectorPath) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CustomResourceSubresourceStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *CustomResourceSubresources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Scale != nil { + l = m.Scale.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CustomResourceValidation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OpenAPIV3Schema != nil { + l = m.OpenAPIV3Schema.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ExternalDocumentation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.URL) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JSON) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Raw != nil { + l = len(m.Raw) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *JSONSchemaProps) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Schema) + n += 1 + l + sovGenerated(uint64(l)) + if m.Ref != nil { + l = len(*m.Ref) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Format) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Title) + n += 1 + l + sovGenerated(uint64(l)) + if m.Default != nil { + l = m.Default.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Maximum != nil { + n += 9 + } + n += 2 + if m.Minimum != nil { + n += 9 + } + n += 2 + if m.MaxLength != nil { + n += 1 + sovGenerated(uint64(*m.MaxLength)) + } + if m.MinLength != nil { + n += 1 + sovGenerated(uint64(*m.MinLength)) + } + l = len(m.Pattern) + n += 1 + l + sovGenerated(uint64(l)) + if m.MaxItems != nil { + n += 2 + sovGenerated(uint64(*m.MaxItems)) + } + if m.MinItems != nil { + n += 2 + sovGenerated(uint64(*m.MinItems)) + } + n += 3 + if m.MultipleOf != nil { + n += 10 + } + if len(m.Enum) > 0 { + for _, e := range m.Enum { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.MaxProperties != nil { + n += 2 + sovGenerated(uint64(*m.MaxProperties)) + } + if m.MinProperties != nil { + n += 2 + sovGenerated(uint64(*m.MinProperties)) + } + if len(m.Required) > 0 { + for _, s := range m.Required { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.Items != nil { + l = m.Items.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.AllOf) > 0 { + for _, e := range m.AllOf { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.OneOf) > 0 { + for _, e := range m.OneOf { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if 
len(m.AnyOf) > 0 { + for _, e := range m.AnyOf { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.Not != nil { + l = m.Not.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.Properties) > 0 { + for k, v := range m.Properties { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.AdditionalProperties != nil { + l = m.AdditionalProperties.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.PatternProperties) > 0 { + for k, v := range m.PatternProperties { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Dependencies) > 0 { + for k, v := range m.Dependencies { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.AdditionalItems != nil { + l = m.AdditionalItems.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.Definitions) > 0 { + for k, v := range m.Definitions { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.ExternalDocs != nil { + l = m.ExternalDocs.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Example != nil { + l = m.Example.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + n += 3 + if m.XPreserveUnknownFields != nil { + n += 3 + } + n += 3 + n += 3 + if len(m.XListMapKeys) > 0 { + for _, s := range m.XListMapKeys { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.XListType != nil { + l = len(*m.XListType) + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *JSONSchemaPropsOrArray) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Schema != nil { + l = m.Schema.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.JSONSchemas) > 0 { + for _, e := range m.JSONSchemas { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *JSONSchemaPropsOrBool) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + if m.Schema != nil { + l = m.Schema.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *JSONSchemaPropsOrStringArray) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Schema != nil { + l = m.Schema.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Property) > 0 { + for _, s := range m.Property { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Path != nil { + l = len(*m.Path) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } + return n +} + +func (m *WebhookClientConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CABundle != nil { + l = len(m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.URL != nil { + l = len(*m.URL) + n 
+= 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *WebhookConversion) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ClientConfig != nil { + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ConversionReviewVersions) > 0 { + for _, s := range m.ConversionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ConversionRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForObjects := "[]RawExtension{" + for _, f := range this.Objects { + repeatedStringForObjects += fmt.Sprintf("%v", f) + "," + } + repeatedStringForObjects += "}" + s := strings.Join([]string{`&ConversionRequest{`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `DesiredAPIVersion:` + fmt.Sprintf("%v", this.DesiredAPIVersion) + `,`, + `Objects:` + repeatedStringForObjects + `,`, + `}`, + }, "") + return s +} +func (this *ConversionResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForConvertedObjects := "[]RawExtension{" + for _, f := range this.ConvertedObjects { + repeatedStringForConvertedObjects += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConvertedObjects += "}" + s := strings.Join([]string{`&ConversionResponse{`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `ConvertedObjects:` + repeatedStringForConvertedObjects + `,`, + `Result:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "v1.Status", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ConversionReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConversionReview{`, + `Request:` + strings.Replace(this.Request.String(), "ConversionRequest", "ConversionRequest", 1) + `,`, + `Response:` + strings.Replace(this.Response.String(), "ConversionResponse", "ConversionResponse", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceColumnDefinition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceColumnDefinition{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Format:` + fmt.Sprintf("%v", this.Format) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `Priority:` + fmt.Sprintf("%v", this.Priority) + `,`, + `JSONPath:` + fmt.Sprintf("%v", this.JSONPath) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceConversion) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceConversion{`, + `Strategy:` + fmt.Sprintf("%v", this.Strategy) + `,`, + `Webhook:` + strings.Replace(this.Webhook.String(), "WebhookConversion", "WebhookConversion", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceDefinition{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CustomResourceDefinitionSpec", "CustomResourceDefinitionSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CustomResourceDefinitionStatus", 
"CustomResourceDefinitionStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinitionCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceDefinitionCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinitionList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]CustomResourceDefinition{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CustomResourceDefinition", "CustomResourceDefinition", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&CustomResourceDefinitionList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinitionNames) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceDefinitionNames{`, + `Plural:` + fmt.Sprintf("%v", this.Plural) + `,`, + `Singular:` + fmt.Sprintf("%v", this.Singular) + `,`, + `ShortNames:` + fmt.Sprintf("%v", this.ShortNames) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `ListKind:` + fmt.Sprintf("%v", this.ListKind) + `,`, + `Categories:` + fmt.Sprintf("%v", this.Categories) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinitionSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForVersions := "[]CustomResourceDefinitionVersion{" + for _, f := range this.Versions { + repeatedStringForVersions += strings.Replace(strings.Replace(f.String(), "CustomResourceDefinitionVersion", "CustomResourceDefinitionVersion", 1), `&`, ``, 1) + "," + } + repeatedStringForVersions += "}" + s := strings.Join([]string{`&CustomResourceDefinitionSpec{`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Names:` + strings.Replace(strings.Replace(this.Names.String(), "CustomResourceDefinitionNames", "CustomResourceDefinitionNames", 1), `&`, ``, 1) + `,`, + `Scope:` + fmt.Sprintf("%v", this.Scope) + `,`, + `Versions:` + repeatedStringForVersions + `,`, + `Conversion:` + strings.Replace(this.Conversion.String(), "CustomResourceConversion", "CustomResourceConversion", 1) + `,`, + `PreserveUnknownFields:` + fmt.Sprintf("%v", this.PreserveUnknownFields) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinitionStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]CustomResourceDefinitionCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "CustomResourceDefinitionCondition", "CustomResourceDefinitionCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&CustomResourceDefinitionStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `AcceptedNames:` + strings.Replace(strings.Replace(this.AcceptedNames.String(), "CustomResourceDefinitionNames", "CustomResourceDefinitionNames", 1), `&`, ``, 1) + `,`, + 
`StoredVersions:` + fmt.Sprintf("%v", this.StoredVersions) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinitionVersion) String() string { + if this == nil { + return "nil" + } + repeatedStringForAdditionalPrinterColumns := "[]CustomResourceColumnDefinition{" + for _, f := range this.AdditionalPrinterColumns { + repeatedStringForAdditionalPrinterColumns += strings.Replace(strings.Replace(f.String(), "CustomResourceColumnDefinition", "CustomResourceColumnDefinition", 1), `&`, ``, 1) + "," + } + repeatedStringForAdditionalPrinterColumns += "}" + s := strings.Join([]string{`&CustomResourceDefinitionVersion{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Served:` + fmt.Sprintf("%v", this.Served) + `,`, + `Storage:` + fmt.Sprintf("%v", this.Storage) + `,`, + `Schema:` + strings.Replace(this.Schema.String(), "CustomResourceValidation", "CustomResourceValidation", 1) + `,`, + `Subresources:` + strings.Replace(this.Subresources.String(), "CustomResourceSubresources", "CustomResourceSubresources", 1) + `,`, + `AdditionalPrinterColumns:` + repeatedStringForAdditionalPrinterColumns + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceSubresourceScale) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceSubresourceScale{`, + `SpecReplicasPath:` + fmt.Sprintf("%v", this.SpecReplicasPath) + `,`, + `StatusReplicasPath:` + fmt.Sprintf("%v", this.StatusReplicasPath) + `,`, + `LabelSelectorPath:` + valueToStringGenerated(this.LabelSelectorPath) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceSubresourceStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceSubresourceStatus{`, + `}`, + }, "") + return s +} +func (this *CustomResourceSubresources) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceSubresources{`, + `Status:` + strings.Replace(this.Status.String(), "CustomResourceSubresourceStatus", "CustomResourceSubresourceStatus", 1) + `,`, + `Scale:` + strings.Replace(this.Scale.String(), "CustomResourceSubresourceScale", "CustomResourceSubresourceScale", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceValidation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceValidation{`, + `OpenAPIV3Schema:` + strings.Replace(this.OpenAPIV3Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ExternalDocumentation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExternalDocumentation{`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `}`, + }, "") + return s +} +func (this *JSON) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JSON{`, + `Raw:` + valueToStringGenerated(this.Raw) + `,`, + `}`, + }, "") + return s +} +func (this *JSONSchemaProps) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnum := "[]JSON{" + for _, f := range this.Enum { + repeatedStringForEnum += strings.Replace(strings.Replace(f.String(), "JSON", "JSON", 1), `&`, ``, 1) + "," + } + repeatedStringForEnum += "}" + repeatedStringForAllOf := "[]JSONSchemaProps{" + for _, f := range this.AllOf { + repeatedStringForAllOf += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," + } + repeatedStringForAllOf += "}" + 
repeatedStringForOneOf := "[]JSONSchemaProps{" + for _, f := range this.OneOf { + repeatedStringForOneOf += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," + } + repeatedStringForOneOf += "}" + repeatedStringForAnyOf := "[]JSONSchemaProps{" + for _, f := range this.AnyOf { + repeatedStringForAnyOf += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," + } + repeatedStringForAnyOf += "}" + keysForProperties := make([]string, 0, len(this.Properties)) + for k := range this.Properties { + keysForProperties = append(keysForProperties, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForProperties) + mapStringForProperties := "map[string]JSONSchemaProps{" + for _, k := range keysForProperties { + mapStringForProperties += fmt.Sprintf("%v: %v,", k, this.Properties[k]) + } + mapStringForProperties += "}" + keysForPatternProperties := make([]string, 0, len(this.PatternProperties)) + for k := range this.PatternProperties { + keysForPatternProperties = append(keysForPatternProperties, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPatternProperties) + mapStringForPatternProperties := "map[string]JSONSchemaProps{" + for _, k := range keysForPatternProperties { + mapStringForPatternProperties += fmt.Sprintf("%v: %v,", k, this.PatternProperties[k]) + } + mapStringForPatternProperties += "}" + keysForDependencies := make([]string, 0, len(this.Dependencies)) + for k := range this.Dependencies { + keysForDependencies = append(keysForDependencies, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDependencies) + mapStringForDependencies := "JSONSchemaDependencies{" + for _, k := range keysForDependencies { + mapStringForDependencies += fmt.Sprintf("%v: %v,", k, this.Dependencies[k]) + } + mapStringForDependencies += "}" + keysForDefinitions := make([]string, 0, len(this.Definitions)) + for k := range this.Definitions { + keysForDefinitions = append(keysForDefinitions, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefinitions) + mapStringForDefinitions := "JSONSchemaDefinitions{" + for _, k := range keysForDefinitions { + mapStringForDefinitions += fmt.Sprintf("%v: %v,", k, this.Definitions[k]) + } + mapStringForDefinitions += "}" + s := strings.Join([]string{`&JSONSchemaProps{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Schema:` + fmt.Sprintf("%v", this.Schema) + `,`, + `Ref:` + valueToStringGenerated(this.Ref) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Format:` + fmt.Sprintf("%v", this.Format) + `,`, + `Title:` + fmt.Sprintf("%v", this.Title) + `,`, + `Default:` + strings.Replace(this.Default.String(), "JSON", "JSON", 1) + `,`, + `Maximum:` + valueToStringGenerated(this.Maximum) + `,`, + `ExclusiveMaximum:` + fmt.Sprintf("%v", this.ExclusiveMaximum) + `,`, + `Minimum:` + valueToStringGenerated(this.Minimum) + `,`, + `ExclusiveMinimum:` + fmt.Sprintf("%v", this.ExclusiveMinimum) + `,`, + `MaxLength:` + valueToStringGenerated(this.MaxLength) + `,`, + `MinLength:` + valueToStringGenerated(this.MinLength) + `,`, + `Pattern:` + fmt.Sprintf("%v", this.Pattern) + `,`, + `MaxItems:` + valueToStringGenerated(this.MaxItems) + `,`, + `MinItems:` + valueToStringGenerated(this.MinItems) + `,`, + `UniqueItems:` + fmt.Sprintf("%v", this.UniqueItems) + `,`, + `MultipleOf:` + valueToStringGenerated(this.MultipleOf) + `,`, + `Enum:` + repeatedStringForEnum + `,`, + `MaxProperties:` + 
valueToStringGenerated(this.MaxProperties) + `,`, + `MinProperties:` + valueToStringGenerated(this.MinProperties) + `,`, + `Required:` + fmt.Sprintf("%v", this.Required) + `,`, + `Items:` + strings.Replace(this.Items.String(), "JSONSchemaPropsOrArray", "JSONSchemaPropsOrArray", 1) + `,`, + `AllOf:` + repeatedStringForAllOf + `,`, + `OneOf:` + repeatedStringForOneOf + `,`, + `AnyOf:` + repeatedStringForAnyOf + `,`, + `Not:` + strings.Replace(this.Not.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `Properties:` + mapStringForProperties + `,`, + `AdditionalProperties:` + strings.Replace(this.AdditionalProperties.String(), "JSONSchemaPropsOrBool", "JSONSchemaPropsOrBool", 1) + `,`, + `PatternProperties:` + mapStringForPatternProperties + `,`, + `Dependencies:` + mapStringForDependencies + `,`, + `AdditionalItems:` + strings.Replace(this.AdditionalItems.String(), "JSONSchemaPropsOrBool", "JSONSchemaPropsOrBool", 1) + `,`, + `Definitions:` + mapStringForDefinitions + `,`, + `ExternalDocs:` + strings.Replace(this.ExternalDocs.String(), "ExternalDocumentation", "ExternalDocumentation", 1) + `,`, + `Example:` + strings.Replace(this.Example.String(), "JSON", "JSON", 1) + `,`, + `Nullable:` + fmt.Sprintf("%v", this.Nullable) + `,`, + `XPreserveUnknownFields:` + valueToStringGenerated(this.XPreserveUnknownFields) + `,`, + `XEmbeddedResource:` + fmt.Sprintf("%v", this.XEmbeddedResource) + `,`, + `XIntOrString:` + fmt.Sprintf("%v", this.XIntOrString) + `,`, + `XListMapKeys:` + fmt.Sprintf("%v", this.XListMapKeys) + `,`, + `XListType:` + valueToStringGenerated(this.XListType) + `,`, + `}`, + }, "") + return s +} +func (this *JSONSchemaPropsOrArray) String() string { + if this == nil { + return "nil" + } + repeatedStringForJSONSchemas := "[]JSONSchemaProps{" + for _, f := range this.JSONSchemas { + repeatedStringForJSONSchemas += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," + } + repeatedStringForJSONSchemas += "}" + s := strings.Join([]string{`&JSONSchemaPropsOrArray{`, + `Schema:` + strings.Replace(this.Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `JSONSchemas:` + repeatedStringForJSONSchemas + `,`, + `}`, + }, "") + return s +} +func (this *JSONSchemaPropsOrBool) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JSONSchemaPropsOrBool{`, + `Allows:` + fmt.Sprintf("%v", this.Allows) + `,`, + `Schema:` + strings.Replace(this.Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `}`, + }, "") + return s +} +func (this *JSONSchemaPropsOrStringArray) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JSONSchemaPropsOrStringArray{`, + `Schema:` + strings.Replace(this.Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `Property:` + fmt.Sprintf("%v", this.Property) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceReference{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Path:` + valueToStringGenerated(this.Path) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *WebhookClientConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookClientConfig{`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, + 
`CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `URL:` + valueToStringGenerated(this.URL) + `,`, + `}`, + }, "") + return s +} +func (this *WebhookConversion) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookConversion{`, + `ClientConfig:` + strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1) + `,`, + `ConversionReviewVersions:` + fmt.Sprintf("%v", this.ConversionReviewVersions) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ConversionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConversionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConversionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredAPIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DesiredAPIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Objects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Objects = append(m.Objects, runtime.RawExtension{}) + if err := m.Objects[len(m.Objects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return 
err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConversionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConversionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConversionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConvertedObjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConvertedObjects = append(m.ConvertedObjects, runtime.RawExtension{}) + if err := m.ConvertedObjects[len(m.ConvertedObjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > 
l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConversionReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConversionReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConversionReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Request == nil { + m.Request = &ConversionRequest{} + } + if err := m.Request.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Response == nil { + m.Response = &ConversionResponse{} + } + if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceColumnDefinition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceColumnDefinition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Format = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + } + m.Priority = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Priority |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JSONPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 
0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JSONPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceConversion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceConversion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Strategy = ConversionStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Webhook == nil { + m.Webhook = &WebhookConversion{} + } + if err := m.Webhook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire 
& 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = CustomResourceDefinitionConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + 
skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CustomResourceDefinition{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionNames: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionNames: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType 
= %d for field Plural", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Plural = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Singular", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Singular = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShortNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShortNames = append(m.ShortNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListKind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ListKind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Categories", wireType) + } + var stringLen uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Categories = append(m.Categories, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Names.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scope = ResourceScope(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, CustomResourceDefinitionVersion{}) + if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conversion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Conversion == nil { + m.Conversion = &CustomResourceConversion{} + } + if err := m.Conversion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PreserveUnknownFields", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PreserveUnknownFields = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, CustomResourceDefinitionCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AcceptedNames", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.AcceptedNames.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StoredVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StoredVersions = append(m.StoredVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + 
if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Served", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Served = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Storage = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Schema == nil { + m.Schema = &CustomResourceValidation{} + } + if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Subresources == nil { + m.Subresources = &CustomResourceSubresources{} + } + if err := m.Subresources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalPrinterColumns", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdditionalPrinterColumns = append(m.AdditionalPrinterColumns, CustomResourceColumnDefinition{}) + if err := m.AdditionalPrinterColumns[len(m.AdditionalPrinterColumns)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceSubresourceScale: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceSubresourceScale: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpecReplicasPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SpecReplicasPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StatusReplicasPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StatusReplicasPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelectorPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.LabelSelectorPath = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceSubresourceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + 
var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceSubresourceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceSubresourceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceSubresources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceSubresources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceSubresources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &CustomResourceSubresourceStatus{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scale", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scale == nil { + m.Scale = &CustomResourceSubresourceScale{} + } + if err := m.Scale.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceValidation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceValidation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceValidation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OpenAPIV3Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OpenAPIV3Schema == nil { + m.OpenAPIV3Schema = &JSONSchemaProps{} + } + if err := m.OpenAPIV3Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExternalDocumentation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalDocumentation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalDocumentation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.URL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSON) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSON: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSON: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Raw == nil { + m.Raw = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONSchemaProps: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONSchemaProps: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Schema = JSONSchemaURL(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Ref = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Format = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Title", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Title = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Default == nil { + m.Default = &JSON{} + } + if err := m.Default.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Maximum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.Maximum = &v2 + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong 
wireType = %d for field ExclusiveMaximum", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ExclusiveMaximum = bool(v != 0) + case 11: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Minimum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.Minimum = &v2 + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExclusiveMinimum", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ExclusiveMinimum = bool(v != 0) + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxLength", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxLength = &v + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinLength", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MinLength = &v + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pattern", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pattern = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxItems", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxItems = &v + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinItems", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MinItems = &v + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UniqueItems", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UniqueItems = bool(v != 0) + case 19: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field MultipleOf", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.MultipleOf = &v2 + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Enum", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Enum = append(m.Enum, JSON{}) + if err := m.Enum[len(m.Enum)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 21: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxProperties", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxProperties = &v + case 22: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinProperties", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MinProperties = &v + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Required", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Required = append(m.Required, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Items == nil { + m.Items = &JSONSchemaPropsOrArray{} + } + if err := m.Items.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 25: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllOf", wireType) + } + var 
msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllOf = append(m.AllOf, JSONSchemaProps{}) + if err := m.AllOf[len(m.AllOf)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OneOf", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OneOf = append(m.OneOf, JSONSchemaProps{}) + if err := m.OneOf[len(m.OneOf)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 27: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AnyOf", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AnyOf = append(m.AnyOf, JSONSchemaProps{}) + if err := m.AnyOf[len(m.AnyOf)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 28: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Not", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Not == nil { + m.Not = &JSONSchemaProps{} + } + if err := m.Not.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 29: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Properties", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Properties == nil { + m.Properties = make(map[string]JSONSchemaProps) + } + var mapkey string + mapvalue := &JSONSchemaProps{} + for iNdEx < postIndex { + 
entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &JSONSchemaProps{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Properties[mapkey] = *mapvalue + iNdEx = postIndex + case 30: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalProperties", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AdditionalProperties == nil { + m.AdditionalProperties = &JSONSchemaPropsOrBool{} + } + if err := m.AdditionalProperties.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 31: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PatternProperties", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PatternProperties == nil { + m.PatternProperties = make(map[string]JSONSchemaProps) + } + var mapkey string + mapvalue := &JSONSchemaProps{} + for iNdEx < postIndex { + entryPreIndex := 
iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &JSONSchemaProps{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.PatternProperties[mapkey] = *mapvalue + iNdEx = postIndex + case 32: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dependencies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Dependencies == nil { + m.Dependencies = make(JSONSchemaDependencies) + } + var mapkey string + mapvalue := &JSONSchemaPropsOrStringArray{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return 
ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &JSONSchemaPropsOrStringArray{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Dependencies[mapkey] = *mapvalue + iNdEx = postIndex + case 33: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalItems", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AdditionalItems == nil { + m.AdditionalItems = &JSONSchemaPropsOrBool{} + } + if err := m.AdditionalItems.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 34: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Definitions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Definitions == nil { + m.Definitions = make(JSONSchemaDefinitions) + } + var mapkey string + mapvalue := &JSONSchemaProps{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if 
postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &JSONSchemaProps{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Definitions[mapkey] = *mapvalue + iNdEx = postIndex + case 35: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalDocs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExternalDocs == nil { + m.ExternalDocs = &ExternalDocumentation{} + } + if err := m.ExternalDocs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 36: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Example", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Example == nil { + m.Example = &JSON{} + } + if err := m.Example.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 37: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nullable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Nullable = bool(v != 0) + case 38: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field XPreserveUnknownFields", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.XPreserveUnknownFields = &b + case 39: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field XEmbeddedResource", wireType) + } 
+ var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.XEmbeddedResource = bool(v != 0) + case 40: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field XIntOrString", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.XIntOrString = bool(v != 0) + case 41: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field XListMapKeys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.XListMapKeys = append(m.XListMapKeys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 42: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field XListType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.XListType = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONSchemaPropsOrArray: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONSchemaPropsOrArray: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + 
if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Schema == nil { + m.Schema = &JSONSchemaProps{} + } + if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JSONSchemas", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JSONSchemas = append(m.JSONSchemas, JSONSchemaProps{}) + if err := m.JSONSchemas[len(m.JSONSchemas)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONSchemaPropsOrBool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONSchemaPropsOrBool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allows", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Allows = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Schema == nil { + m.Schema = &JSONSchemaProps{} + } + if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx 
+ skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONSchemaPropsOrStringArray: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONSchemaPropsOrStringArray: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Schema == nil { + m.Schema = &JSONSchemaProps{} + } + if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Property", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Property = append(m.Property, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WebhookClientConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebhookClientConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Service == nil { + m.Service = &ServiceReference{} + } + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) + if m.CABundle == nil { + m.CABundle = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.URL = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebhookConversion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WebhookConversion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebhookConversion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClientConfig == nil { + m.ClientConfig = &WebhookClientConfig{} + } + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConversionReviewVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConversionReviewVersions = append(m.ConversionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = 
fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto new file mode 100644 index 000000000..77e6a8987 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto @@ -0,0 +1,592 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// ConversionRequest describes the conversion request parameters. +message ConversionRequest { + // uid is an identifier for the individual request/response. It allows distinguishing instances of requests which are + // otherwise identical (parallel requests, etc). + // The UID is meant to track the round trip (request/response) between the Kubernetes API server and the webhook, not the user request. + // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. + optional string uid = 1; + + // desiredAPIVersion is the version to convert given objects to. e.g. "myapi.example.com/v1" + optional string desiredAPIVersion = 2; + + // objects is the list of custom resource objects to be converted. + repeated k8s.io.apimachinery.pkg.runtime.RawExtension objects = 3; +} + +// ConversionResponse describes a conversion response. +message ConversionResponse { + // uid is an identifier for the individual request/response. + // This should be copied over from the corresponding `request.uid`. + optional string uid = 1; + + // convertedObjects is the list of converted version of `request.objects` if the `result` is successful, otherwise empty. + // The webhook is expected to set `apiVersion` of these objects to the `request.desiredAPIVersion`. The list + // must also have the same size as the input list with the same objects in the same order (equal kind, metadata.uid, metadata.name and metadata.namespace). + // The webhook is allowed to mutate labels and annotations. Any other change to the metadata is silently ignored. + repeated k8s.io.apimachinery.pkg.runtime.RawExtension convertedObjects = 2; + + // result contains the result of conversion with extra details if the conversion failed. `result.status` determines if + // the conversion failed or succeeded. The `result.status` field is required and represents the success or failure of the + // conversion. A successful conversion must set `result.status` to `Success`. A failed conversion must set + // `result.status` to `Failure` and provide more details in `result.message` and return http status 200. 
The `result.message` + // will be used to construct an error message for the end user. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Status result = 3; +} + +// ConversionReview describes a conversion request/response. +message ConversionReview { + // request describes the attributes for the conversion request. + // +optional + optional ConversionRequest request = 1; + + // response describes the attributes for the conversion response. + // +optional + optional ConversionResponse response = 2; +} + +// CustomResourceColumnDefinition specifies a column for server side printing. +message CustomResourceColumnDefinition { + // name is a human readable name for the column. + optional string name = 1; + + // type is an OpenAPI type definition for this column. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. + optional string type = 2; + + // format is an optional OpenAPI type definition for this column. The 'name' format is applied + // to the primary identifier column to assist in clients identifying column is the resource name. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. + // +optional + optional string format = 3; + + // description is a human readable description of this column. + // +optional + optional string description = 4; + + // priority is an integer defining the relative importance of this column compared to others. Lower + // numbers are considered higher priority. Columns that may be omitted in limited space scenarios + // should be given a priority greater than 0. + // +optional + optional int32 priority = 5; + + // jsonPath is a simple JSON path (i.e. with array notation) which is evaluated against + // each custom resource to produce the value for this column. + optional string jsonPath = 6; +} + +// CustomResourceConversion describes how to convert different versions of a CR. +message CustomResourceConversion { + // strategy specifies how custom resources are converted between versions. Allowed values are: + // - `None`: The converter only change the apiVersion and would not touch any other field in the custom resource. + // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information + // is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhook to be set. + optional string strategy = 1; + + // webhook describes how to call the conversion webhook. Required when `strategy` is set to `Webhook`. + // +optional + optional WebhookConversion webhook = 2; +} + +// CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format +// <.spec.name>.<.spec.group>. +message CustomResourceDefinition { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec describes how the user wants the resources to appear + optional CustomResourceDefinitionSpec spec = 2; + + // status indicates the actual state of the CustomResourceDefinition + // +optional + optional CustomResourceDefinitionStatus status = 3; +} + +// CustomResourceDefinitionCondition contains details for the current condition of this pod. +message CustomResourceDefinitionCondition { + // type is the type of the condition. Types include Established, NamesAccepted and Terminating. + optional string type = 1; + + // status is the status of the condition. + // Can be True, False, Unknown. 
+ optional string status = 2; + + // lastTransitionTime last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + optional string reason = 4; + + // message is a human-readable message indicating details about last transition. + // +optional + optional string message = 5; +} + +// CustomResourceDefinitionList is a list of CustomResourceDefinition objects. +message CustomResourceDefinitionList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items list individual CustomResourceDefinition objects + repeated CustomResourceDefinition items = 2; +} + +// CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition +message CustomResourceDefinitionNames { + // plural is the plural name of the resource to serve. + // The custom resources are served under `/apis///.../`. + // Must match the name of the CustomResourceDefinition (in the form `.`). + // Must be all lowercase. + optional string plural = 1; + + // singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`. + // +optional + optional string singular = 2; + + // shortNames are short names for the resource, exposed in API discovery documents, + // and used by clients to support invocations like `kubectl get `. + // It must be all lowercase. + // +optional + repeated string shortNames = 3; + + // kind is the serialized kind of the resource. It is normally CamelCase and singular. + // Custom resource instances will use this value as the `kind` attribute in API calls. + optional string kind = 4; + + // listKind is the serialized kind of the list for this resource. Defaults to "`kind`List". + // +optional + optional string listKind = 5; + + // categories is a list of grouped resources this custom resource belongs to (e.g. 'all'). + // This is published in API discovery documents, and used by clients to support invocations like + // `kubectl get all`. + // +optional + repeated string categories = 6; +} + +// CustomResourceDefinitionSpec describes how a user wants their resource to appear +message CustomResourceDefinitionSpec { + // group is the API group of the defined custom resource. + // The custom resources are served under `/apis//...`. + // Must match the name of the CustomResourceDefinition (in the form `.`). + optional string group = 1; + + // names specify the resource and kind names for the custom resource. + optional CustomResourceDefinitionNames names = 3; + + // scope indicates whether the defined custom resource is cluster- or namespace-scoped. + // Allowed values are `Cluster` and `Namespaced`. Default is `Namespaced`. + optional string scope = 4; + + // versions is the list of all API versions of the defined custom resource. + // Version names are used to compute the order in which served versions are listed in API discovery. + // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered + // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), + // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first + // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing + // major version, then minor version. 
An example sorted list of versions: + // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. + repeated CustomResourceDefinitionVersion versions = 7; + + // conversion defines conversion settings for the CRD. + // +optional + optional CustomResourceConversion conversion = 9; + + // preserveUnknownFields indicates that object fields which are not specified + // in the OpenAPI schema should be preserved when persisting to storage. + // apiVersion, kind, metadata and known fields inside metadata are always preserved. + // This field is deprecated in favor of setting `x-preserve-unknown-fields` to true in `spec.versions[*].schema.openAPIV3Schema`. + // See https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#pruning-versus-preserving-unknown-fields for details. + // +optional + optional bool preserveUnknownFields = 10; +} + +// CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition +message CustomResourceDefinitionStatus { + // conditions indicate state for particular aspects of a CustomResourceDefinition + // +optional + repeated CustomResourceDefinitionCondition conditions = 1; + + // acceptedNames are the names that are actually being used to serve discovery. + // They may be different than the names in spec. + optional CustomResourceDefinitionNames acceptedNames = 2; + + // storedVersions lists all versions of CustomResources that were ever persisted. Tracking these + // versions allows a migration path for stored versions in etcd. The field is mutable + // so a migration controller can finish a migration to another version (ensuring + // no old objects are left in storage), and then remove the rest of the + // versions from this list. + // Versions may not be removed from `spec.versions` while they exist in this list. + repeated string storedVersions = 3; +} + +// CustomResourceDefinitionVersion describes a version for CRD. +message CustomResourceDefinitionVersion { + // name is the version name, e.g. “v1”, “v2beta1”, etc. + // The custom resources are served under this version at `/apis///...` if `served` is true. + optional string name = 1; + + // served is a flag enabling/disabling this version from being served via REST APIs + optional bool served = 2; + + // storage indicates this version should be used when persisting custom resources to storage. + // There must be exactly one version with storage=true. + optional bool storage = 3; + + // schema describes the schema used for validation, pruning, and defaulting of this version of the custom resource. + // +optional + optional CustomResourceValidation schema = 4; + + // subresources specify what subresources this version of the defined custom resource have. + // +optional + optional CustomResourceSubresources subresources = 5; + + // additionalPrinterColumns specifies additional columns returned in Table output. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. + // If no columns are specified, a single column displaying the age of the custom resource is used. + // +optional + repeated CustomResourceColumnDefinition additionalPrinterColumns = 6; +} + +// CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources. +message CustomResourceSubresourceScale { + // specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. + // Only JSON paths without the array notation are allowed. 
+ // Must be a JSON Path under `.spec`. + // If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET. + optional string specReplicasPath = 1; + + // statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. + // Only JSON paths without the array notation are allowed. + // Must be a JSON Path under `.status`. + // If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource + // will default to 0. + optional string statusReplicasPath = 2; + + // labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. + // Only JSON paths without the array notation are allowed. + // Must be a JSON Path under `.status` or `.spec`. + // Must be set to work with HorizontalPodAutoscaler. + // The field pointed by this JSON path must be a string field (not a complex selector struct) + // which contains a serialized label selector in string form. + // More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource + // If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` + // subresource will default to the empty string. + // +optional + optional string labelSelectorPath = 3; +} + +// CustomResourceSubresourceStatus defines how to serve the status subresource for CustomResources. +// Status is represented by the `.status` JSON path inside of a CustomResource. When set, +// * exposes a /status subresource for the custom resource +// * PUT requests to the /status subresource take a custom resource object, and ignore changes to anything except the status stanza +// * PUT/POST/PATCH requests to the custom resource ignore changes to the status stanza +message CustomResourceSubresourceStatus { +} + +// CustomResourceSubresources defines the status and scale subresources for CustomResources. +message CustomResourceSubresources { + // status indicates the custom resource should serve a `/status` subresource. + // When enabled: + // 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. + // 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object. + // +optional + optional CustomResourceSubresourceStatus status = 1; + + // scale indicates the custom resource should serve a `/scale` subresource that returns an `autoscaling/v1` Scale object. + // +optional + optional CustomResourceSubresourceScale scale = 2; +} + +// CustomResourceValidation is a list of validation methods for CustomResources. +message CustomResourceValidation { + // openAPIV3Schema is the OpenAPI v3 schema to use for validation and pruning. + // +optional + optional JSONSchemaProps openAPIV3Schema = 1; +} + +// ExternalDocumentation allows referencing an external resource for extended documentation. +message ExternalDocumentation { + optional string description = 1; + + optional string url = 2; +} + +// JSON represents any valid JSON value. +// These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil. +message JSON { + optional bytes raw = 1; +} + +// JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/). 
+message JSONSchemaProps { + optional string id = 1; + + optional string schema = 2; + + optional string ref = 3; + + optional string description = 4; + + optional string type = 5; + + optional string format = 6; + + optional string title = 7; + + // default is a default value for undefined object fields. + // Defaulting is a beta feature under the CustomResourceDefaulting feature gate. + // Defaulting requires spec.preserveUnknownFields to be false. + optional JSON default = 8; + + optional double maximum = 9; + + optional bool exclusiveMaximum = 10; + + optional double minimum = 11; + + optional bool exclusiveMinimum = 12; + + optional int64 maxLength = 13; + + optional int64 minLength = 14; + + optional string pattern = 15; + + optional int64 maxItems = 16; + + optional int64 minItems = 17; + + optional bool uniqueItems = 18; + + optional double multipleOf = 19; + + repeated JSON enum = 20; + + optional int64 maxProperties = 21; + + optional int64 minProperties = 22; + + repeated string required = 23; + + optional JSONSchemaPropsOrArray items = 24; + + repeated JSONSchemaProps allOf = 25; + + repeated JSONSchemaProps oneOf = 26; + + repeated JSONSchemaProps anyOf = 27; + + optional JSONSchemaProps not = 28; + + map properties = 29; + + optional JSONSchemaPropsOrBool additionalProperties = 30; + + map patternProperties = 31; + + map dependencies = 32; + + optional JSONSchemaPropsOrBool additionalItems = 33; + + map definitions = 34; + + optional ExternalDocumentation externalDocs = 35; + + optional JSON example = 36; + + optional bool nullable = 37; + + // x-kubernetes-preserve-unknown-fields stops the API server + // decoding step from pruning fields which are not specified + // in the validation schema. This affects fields recursively, + // but switches back to normal pruning behaviour if nested + // properties or additionalProperties are specified in the schema. + // This can either be true or undefined. False is forbidden. + optional bool xKubernetesPreserveUnknownFields = 38; + + // x-kubernetes-embedded-resource defines that the value is an + // embedded Kubernetes runtime.Object, with TypeMeta and + // ObjectMeta. The type must be object. It is allowed to further + // restrict the embedded object. kind, apiVersion and metadata + // are validated automatically. x-kubernetes-preserve-unknown-fields + // is allowed to be true, but does not have to be if the object + // is fully specified (up to kind, apiVersion, metadata). + optional bool xKubernetesEmbeddedResource = 39; + + // x-kubernetes-int-or-string specifies that this value is + // either an integer or a string. If this is true, an empty + // type is allowed and type as child of anyOf is permitted + // if following one of the following patterns: + // + // 1) anyOf: + // - type: integer + // - type: string + // 2) allOf: + // - anyOf: + // - type: integer + // - type: string + // - ... zero or more + optional bool xKubernetesIntOrString = 40; + + // x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used + // as the index of the map. + // + // This tag MUST only be used on lists that have the "x-kubernetes-list-type" + // extension set to "map". Also, the values specified for this attribute must + // be a scalar typed field of the child structure (no nesting is supported). + // + // +optional + repeated string xKubernetesListMapKeys = 41; + + // x-kubernetes-list-type annotates an array to further describe its topology. 
+ // This extension must only be used on lists and may have 3 possible values: + // + // 1) `atomic`: the list is treated as a single entity, like a scalar. + // Atomic lists will be entirely replaced when updated. This extension + // may be used on any type of list (struct, scalar, ...). + // 2) `set`: + // Sets are lists that must not have multiple items with the same value. Each + // value must be a scalar (or another atomic type). + // 3) `map`: + // These lists are like maps in that their elements have a non-index key + // used to identify them. Order is preserved upon merge. The map tag + // must only be used on a list with elements of type object. + // Defaults to atomic for arrays. + // +optional + optional string xKubernetesListType = 42; +} + +// JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps +// or an array of JSONSchemaProps. Mainly here for serialization purposes. +message JSONSchemaPropsOrArray { + optional JSONSchemaProps schema = 1; + + repeated JSONSchemaProps jSONSchemas = 2; +} + +// JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. +// Defaults to true for the boolean property. +message JSONSchemaPropsOrBool { + optional bool allows = 1; + + optional JSONSchemaProps schema = 2; +} + +// JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array. +message JSONSchemaPropsOrStringArray { + optional JSONSchemaProps schema = 1; + + repeated string property = 2; +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +message ServiceReference { + // namespace is the namespace of the service. + // Required + optional string namespace = 1; + + // name is the name of the service. + // Required + optional string name = 2; + + // path is an optional URL path at which the webhook will be contacted. + // +optional + optional string path = 3; + + // port is an optional service port at which the webhook will be contacted. + // `port` should be a valid port number (1-65535, inclusive). + // Defaults to 443 for backward compatibility. + // +optional + optional int32 port = 4; +} + +// WebhookClientConfig contains the information to make a TLS connection with the webhook. +message WebhookClientConfig { + // url gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. 
+ // + // +optional + optional string url = 3; + + // service is a reference to the service for this webhook. Either + // service or url must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // +optional + optional ServiceReference service = 1; + + // caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional + optional bytes caBundle = 2; +} + +// WebhookConversion describes how to call a conversion webhook +message WebhookConversion { + // clientConfig is the instructions for how to call the webhook if strategy is `Webhook`. + // +optional + optional WebhookClientConfig clientConfig = 2; + + // conversionReviewVersions is an ordered list of preferred `ConversionReview` + // versions the Webhook expects. The API server will use the first version in + // the list which it supports. If none of the versions specified in this list + // are supported by API server, conversion will fail for the custom resource. + // If a persisted Webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail. + repeated string conversionReviewVersions = 3; +} + diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go new file mode 100644 index 000000000..ba7f286eb --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go @@ -0,0 +1,135 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "errors" + + "k8s.io/apimachinery/pkg/util/json" +) + +var jsTrue = []byte("true") +var jsFalse = []byte("false") + +func (s JSONSchemaPropsOrBool) MarshalJSON() ([]byte, error) { + if s.Schema != nil { + return json.Marshal(s.Schema) + } + + if s.Schema == nil && !s.Allows { + return jsFalse, nil + } + return jsTrue, nil +} + +func (s *JSONSchemaPropsOrBool) UnmarshalJSON(data []byte) error { + var nw JSONSchemaPropsOrBool + switch { + case len(data) == 0: + case data[0] == '{': + var sch JSONSchemaProps + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Allows = true + nw.Schema = &sch + case len(data) == 4 && string(data) == "true": + nw.Allows = true + case len(data) == 5 && string(data) == "false": + nw.Allows = false + default: + return errors.New("boolean or JSON schema expected") + } + *s = nw + return nil +} + +func (s JSONSchemaPropsOrStringArray) MarshalJSON() ([]byte, error) { + if len(s.Property) > 0 { + return json.Marshal(s.Property) + } + if s.Schema != nil { + return json.Marshal(s.Schema) + } + return []byte("null"), nil +} + +func (s *JSONSchemaPropsOrStringArray) UnmarshalJSON(data []byte) error { + var first byte + if len(data) > 1 { + first = data[0] + } + var nw JSONSchemaPropsOrStringArray + if first == '{' { + var sch JSONSchemaProps + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Property); err != nil { + return err + } + } + *s = nw + return nil +} + +func (s JSONSchemaPropsOrArray) MarshalJSON() ([]byte, error) { + if len(s.JSONSchemas) > 0 { + return json.Marshal(s.JSONSchemas) + } + return json.Marshal(s.Schema) +} + +func (s *JSONSchemaPropsOrArray) UnmarshalJSON(data []byte) error { + var nw JSONSchemaPropsOrArray + var first byte + if len(data) > 1 { + first = data[0] + } + if first == '{' { + var sch JSONSchemaProps + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.JSONSchemas); err != nil { + return err + } + } + *s = nw + return nil +} + +func (s JSON) MarshalJSON() ([]byte, error) { + if len(s.Raw) > 0 { + return s.Raw, nil + } + return []byte("null"), nil + +} + +func (s *JSON) UnmarshalJSON(data []byte) error { + if len(data) > 0 && string(data) != "null" { + s.Raw = data + } + return nil +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/register.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/register.go new file mode 100644 index 000000000..a1b2b60a6 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/register.go @@ -0,0 +1,62 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "apiextensions.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &CustomResourceDefinition{}, + &CustomResourceDefinitionList{}, + &ConversionReview{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs) +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go new file mode 100644 index 000000000..000f3fa1c --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go @@ -0,0 +1,463 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" +) + +// ConversionStrategyType describes different conversion types. +type ConversionStrategyType string + +const ( + // KubeAPIApprovedAnnotation is an annotation that must be set to create a CRD for the k8s.io, *.k8s.io, kubernetes.io, or *.kubernetes.io namespaces. + // The value should be a link to a URL where the current spec was approved, so updates to the spec should also update the URL. + // If the API is unapproved, you may set the annotation to a string starting with `"unapproved"`. For instance, `"unapproved, temporarily squatting"` or `"unapproved, experimental-only"`. This is discouraged. + KubeAPIApprovedAnnotation = "api-approved.kubernetes.io" + + // NoneConverter is a converter that only sets apiversion of the CR and leave everything else unchanged. + NoneConverter ConversionStrategyType = "None" + // WebhookConverter is a converter that calls to an external webhook to convert the CR. 
+ WebhookConverter ConversionStrategyType = "Webhook" +) + +// CustomResourceDefinitionSpec describes how a user wants their resource to appear +type CustomResourceDefinitionSpec struct { + // group is the API group of the defined custom resource. + // The custom resources are served under `/apis//...`. + // Must match the name of the CustomResourceDefinition (in the form `.`). + Group string `json:"group" protobuf:"bytes,1,opt,name=group"` + // names specify the resource and kind names for the custom resource. + Names CustomResourceDefinitionNames `json:"names" protobuf:"bytes,3,opt,name=names"` + // scope indicates whether the defined custom resource is cluster- or namespace-scoped. + // Allowed values are `Cluster` and `Namespaced`. Default is `Namespaced`. + Scope ResourceScope `json:"scope" protobuf:"bytes,4,opt,name=scope,casttype=ResourceScope"` + // versions is the list of all API versions of the defined custom resource. + // Version names are used to compute the order in which served versions are listed in API discovery. + // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered + // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), + // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first + // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing + // major version, then minor version. An example sorted list of versions: + // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. + Versions []CustomResourceDefinitionVersion `json:"versions" protobuf:"bytes,7,rep,name=versions"` + + // conversion defines conversion settings for the CRD. + // +optional + Conversion *CustomResourceConversion `json:"conversion,omitempty" protobuf:"bytes,9,opt,name=conversion"` + + // preserveUnknownFields indicates that object fields which are not specified + // in the OpenAPI schema should be preserved when persisting to storage. + // apiVersion, kind, metadata and known fields inside metadata are always preserved. + // This field is deprecated in favor of setting `x-preserve-unknown-fields` to true in `spec.versions[*].schema.openAPIV3Schema`. + // See https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#pruning-versus-preserving-unknown-fields for details. + // +optional + PreserveUnknownFields bool `json:"preserveUnknownFields,omitempty" protobuf:"varint,10,opt,name=preserveUnknownFields"` +} + +// CustomResourceConversion describes how to convert different versions of a CR. +type CustomResourceConversion struct { + // strategy specifies how custom resources are converted between versions. Allowed values are: + // - `None`: The converter only change the apiVersion and would not touch any other field in the custom resource. + // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information + // is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhook to be set. + Strategy ConversionStrategyType `json:"strategy" protobuf:"bytes,1,name=strategy"` + + // webhook describes how to call the conversion webhook. Required when `strategy` is set to `Webhook`. 
+ // +optional + Webhook *WebhookConversion `json:"webhook,omitempty" protobuf:"bytes,2,opt,name=webhook"` +} + +// WebhookConversion describes how to call a conversion webhook +type WebhookConversion struct { + // clientConfig is the instructions for how to call the webhook if strategy is `Webhook`. + // +optional + ClientConfig *WebhookClientConfig `json:"clientConfig,omitempty" protobuf:"bytes,2,name=clientConfig"` + + // conversionReviewVersions is an ordered list of preferred `ConversionReview` + // versions the Webhook expects. The API server will use the first version in + // the list which it supports. If none of the versions specified in this list + // are supported by API server, conversion will fail for the custom resource. + // If a persisted Webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail. + ConversionReviewVersions []string `json:"conversionReviewVersions" protobuf:"bytes,3,rep,name=conversionReviewVersions"` +} + +// WebhookClientConfig contains the information to make a TLS connection with the webhook. +type WebhookClientConfig struct { + // url gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + URL *string `json:"url,omitempty" protobuf:"bytes,3,opt,name=url"` + + // service is a reference to the service for this webhook. Either + // service or url must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // +optional + Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"` + + // caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional + CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"` +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +type ServiceReference struct { + // namespace is the namespace of the service. + // Required + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + // name is the name of the service. + // Required + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + + // path is an optional URL path at which the webhook will be contacted. 
+ // +optional + Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` + + // port is an optional service port at which the webhook will be contacted. + // `port` should be a valid port number (1-65535, inclusive). + // Defaults to 443 for backward compatibility. + // +optional + Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` +} + +// CustomResourceDefinitionVersion describes a version for CRD. +type CustomResourceDefinitionVersion struct { + // name is the version name, e.g. “v1”, “v2beta1”, etc. + // The custom resources are served under this version at `/apis///...` if `served` is true. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // served is a flag enabling/disabling this version from being served via REST APIs + Served bool `json:"served" protobuf:"varint,2,opt,name=served"` + // storage indicates this version should be used when persisting custom resources to storage. + // There must be exactly one version with storage=true. + Storage bool `json:"storage" protobuf:"varint,3,opt,name=storage"` + // schema describes the schema used for validation, pruning, and defaulting of this version of the custom resource. + // +optional + Schema *CustomResourceValidation `json:"schema,omitempty" protobuf:"bytes,4,opt,name=schema"` + // subresources specify what subresources this version of the defined custom resource have. + // +optional + Subresources *CustomResourceSubresources `json:"subresources,omitempty" protobuf:"bytes,5,opt,name=subresources"` + // additionalPrinterColumns specifies additional columns returned in Table output. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. + // If no columns are specified, a single column displaying the age of the custom resource is used. + // +optional + AdditionalPrinterColumns []CustomResourceColumnDefinition `json:"additionalPrinterColumns,omitempty" protobuf:"bytes,6,rep,name=additionalPrinterColumns"` +} + +// CustomResourceColumnDefinition specifies a column for server side printing. +type CustomResourceColumnDefinition struct { + // name is a human readable name for the column. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // type is an OpenAPI type definition for this column. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. + Type string `json:"type" protobuf:"bytes,2,opt,name=type"` + // format is an optional OpenAPI type definition for this column. The 'name' format is applied + // to the primary identifier column to assist in clients identifying column is the resource name. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. + // +optional + Format string `json:"format,omitempty" protobuf:"bytes,3,opt,name=format"` + // description is a human readable description of this column. + // +optional + Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` + // priority is an integer defining the relative importance of this column compared to others. Lower + // numbers are considered higher priority. Columns that may be omitted in limited space scenarios + // should be given a priority greater than 0. + // +optional + Priority int32 `json:"priority,omitempty" protobuf:"bytes,5,opt,name=priority"` + // jsonPath is a simple JSON path (i.e. with array notation) which is evaluated against + // each custom resource to produce the value for this column. 
+ JSONPath string `json:"jsonPath" protobuf:"bytes,6,opt,name=jsonPath"` +} + +// CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition +type CustomResourceDefinitionNames struct { + // plural is the plural name of the resource to serve. + // The custom resources are served under `/apis/<group>/<version>/.../<plural>`. + // Must match the name of the CustomResourceDefinition (in the form `<names.plural>.<group>`). + // Must be all lowercase. + Plural string `json:"plural" protobuf:"bytes,1,opt,name=plural"` + // singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`. + // +optional + Singular string `json:"singular,omitempty" protobuf:"bytes,2,opt,name=singular"` + // shortNames are short names for the resource, exposed in API discovery documents, + // and used by clients to support invocations like `kubectl get <shortname>`. + // It must be all lowercase. + // +optional + ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,3,opt,name=shortNames"` + // kind is the serialized kind of the resource. It is normally CamelCase and singular. + // Custom resource instances will use this value as the `kind` attribute in API calls. + Kind string `json:"kind" protobuf:"bytes,4,opt,name=kind"` + // listKind is the serialized kind of the list for this resource. Defaults to "`kind`List". + // +optional + ListKind string `json:"listKind,omitempty" protobuf:"bytes,5,opt,name=listKind"` + // categories is a list of grouped resources this custom resource belongs to (e.g. 'all'). + // This is published in API discovery documents, and used by clients to support invocations like + // `kubectl get all`. + // +optional + Categories []string `json:"categories,omitempty" protobuf:"bytes,6,rep,name=categories"` +} + +// ResourceScope is an enum defining the different scopes available to a custom resource +type ResourceScope string + +const ( + ClusterScoped ResourceScope = "Cluster" + NamespaceScoped ResourceScope = "Namespaced" +) + +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// CustomResourceDefinitionConditionType is a valid value for CustomResourceDefinitionCondition.Type +type CustomResourceDefinitionConditionType string + +const ( + // Established means that the resource has become active. A resource is established when all names are + // accepted without a conflict for the first time. A resource stays established until deleted, even during + // a later NamesAccepted due to changed names. Note that not all names can be changed. + Established CustomResourceDefinitionConditionType = "Established" + // NamesAccepted means the names chosen for this CustomResourceDefinition do not conflict with others in + // the group and are therefore accepted. + NamesAccepted CustomResourceDefinitionConditionType = "NamesAccepted" + // NonStructuralSchema means that one or more OpenAPI schema is not structural.
+ // + // A schema is structural if it specifies types for all values, with the only exceptions of those with + // - x-kubernetes-int-or-string: true — for fields which can be integer or string + // - x-kubernetes-preserve-unknown-fields: true — for raw, unspecified JSON values + // and there is no type, additionalProperties, default, nullable or x-kubernetes-* vendor extensions + // specified under allOf, anyOf, oneOf or not. + // + // Non-structural schemas will not be allowed anymore in v1 API groups. Moreover, new features will not be + // available for non-structural CRDs: + // - pruning + // - defaulting + // - read-only + // - OpenAPI publishing + // - webhook conversion + NonStructuralSchema CustomResourceDefinitionConditionType = "NonStructuralSchema" + // Terminating means that the CustomResourceDefinition has been deleted and is cleaning up. + Terminating CustomResourceDefinitionConditionType = "Terminating" + // KubernetesAPIApprovalPolicyConformant indicates that an API in *.k8s.io or *.kubernetes.io is or is not approved. For CRDs + // outside those groups, this condition will not be set. For CRDs inside those groups, the condition will + // be true if .metadata.annotations["api-approved.kubernetes.io"] is set to a URL, otherwise it will be false. + // See https://github.com/kubernetes/enhancements/pull/1111 for more details. + KubernetesAPIApprovalPolicyConformant CustomResourceDefinitionConditionType = "KubernetesAPIApprovalPolicyConformant" +) + +// CustomResourceDefinitionCondition contains details for the current condition of this CustomResourceDefinition. +type CustomResourceDefinitionCondition struct { + // type is the type of the condition. Types include Established, NamesAccepted and Terminating. + Type CustomResourceDefinitionConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=CustomResourceDefinitionConditionType"` + // status is the status of the condition. + // Can be True, False, Unknown. + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // lastTransitionTime last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // message is a human-readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition +type CustomResourceDefinitionStatus struct { + // conditions indicate state for particular aspects of a CustomResourceDefinition + // +optional + Conditions []CustomResourceDefinitionCondition `json:"conditions" protobuf:"bytes,1,opt,name=conditions"` + + // acceptedNames are the names that are actually being used to serve discovery. + // They may be different than the names in spec. + AcceptedNames CustomResourceDefinitionNames `json:"acceptedNames" protobuf:"bytes,2,opt,name=acceptedNames"` + + // storedVersions lists all versions of CustomResources that were ever persisted. Tracking these + // versions allows a migration path for stored versions in etcd.
The field is mutable + // so a migration controller can finish a migration to another version (ensuring + // no old objects are left in storage), and then remove the rest of the + // versions from this list. + // Versions may not be removed from `spec.versions` while they exist in this list. + StoredVersions []string `json:"storedVersions" protobuf:"bytes,3,rep,name=storedVersions"` +} + +// CustomResourceCleanupFinalizer is the name of the finalizer which will delete instances of +// a CustomResourceDefinition +const CustomResourceCleanupFinalizer = "customresourcecleanup.apiextensions.k8s.io" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format +// <.spec.name>.<.spec.group>. +type CustomResourceDefinition struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec describes how the user wants the resources to appear + Spec CustomResourceDefinitionSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + // status indicates the actual state of the CustomResourceDefinition + // +optional + Status CustomResourceDefinitionStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CustomResourceDefinitionList is a list of CustomResourceDefinition objects. +type CustomResourceDefinitionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items list individual CustomResourceDefinition objects + Items []CustomResourceDefinition `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// CustomResourceValidation is a list of validation methods for CustomResources. +type CustomResourceValidation struct { + // openAPIV3Schema is the OpenAPI v3 schema to use for validation and pruning. + // +optional + OpenAPIV3Schema *JSONSchemaProps `json:"openAPIV3Schema,omitempty" protobuf:"bytes,1,opt,name=openAPIV3Schema"` +} + +// CustomResourceSubresources defines the status and scale subresources for CustomResources. +type CustomResourceSubresources struct { + // status indicates the custom resource should serve a `/status` subresource. + // When enabled: + // 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. + // 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object. + // +optional + Status *CustomResourceSubresourceStatus `json:"status,omitempty" protobuf:"bytes,1,opt,name=status"` + // scale indicates the custom resource should serve a `/scale` subresource that returns an `autoscaling/v1` Scale object. + // +optional + Scale *CustomResourceSubresourceScale `json:"scale,omitempty" protobuf:"bytes,2,opt,name=scale"` +} + +// CustomResourceSubresourceStatus defines how to serve the status subresource for CustomResources. +// Status is represented by the `.status` JSON path inside of a CustomResource. 
When set, +// * exposes a /status subresource for the custom resource +// * PUT requests to the /status subresource take a custom resource object, and ignore changes to anything except the status stanza +// * PUT/POST/PATCH requests to the custom resource ignore changes to the status stanza +type CustomResourceSubresourceStatus struct{} + +// CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources. +type CustomResourceSubresourceScale struct { + // specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. + // Only JSON paths without the array notation are allowed. + // Must be a JSON Path under `.spec`. + // If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET. + SpecReplicasPath string `json:"specReplicasPath" protobuf:"bytes,1,name=specReplicasPath"` + // statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. + // Only JSON paths without the array notation are allowed. + // Must be a JSON Path under `.status`. + // If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource + // will default to 0. + StatusReplicasPath string `json:"statusReplicasPath" protobuf:"bytes,2,opt,name=statusReplicasPath"` + // labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. + // Only JSON paths without the array notation are allowed. + // Must be a JSON Path under `.status` or `.spec`. + // Must be set to work with HorizontalPodAutoscaler. + // The field pointed by this JSON path must be a string field (not a complex selector struct) + // which contains a serialized label selector in string form. + // More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource + // If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` + // subresource will default to the empty string. + // +optional + LabelSelectorPath *string `json:"labelSelectorPath,omitempty" protobuf:"bytes,3,opt,name=labelSelectorPath"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConversionReview describes a conversion request/response. +type ConversionReview struct { + metav1.TypeMeta `json:",inline"` + // request describes the attributes for the conversion request. + // +optional + Request *ConversionRequest `json:"request,omitempty" protobuf:"bytes,1,opt,name=request"` + // response describes the attributes for the conversion response. + // +optional + Response *ConversionResponse `json:"response,omitempty" protobuf:"bytes,2,opt,name=response"` +} + +// ConversionRequest describes the conversion request parameters. +type ConversionRequest struct { + // uid is an identifier for the individual request/response. It allows distinguishing instances of requests which are + // otherwise identical (parallel requests, etc). + // The UID is meant to track the round trip (request/response) between the Kubernetes API server and the webhook, not the user request. + // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. + UID types.UID `json:"uid" protobuf:"bytes,1,name=uid"` + // desiredAPIVersion is the version to convert given objects to. e.g. 
"myapi.example.com/v1" + DesiredAPIVersion string `json:"desiredAPIVersion" protobuf:"bytes,2,name=desiredAPIVersion"` + // objects is the list of custom resource objects to be converted. + Objects []runtime.RawExtension `json:"objects" protobuf:"bytes,3,rep,name=objects"` +} + +// ConversionResponse describes a conversion response. +type ConversionResponse struct { + // uid is an identifier for the individual request/response. + // This should be copied over from the corresponding `request.uid`. + UID types.UID `json:"uid" protobuf:"bytes,1,name=uid"` + // convertedObjects is the list of converted version of `request.objects` if the `result` is successful, otherwise empty. + // The webhook is expected to set `apiVersion` of these objects to the `request.desiredAPIVersion`. The list + // must also have the same size as the input list with the same objects in the same order (equal kind, metadata.uid, metadata.name and metadata.namespace). + // The webhook is allowed to mutate labels and annotations. Any other change to the metadata is silently ignored. + ConvertedObjects []runtime.RawExtension `json:"convertedObjects" protobuf:"bytes,2,rep,name=convertedObjects"` + // result contains the result of conversion with extra details if the conversion failed. `result.status` determines if + // the conversion failed or succeeded. The `result.status` field is required and represents the success or failure of the + // conversion. A successful conversion must set `result.status` to `Success`. A failed conversion must set + // `result.status` to `Failure` and provide more details in `result.message` and return http status 200. The `result.message` + // will be used to construct an error message for the end user. + Result metav1.Status `json:"result" protobuf:"bytes,3,name=result"` +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go new file mode 100644 index 000000000..bc6827987 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go @@ -0,0 +1,213 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/). +type JSONSchemaProps struct { + ID string `json:"id,omitempty" protobuf:"bytes,1,opt,name=id"` + Schema JSONSchemaURL `json:"$schema,omitempty" protobuf:"bytes,2,opt,name=schema"` + Ref *string `json:"$ref,omitempty" protobuf:"bytes,3,opt,name=ref"` + Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` + Type string `json:"type,omitempty" protobuf:"bytes,5,opt,name=type"` + Format string `json:"format,omitempty" protobuf:"bytes,6,opt,name=format"` + Title string `json:"title,omitempty" protobuf:"bytes,7,opt,name=title"` + // default is a default value for undefined object fields. 
+ // Defaulting is a beta feature under the CustomResourceDefaulting feature gate. + // Defaulting requires spec.preserveUnknownFields to be false. + Default *JSON `json:"default,omitempty" protobuf:"bytes,8,opt,name=default"` + Maximum *float64 `json:"maximum,omitempty" protobuf:"bytes,9,opt,name=maximum"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty" protobuf:"bytes,10,opt,name=exclusiveMaximum"` + Minimum *float64 `json:"minimum,omitempty" protobuf:"bytes,11,opt,name=minimum"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty" protobuf:"bytes,12,opt,name=exclusiveMinimum"` + MaxLength *int64 `json:"maxLength,omitempty" protobuf:"bytes,13,opt,name=maxLength"` + MinLength *int64 `json:"minLength,omitempty" protobuf:"bytes,14,opt,name=minLength"` + Pattern string `json:"pattern,omitempty" protobuf:"bytes,15,opt,name=pattern"` + MaxItems *int64 `json:"maxItems,omitempty" protobuf:"bytes,16,opt,name=maxItems"` + MinItems *int64 `json:"minItems,omitempty" protobuf:"bytes,17,opt,name=minItems"` + UniqueItems bool `json:"uniqueItems,omitempty" protobuf:"bytes,18,opt,name=uniqueItems"` + MultipleOf *float64 `json:"multipleOf,omitempty" protobuf:"bytes,19,opt,name=multipleOf"` + Enum []JSON `json:"enum,omitempty" protobuf:"bytes,20,rep,name=enum"` + MaxProperties *int64 `json:"maxProperties,omitempty" protobuf:"bytes,21,opt,name=maxProperties"` + MinProperties *int64 `json:"minProperties,omitempty" protobuf:"bytes,22,opt,name=minProperties"` + Required []string `json:"required,omitempty" protobuf:"bytes,23,rep,name=required"` + Items *JSONSchemaPropsOrArray `json:"items,omitempty" protobuf:"bytes,24,opt,name=items"` + AllOf []JSONSchemaProps `json:"allOf,omitempty" protobuf:"bytes,25,rep,name=allOf"` + OneOf []JSONSchemaProps `json:"oneOf,omitempty" protobuf:"bytes,26,rep,name=oneOf"` + AnyOf []JSONSchemaProps `json:"anyOf,omitempty" protobuf:"bytes,27,rep,name=anyOf"` + Not *JSONSchemaProps `json:"not,omitempty" protobuf:"bytes,28,opt,name=not"` + Properties map[string]JSONSchemaProps `json:"properties,omitempty" protobuf:"bytes,29,rep,name=properties"` + AdditionalProperties *JSONSchemaPropsOrBool `json:"additionalProperties,omitempty" protobuf:"bytes,30,opt,name=additionalProperties"` + PatternProperties map[string]JSONSchemaProps `json:"patternProperties,omitempty" protobuf:"bytes,31,rep,name=patternProperties"` + Dependencies JSONSchemaDependencies `json:"dependencies,omitempty" protobuf:"bytes,32,opt,name=dependencies"` + AdditionalItems *JSONSchemaPropsOrBool `json:"additionalItems,omitempty" protobuf:"bytes,33,opt,name=additionalItems"` + Definitions JSONSchemaDefinitions `json:"definitions,omitempty" protobuf:"bytes,34,opt,name=definitions"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty" protobuf:"bytes,35,opt,name=externalDocs"` + Example *JSON `json:"example,omitempty" protobuf:"bytes,36,opt,name=example"` + Nullable bool `json:"nullable,omitempty" protobuf:"bytes,37,opt,name=nullable"` + + // x-kubernetes-preserve-unknown-fields stops the API server + // decoding step from pruning fields which are not specified + // in the validation schema. This affects fields recursively, + // but switches back to normal pruning behaviour if nested + // properties or additionalProperties are specified in the schema. + // This can either be true or undefined. False is forbidden. 
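// As a minimal sketch (hypothetical property name), marking one object property this
// way keeps its unspecified sub-fields through pruning while the rest of the schema
// remains structural:
//
//   properties:
//     rawConfig:
//       type: object
//       x-kubernetes-preserve-unknown-fields: true
//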
+ XPreserveUnknownFields *bool `json:"x-kubernetes-preserve-unknown-fields,omitempty" protobuf:"bytes,38,opt,name=xKubernetesPreserveUnknownFields"` + + // x-kubernetes-embedded-resource defines that the value is an + // embedded Kubernetes runtime.Object, with TypeMeta and + // ObjectMeta. The type must be object. It is allowed to further + // restrict the embedded object. kind, apiVersion and metadata + // are validated automatically. x-kubernetes-preserve-unknown-fields + // is allowed to be true, but does not have to be if the object + // is fully specified (up to kind, apiVersion, metadata). + XEmbeddedResource bool `json:"x-kubernetes-embedded-resource,omitempty" protobuf:"bytes,39,opt,name=xKubernetesEmbeddedResource"` + + // x-kubernetes-int-or-string specifies that this value is + // either an integer or a string. If this is true, an empty + // type is allowed and type as child of anyOf is permitted + // if following one of the following patterns: + // + // 1) anyOf: + // - type: integer + // - type: string + // 2) allOf: + // - anyOf: + // - type: integer + // - type: string + // - ... zero or more + XIntOrString bool `json:"x-kubernetes-int-or-string,omitempty" protobuf:"bytes,40,opt,name=xKubernetesIntOrString"` + + // x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used + // as the index of the map. + // + // This tag MUST only be used on lists that have the "x-kubernetes-list-type" + // extension set to "map". Also, the values specified for this attribute must + // be a scalar typed field of the child structure (no nesting is supported). + // + // +optional + XListMapKeys []string `json:"x-kubernetes-list-map-keys,omitempty" protobuf:"bytes,41,rep,name=xKubernetesListMapKeys"` + + // x-kubernetes-list-type annotates an array to further describe its topology. + // This extension must only be used on lists and may have 3 possible values: + // + // 1) `atomic`: the list is treated as a single entity, like a scalar. + // Atomic lists will be entirely replaced when updated. This extension + // may be used on any type of list (struct, scalar, ...). + // 2) `set`: + // Sets are lists that must not have multiple items with the same value. Each + // value must be a scalar (or another atomic type). + // 3) `map`: + // These lists are like maps in that their elements have a non-index key + // used to identify them. Order is preserved upon merge. The map tag + // must only be used on a list with elements of type object. + // Defaults to atomic for arrays. + // +optional + XListType *string `json:"x-kubernetes-list-type,omitempty" protobuf:"bytes,42,opt,name=xKubernetesListType"` +} + +// JSON represents any valid JSON value. +// These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil. +type JSON struct { + Raw []byte `protobuf:"bytes,1,opt,name=raw"` +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ JSON) OpenAPISchemaType() []string { + // TODO: return actual types when anyOf is supported + return nil +} + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ JSON) OpenAPISchemaFormat() string { return "" } + +// JSONSchemaURL represents a schema url. 
+type JSONSchemaURL string + +// JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps +// or an array of JSONSchemaProps. Mainly here for serialization purposes. +type JSONSchemaPropsOrArray struct { + Schema *JSONSchemaProps `protobuf:"bytes,1,opt,name=schema"` + JSONSchemas []JSONSchemaProps `protobuf:"bytes,2,rep,name=jSONSchemas"` +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ JSONSchemaPropsOrArray) OpenAPISchemaType() []string { + // TODO: return actual types when anyOf is supported + return nil +} + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ JSONSchemaPropsOrArray) OpenAPISchemaFormat() string { return "" } + +// JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. +// Defaults to true for the boolean property. +type JSONSchemaPropsOrBool struct { + Allows bool `protobuf:"varint,1,opt,name=allows"` + Schema *JSONSchemaProps `protobuf:"bytes,2,opt,name=schema"` +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ JSONSchemaPropsOrBool) OpenAPISchemaType() []string { + // TODO: return actual types when anyOf is supported + return nil +} + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ JSONSchemaPropsOrBool) OpenAPISchemaFormat() string { return "" } + +// JSONSchemaDependencies represent a dependencies property. +type JSONSchemaDependencies map[string]JSONSchemaPropsOrStringArray + +// JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array. +type JSONSchemaPropsOrStringArray struct { + Schema *JSONSchemaProps `protobuf:"bytes,1,opt,name=schema"` + Property []string `protobuf:"bytes,2,rep,name=property"` +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ JSONSchemaPropsOrStringArray) OpenAPISchemaType() []string { + // TODO: return actual types when anyOf is supported + return nil +} + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ JSONSchemaPropsOrStringArray) OpenAPISchemaFormat() string { return "" } + +// JSONSchemaDefinitions contains the models explicitly defined in this spec. +type JSONSchemaDefinitions map[string]JSONSchemaProps + +// ExternalDocumentation allows referencing an external resource for extended documentation. +type ExternalDocumentation struct { + Description string `json:"description,omitempty" protobuf:"bytes,1,opt,name=description"` + URL string `json:"url,omitempty" protobuf:"bytes,2,opt,name=url"` +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go new file mode 100644 index 000000000..de5c8089a --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go @@ -0,0 +1,1307 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1 + +import ( + unsafe "unsafe" + + apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*CustomResourceColumnDefinition)(nil), (*apiextensions.CustomResourceColumnDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(a.(*CustomResourceColumnDefinition), b.(*apiextensions.CustomResourceColumnDefinition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceColumnDefinition)(nil), (*CustomResourceColumnDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceColumnDefinition_To_v1_CustomResourceColumnDefinition(a.(*apiextensions.CustomResourceColumnDefinition), b.(*CustomResourceColumnDefinition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceConversion)(nil), (*apiextensions.CustomResourceConversion)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(a.(*CustomResourceConversion), b.(*apiextensions.CustomResourceConversion), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceConversion)(nil), (*CustomResourceConversion)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceConversion_To_v1_CustomResourceConversion(a.(*apiextensions.CustomResourceConversion), b.(*CustomResourceConversion), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceDefinition)(nil), (*apiextensions.CustomResourceDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(a.(*CustomResourceDefinition), b.(*apiextensions.CustomResourceDefinition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinition)(nil), (*CustomResourceDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinition_To_v1_CustomResourceDefinition(a.(*apiextensions.CustomResourceDefinition), b.(*CustomResourceDefinition), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*CustomResourceDefinitionCondition)(nil), (*apiextensions.CustomResourceDefinitionCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition(a.(*CustomResourceDefinitionCondition), b.(*apiextensions.CustomResourceDefinitionCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionCondition)(nil), (*CustomResourceDefinitionCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinitionCondition_To_v1_CustomResourceDefinitionCondition(a.(*apiextensions.CustomResourceDefinitionCondition), b.(*CustomResourceDefinitionCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionList)(nil), (*apiextensions.CustomResourceDefinitionList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList(a.(*CustomResourceDefinitionList), b.(*apiextensions.CustomResourceDefinitionList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionList)(nil), (*CustomResourceDefinitionList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinitionList_To_v1_CustomResourceDefinitionList(a.(*apiextensions.CustomResourceDefinitionList), b.(*CustomResourceDefinitionList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionNames)(nil), (*apiextensions.CustomResourceDefinitionNames)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(a.(*CustomResourceDefinitionNames), b.(*apiextensions.CustomResourceDefinitionNames), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionNames)(nil), (*CustomResourceDefinitionNames)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinitionNames_To_v1_CustomResourceDefinitionNames(a.(*apiextensions.CustomResourceDefinitionNames), b.(*CustomResourceDefinitionNames), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionSpec)(nil), (*apiextensions.CustomResourceDefinitionSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(a.(*CustomResourceDefinitionSpec), b.(*apiextensions.CustomResourceDefinitionSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionSpec)(nil), (*CustomResourceDefinitionSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinitionSpec_To_v1_CustomResourceDefinitionSpec(a.(*apiextensions.CustomResourceDefinitionSpec), b.(*CustomResourceDefinitionSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionStatus)(nil), (*apiextensions.CustomResourceDefinitionStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(a.(*CustomResourceDefinitionStatus), b.(*apiextensions.CustomResourceDefinitionStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionStatus)(nil), (*CustomResourceDefinitionStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinitionStatus_To_v1_CustomResourceDefinitionStatus(a.(*apiextensions.CustomResourceDefinitionStatus), b.(*CustomResourceDefinitionStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionVersion)(nil), (*apiextensions.CustomResourceDefinitionVersion)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(a.(*CustomResourceDefinitionVersion), b.(*apiextensions.CustomResourceDefinitionVersion), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionVersion)(nil), (*CustomResourceDefinitionVersion)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinitionVersion_To_v1_CustomResourceDefinitionVersion(a.(*apiextensions.CustomResourceDefinitionVersion), b.(*CustomResourceDefinitionVersion), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceSubresourceScale)(nil), (*apiextensions.CustomResourceSubresourceScale)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(a.(*CustomResourceSubresourceScale), b.(*apiextensions.CustomResourceSubresourceScale), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceSubresourceScale)(nil), (*CustomResourceSubresourceScale)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceSubresourceScale_To_v1_CustomResourceSubresourceScale(a.(*apiextensions.CustomResourceSubresourceScale), b.(*CustomResourceSubresourceScale), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceSubresourceStatus)(nil), (*apiextensions.CustomResourceSubresourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus(a.(*CustomResourceSubresourceStatus), b.(*apiextensions.CustomResourceSubresourceStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceSubresourceStatus)(nil), (*CustomResourceSubresourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceSubresourceStatus_To_v1_CustomResourceSubresourceStatus(a.(*apiextensions.CustomResourceSubresourceStatus), b.(*CustomResourceSubresourceStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceSubresources)(nil), (*apiextensions.CustomResourceSubresources)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources(a.(*CustomResourceSubresources), b.(*apiextensions.CustomResourceSubresources), scope) + }); err 
!= nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceSubresources)(nil), (*CustomResourceSubresources)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceSubresources_To_v1_CustomResourceSubresources(a.(*apiextensions.CustomResourceSubresources), b.(*CustomResourceSubresources), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceValidation)(nil), (*apiextensions.CustomResourceValidation)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(a.(*CustomResourceValidation), b.(*apiextensions.CustomResourceValidation), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceValidation)(nil), (*CustomResourceValidation)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceValidation_To_v1_CustomResourceValidation(a.(*apiextensions.CustomResourceValidation), b.(*CustomResourceValidation), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ExternalDocumentation)(nil), (*apiextensions.ExternalDocumentation)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ExternalDocumentation_To_apiextensions_ExternalDocumentation(a.(*ExternalDocumentation), b.(*apiextensions.ExternalDocumentation), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.ExternalDocumentation)(nil), (*ExternalDocumentation)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_ExternalDocumentation_To_v1_ExternalDocumentation(a.(*apiextensions.ExternalDocumentation), b.(*ExternalDocumentation), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*JSON)(nil), (*apiextensions.JSON)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_JSON_To_apiextensions_JSON(a.(*JSON), b.(*apiextensions.JSON), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.JSON)(nil), (*JSON)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_JSON_To_v1_JSON(a.(*apiextensions.JSON), b.(*JSON), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*JSONSchemaProps)(nil), (*apiextensions.JSONSchemaProps)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(a.(*JSONSchemaProps), b.(*apiextensions.JSONSchemaProps), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.JSONSchemaProps)(nil), (*JSONSchemaProps)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(a.(*apiextensions.JSONSchemaProps), b.(*JSONSchemaProps), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*JSONSchemaPropsOrArray)(nil), (*apiextensions.JSONSchemaPropsOrArray)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(a.(*JSONSchemaPropsOrArray), b.(*apiextensions.JSONSchemaPropsOrArray), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*apiextensions.JSONSchemaPropsOrArray)(nil), (*JSONSchemaPropsOrArray)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_JSONSchemaPropsOrArray_To_v1_JSONSchemaPropsOrArray(a.(*apiextensions.JSONSchemaPropsOrArray), b.(*JSONSchemaPropsOrArray), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*JSONSchemaPropsOrBool)(nil), (*apiextensions.JSONSchemaPropsOrBool)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(a.(*JSONSchemaPropsOrBool), b.(*apiextensions.JSONSchemaPropsOrBool), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.JSONSchemaPropsOrBool)(nil), (*JSONSchemaPropsOrBool)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_JSONSchemaPropsOrBool_To_v1_JSONSchemaPropsOrBool(a.(*apiextensions.JSONSchemaPropsOrBool), b.(*JSONSchemaPropsOrBool), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*JSONSchemaPropsOrStringArray)(nil), (*apiextensions.JSONSchemaPropsOrStringArray)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(a.(*JSONSchemaPropsOrStringArray), b.(*apiextensions.JSONSchemaPropsOrStringArray), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.JSONSchemaPropsOrStringArray)(nil), (*JSONSchemaPropsOrStringArray)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1_JSONSchemaPropsOrStringArray(a.(*apiextensions.JSONSchemaPropsOrStringArray), b.(*JSONSchemaPropsOrStringArray), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ServiceReference)(nil), (*apiextensions.ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ServiceReference_To_apiextensions_ServiceReference(a.(*ServiceReference), b.(*apiextensions.ServiceReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.ServiceReference)(nil), (*ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_ServiceReference_To_v1_ServiceReference(a.(*apiextensions.ServiceReference), b.(*ServiceReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*WebhookClientConfig)(nil), (*apiextensions.WebhookClientConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(a.(*WebhookClientConfig), b.(*apiextensions.WebhookClientConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.WebhookClientConfig)(nil), (*WebhookClientConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_WebhookClientConfig_To_v1_WebhookClientConfig(a.(*apiextensions.WebhookClientConfig), b.(*WebhookClientConfig), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*apiextensions.CustomResourceConversion)(nil), (*CustomResourceConversion)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_apiextensions_CustomResourceConversion_To_v1_CustomResourceConversion(a.(*apiextensions.CustomResourceConversion), b.(*CustomResourceConversion), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*apiextensions.CustomResourceDefinitionSpec)(nil), (*CustomResourceDefinitionSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinitionSpec_To_v1_CustomResourceDefinitionSpec(a.(*apiextensions.CustomResourceDefinitionSpec), b.(*CustomResourceDefinitionSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*apiextensions.JSONSchemaProps)(nil), (*JSONSchemaProps)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(a.(*apiextensions.JSONSchemaProps), b.(*JSONSchemaProps), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*apiextensions.JSON)(nil), (*JSON)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_JSON_To_v1_JSON(a.(*apiextensions.JSON), b.(*JSON), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*CustomResourceConversion)(nil), (*apiextensions.CustomResourceConversion)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(a.(*CustomResourceConversion), b.(*apiextensions.CustomResourceConversion), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*CustomResourceDefinitionSpec)(nil), (*apiextensions.CustomResourceDefinitionSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(a.(*CustomResourceDefinitionSpec), b.(*apiextensions.CustomResourceDefinitionSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*JSON)(nil), (*apiextensions.JSON)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_JSON_To_apiextensions_JSON(a.(*JSON), b.(*apiextensions.JSON), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(in *CustomResourceColumnDefinition, out *apiextensions.CustomResourceColumnDefinition, s conversion.Scope) error { + out.Name = in.Name + out.Type = in.Type + out.Format = in.Format + out.Description = in.Description + out.Priority = in.Priority + out.JSONPath = in.JSONPath + return nil +} + +// Convert_v1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition is an autogenerated conversion function. 
+func Convert_v1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(in *CustomResourceColumnDefinition, out *apiextensions.CustomResourceColumnDefinition, s conversion.Scope) error { + return autoConvert_v1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceColumnDefinition_To_v1_CustomResourceColumnDefinition(in *apiextensions.CustomResourceColumnDefinition, out *CustomResourceColumnDefinition, s conversion.Scope) error { + out.Name = in.Name + out.Type = in.Type + out.Format = in.Format + out.Description = in.Description + out.Priority = in.Priority + out.JSONPath = in.JSONPath + return nil +} + +// Convert_apiextensions_CustomResourceColumnDefinition_To_v1_CustomResourceColumnDefinition is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceColumnDefinition_To_v1_CustomResourceColumnDefinition(in *apiextensions.CustomResourceColumnDefinition, out *CustomResourceColumnDefinition, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceColumnDefinition_To_v1_CustomResourceColumnDefinition(in, out, s) +} + +func autoConvert_v1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(in *CustomResourceConversion, out *apiextensions.CustomResourceConversion, s conversion.Scope) error { + out.Strategy = apiextensions.ConversionStrategyType(in.Strategy) + // WARNING: in.Webhook requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_apiextensions_CustomResourceConversion_To_v1_CustomResourceConversion(in *apiextensions.CustomResourceConversion, out *CustomResourceConversion, s conversion.Scope) error { + out.Strategy = ConversionStrategyType(in.Strategy) + // WARNING: in.WebhookClientConfig requires manual conversion: does not exist in peer-type + // WARNING: in.ConversionReviewVersions requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_v1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(in *CustomResourceDefinition, out *apiextensions.CustomResourceDefinition, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition is an autogenerated conversion function. 
+func Convert_v1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(in *CustomResourceDefinition, out *apiextensions.CustomResourceDefinition, s conversion.Scope) error { + return autoConvert_v1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceDefinition_To_v1_CustomResourceDefinition(in *apiextensions.CustomResourceDefinition, out *CustomResourceDefinition, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_apiextensions_CustomResourceDefinitionSpec_To_v1_CustomResourceDefinitionSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_apiextensions_CustomResourceDefinitionStatus_To_v1_CustomResourceDefinitionStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_apiextensions_CustomResourceDefinition_To_v1_CustomResourceDefinition is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceDefinition_To_v1_CustomResourceDefinition(in *apiextensions.CustomResourceDefinition, out *CustomResourceDefinition, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceDefinition_To_v1_CustomResourceDefinition(in, out, s) +} + +func autoConvert_v1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition(in *CustomResourceDefinitionCondition, out *apiextensions.CustomResourceDefinitionCondition, s conversion.Scope) error { + out.Type = apiextensions.CustomResourceDefinitionConditionType(in.Type) + out.Status = apiextensions.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition is an autogenerated conversion function. +func Convert_v1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition(in *CustomResourceDefinitionCondition, out *apiextensions.CustomResourceDefinitionCondition, s conversion.Scope) error { + return autoConvert_v1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceDefinitionCondition_To_v1_CustomResourceDefinitionCondition(in *apiextensions.CustomResourceDefinitionCondition, out *CustomResourceDefinitionCondition, s conversion.Scope) error { + out.Type = CustomResourceDefinitionConditionType(in.Type) + out.Status = ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_apiextensions_CustomResourceDefinitionCondition_To_v1_CustomResourceDefinitionCondition is an autogenerated conversion function. 
+func Convert_apiextensions_CustomResourceDefinitionCondition_To_v1_CustomResourceDefinitionCondition(in *apiextensions.CustomResourceDefinitionCondition, out *CustomResourceDefinitionCondition, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceDefinitionCondition_To_v1_CustomResourceDefinitionCondition(in, out, s) +} + +func autoConvert_v1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList(in *CustomResourceDefinitionList, out *apiextensions.CustomResourceDefinitionList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]apiextensions.CustomResourceDefinition, len(*in)) + for i := range *in { + if err := Convert_v1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList is an autogenerated conversion function. +func Convert_v1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList(in *CustomResourceDefinitionList, out *apiextensions.CustomResourceDefinitionList, s conversion.Scope) error { + return autoConvert_v1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceDefinitionList_To_v1_CustomResourceDefinitionList(in *apiextensions.CustomResourceDefinitionList, out *CustomResourceDefinitionList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomResourceDefinition, len(*in)) + for i := range *in { + if err := Convert_apiextensions_CustomResourceDefinition_To_v1_CustomResourceDefinition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_apiextensions_CustomResourceDefinitionList_To_v1_CustomResourceDefinitionList is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceDefinitionList_To_v1_CustomResourceDefinitionList(in *apiextensions.CustomResourceDefinitionList, out *CustomResourceDefinitionList, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceDefinitionList_To_v1_CustomResourceDefinitionList(in, out, s) +} + +func autoConvert_v1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(in *CustomResourceDefinitionNames, out *apiextensions.CustomResourceDefinitionNames, s conversion.Scope) error { + out.Plural = in.Plural + out.Singular = in.Singular + out.ShortNames = *(*[]string)(unsafe.Pointer(&in.ShortNames)) + out.Kind = in.Kind + out.ListKind = in.ListKind + out.Categories = *(*[]string)(unsafe.Pointer(&in.Categories)) + return nil +} + +// Convert_v1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames is an autogenerated conversion function. 
+func Convert_v1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(in *CustomResourceDefinitionNames, out *apiextensions.CustomResourceDefinitionNames, s conversion.Scope) error { + return autoConvert_v1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceDefinitionNames_To_v1_CustomResourceDefinitionNames(in *apiextensions.CustomResourceDefinitionNames, out *CustomResourceDefinitionNames, s conversion.Scope) error { + out.Plural = in.Plural + out.Singular = in.Singular + out.ShortNames = *(*[]string)(unsafe.Pointer(&in.ShortNames)) + out.Kind = in.Kind + out.ListKind = in.ListKind + out.Categories = *(*[]string)(unsafe.Pointer(&in.Categories)) + return nil +} + +// Convert_apiextensions_CustomResourceDefinitionNames_To_v1_CustomResourceDefinitionNames is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceDefinitionNames_To_v1_CustomResourceDefinitionNames(in *apiextensions.CustomResourceDefinitionNames, out *CustomResourceDefinitionNames, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceDefinitionNames_To_v1_CustomResourceDefinitionNames(in, out, s) +} + +func autoConvert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(in *CustomResourceDefinitionSpec, out *apiextensions.CustomResourceDefinitionSpec, s conversion.Scope) error { + out.Group = in.Group + if err := Convert_v1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(&in.Names, &out.Names, s); err != nil { + return err + } + out.Scope = apiextensions.ResourceScope(in.Scope) + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]apiextensions.CustomResourceDefinitionVersion, len(*in)) + for i := range *in { + if err := Convert_v1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Versions = nil + } + if in.Conversion != nil { + in, out := &in.Conversion, &out.Conversion + *out = new(apiextensions.CustomResourceConversion) + if err := Convert_v1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(*in, *out, s); err != nil { + return err + } + } else { + out.Conversion = nil + } + if err := metav1.Convert_bool_To_Pointer_bool(&in.PreserveUnknownFields, &out.PreserveUnknownFields, s); err != nil { + return err + } + return nil +} + +func autoConvert_apiextensions_CustomResourceDefinitionSpec_To_v1_CustomResourceDefinitionSpec(in *apiextensions.CustomResourceDefinitionSpec, out *CustomResourceDefinitionSpec, s conversion.Scope) error { + out.Group = in.Group + // WARNING: in.Version requires manual conversion: does not exist in peer-type + if err := Convert_apiextensions_CustomResourceDefinitionNames_To_v1_CustomResourceDefinitionNames(&in.Names, &out.Names, s); err != nil { + return err + } + out.Scope = ResourceScope(in.Scope) + // WARNING: in.Validation requires manual conversion: does not exist in peer-type + // WARNING: in.Subresources requires manual conversion: does not exist in peer-type + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]CustomResourceDefinitionVersion, len(*in)) + for i := range *in { + if err := Convert_apiextensions_CustomResourceDefinitionVersion_To_v1_CustomResourceDefinitionVersion(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Versions = nil + } + // WARNING: 
in.AdditionalPrinterColumns requires manual conversion: does not exist in peer-type + if in.Conversion != nil { + in, out := &in.Conversion, &out.Conversion + *out = new(CustomResourceConversion) + if err := Convert_apiextensions_CustomResourceConversion_To_v1_CustomResourceConversion(*in, *out, s); err != nil { + return err + } + } else { + out.Conversion = nil + } + if err := metav1.Convert_Pointer_bool_To_bool(&in.PreserveUnknownFields, &out.PreserveUnknownFields, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(in *CustomResourceDefinitionStatus, out *apiextensions.CustomResourceDefinitionStatus, s conversion.Scope) error { + out.Conditions = *(*[]apiextensions.CustomResourceDefinitionCondition)(unsafe.Pointer(&in.Conditions)) + if err := Convert_v1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(&in.AcceptedNames, &out.AcceptedNames, s); err != nil { + return err + } + out.StoredVersions = *(*[]string)(unsafe.Pointer(&in.StoredVersions)) + return nil +} + +// Convert_v1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus is an autogenerated conversion function. +func Convert_v1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(in *CustomResourceDefinitionStatus, out *apiextensions.CustomResourceDefinitionStatus, s conversion.Scope) error { + return autoConvert_v1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceDefinitionStatus_To_v1_CustomResourceDefinitionStatus(in *apiextensions.CustomResourceDefinitionStatus, out *CustomResourceDefinitionStatus, s conversion.Scope) error { + out.Conditions = *(*[]CustomResourceDefinitionCondition)(unsafe.Pointer(&in.Conditions)) + if err := Convert_apiextensions_CustomResourceDefinitionNames_To_v1_CustomResourceDefinitionNames(&in.AcceptedNames, &out.AcceptedNames, s); err != nil { + return err + } + out.StoredVersions = *(*[]string)(unsafe.Pointer(&in.StoredVersions)) + return nil +} + +// Convert_apiextensions_CustomResourceDefinitionStatus_To_v1_CustomResourceDefinitionStatus is an autogenerated conversion function. 
+func Convert_apiextensions_CustomResourceDefinitionStatus_To_v1_CustomResourceDefinitionStatus(in *apiextensions.CustomResourceDefinitionStatus, out *CustomResourceDefinitionStatus, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceDefinitionStatus_To_v1_CustomResourceDefinitionStatus(in, out, s) +} + +func autoConvert_v1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(in *CustomResourceDefinitionVersion, out *apiextensions.CustomResourceDefinitionVersion, s conversion.Scope) error { + out.Name = in.Name + out.Served = in.Served + out.Storage = in.Storage + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(apiextensions.CustomResourceValidation) + if err := Convert_v1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + out.Subresources = (*apiextensions.CustomResourceSubresources)(unsafe.Pointer(in.Subresources)) + out.AdditionalPrinterColumns = *(*[]apiextensions.CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) + return nil +} + +// Convert_v1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion is an autogenerated conversion function. +func Convert_v1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(in *CustomResourceDefinitionVersion, out *apiextensions.CustomResourceDefinitionVersion, s conversion.Scope) error { + return autoConvert_v1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceDefinitionVersion_To_v1_CustomResourceDefinitionVersion(in *apiextensions.CustomResourceDefinitionVersion, out *CustomResourceDefinitionVersion, s conversion.Scope) error { + out.Name = in.Name + out.Served = in.Served + out.Storage = in.Storage + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(CustomResourceValidation) + if err := Convert_apiextensions_CustomResourceValidation_To_v1_CustomResourceValidation(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + out.Subresources = (*CustomResourceSubresources)(unsafe.Pointer(in.Subresources)) + out.AdditionalPrinterColumns = *(*[]CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) + return nil +} + +// Convert_apiextensions_CustomResourceDefinitionVersion_To_v1_CustomResourceDefinitionVersion is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceDefinitionVersion_To_v1_CustomResourceDefinitionVersion(in *apiextensions.CustomResourceDefinitionVersion, out *CustomResourceDefinitionVersion, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceDefinitionVersion_To_v1_CustomResourceDefinitionVersion(in, out, s) +} + +func autoConvert_v1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(in *CustomResourceSubresourceScale, out *apiextensions.CustomResourceSubresourceScale, s conversion.Scope) error { + out.SpecReplicasPath = in.SpecReplicasPath + out.StatusReplicasPath = in.StatusReplicasPath + out.LabelSelectorPath = (*string)(unsafe.Pointer(in.LabelSelectorPath)) + return nil +} + +// Convert_v1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale is an autogenerated conversion function. 
+func Convert_v1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(in *CustomResourceSubresourceScale, out *apiextensions.CustomResourceSubresourceScale, s conversion.Scope) error { + return autoConvert_v1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceSubresourceScale_To_v1_CustomResourceSubresourceScale(in *apiextensions.CustomResourceSubresourceScale, out *CustomResourceSubresourceScale, s conversion.Scope) error { + out.SpecReplicasPath = in.SpecReplicasPath + out.StatusReplicasPath = in.StatusReplicasPath + out.LabelSelectorPath = (*string)(unsafe.Pointer(in.LabelSelectorPath)) + return nil +} + +// Convert_apiextensions_CustomResourceSubresourceScale_To_v1_CustomResourceSubresourceScale is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceSubresourceScale_To_v1_CustomResourceSubresourceScale(in *apiextensions.CustomResourceSubresourceScale, out *CustomResourceSubresourceScale, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceSubresourceScale_To_v1_CustomResourceSubresourceScale(in, out, s) +} + +func autoConvert_v1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus(in *CustomResourceSubresourceStatus, out *apiextensions.CustomResourceSubresourceStatus, s conversion.Scope) error { + return nil +} + +// Convert_v1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus is an autogenerated conversion function. +func Convert_v1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus(in *CustomResourceSubresourceStatus, out *apiextensions.CustomResourceSubresourceStatus, s conversion.Scope) error { + return autoConvert_v1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceSubresourceStatus_To_v1_CustomResourceSubresourceStatus(in *apiextensions.CustomResourceSubresourceStatus, out *CustomResourceSubresourceStatus, s conversion.Scope) error { + return nil +} + +// Convert_apiextensions_CustomResourceSubresourceStatus_To_v1_CustomResourceSubresourceStatus is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceSubresourceStatus_To_v1_CustomResourceSubresourceStatus(in *apiextensions.CustomResourceSubresourceStatus, out *CustomResourceSubresourceStatus, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceSubresourceStatus_To_v1_CustomResourceSubresourceStatus(in, out, s) +} + +func autoConvert_v1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources(in *CustomResourceSubresources, out *apiextensions.CustomResourceSubresources, s conversion.Scope) error { + out.Status = (*apiextensions.CustomResourceSubresourceStatus)(unsafe.Pointer(in.Status)) + out.Scale = (*apiextensions.CustomResourceSubresourceScale)(unsafe.Pointer(in.Scale)) + return nil +} + +// Convert_v1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources is an autogenerated conversion function. 
+func Convert_v1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources(in *CustomResourceSubresources, out *apiextensions.CustomResourceSubresources, s conversion.Scope) error { + return autoConvert_v1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceSubresources_To_v1_CustomResourceSubresources(in *apiextensions.CustomResourceSubresources, out *CustomResourceSubresources, s conversion.Scope) error { + out.Status = (*CustomResourceSubresourceStatus)(unsafe.Pointer(in.Status)) + out.Scale = (*CustomResourceSubresourceScale)(unsafe.Pointer(in.Scale)) + return nil +} + +// Convert_apiextensions_CustomResourceSubresources_To_v1_CustomResourceSubresources is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceSubresources_To_v1_CustomResourceSubresources(in *apiextensions.CustomResourceSubresources, out *CustomResourceSubresources, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceSubresources_To_v1_CustomResourceSubresources(in, out, s) +} + +func autoConvert_v1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(in *CustomResourceValidation, out *apiextensions.CustomResourceValidation, s conversion.Scope) error { + if in.OpenAPIV3Schema != nil { + in, out := &in.OpenAPIV3Schema, &out.OpenAPIV3Schema + *out = new(apiextensions.JSONSchemaProps) + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.OpenAPIV3Schema = nil + } + return nil +} + +// Convert_v1_CustomResourceValidation_To_apiextensions_CustomResourceValidation is an autogenerated conversion function. +func Convert_v1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(in *CustomResourceValidation, out *apiextensions.CustomResourceValidation, s conversion.Scope) error { + return autoConvert_v1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceValidation_To_v1_CustomResourceValidation(in *apiextensions.CustomResourceValidation, out *CustomResourceValidation, s conversion.Scope) error { + if in.OpenAPIV3Schema != nil { + in, out := &in.OpenAPIV3Schema, &out.OpenAPIV3Schema + *out = new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.OpenAPIV3Schema = nil + } + return nil +} + +// Convert_apiextensions_CustomResourceValidation_To_v1_CustomResourceValidation is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceValidation_To_v1_CustomResourceValidation(in *apiextensions.CustomResourceValidation, out *CustomResourceValidation, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceValidation_To_v1_CustomResourceValidation(in, out, s) +} + +func autoConvert_v1_ExternalDocumentation_To_apiextensions_ExternalDocumentation(in *ExternalDocumentation, out *apiextensions.ExternalDocumentation, s conversion.Scope) error { + out.Description = in.Description + out.URL = in.URL + return nil +} + +// Convert_v1_ExternalDocumentation_To_apiextensions_ExternalDocumentation is an autogenerated conversion function. 
+func Convert_v1_ExternalDocumentation_To_apiextensions_ExternalDocumentation(in *ExternalDocumentation, out *apiextensions.ExternalDocumentation, s conversion.Scope) error { + return autoConvert_v1_ExternalDocumentation_To_apiextensions_ExternalDocumentation(in, out, s) +} + +func autoConvert_apiextensions_ExternalDocumentation_To_v1_ExternalDocumentation(in *apiextensions.ExternalDocumentation, out *ExternalDocumentation, s conversion.Scope) error { + out.Description = in.Description + out.URL = in.URL + return nil +} + +// Convert_apiextensions_ExternalDocumentation_To_v1_ExternalDocumentation is an autogenerated conversion function. +func Convert_apiextensions_ExternalDocumentation_To_v1_ExternalDocumentation(in *apiextensions.ExternalDocumentation, out *ExternalDocumentation, s conversion.Scope) error { + return autoConvert_apiextensions_ExternalDocumentation_To_v1_ExternalDocumentation(in, out, s) +} + +func autoConvert_v1_JSON_To_apiextensions_JSON(in *JSON, out *apiextensions.JSON, s conversion.Scope) error { + // WARNING: in.Raw requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_apiextensions_JSON_To_v1_JSON(in *apiextensions.JSON, out *JSON, s conversion.Scope) error { + // FIXME: Type apiextensions.JSON is unsupported. + return nil +} + +func autoConvert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(in *JSONSchemaProps, out *apiextensions.JSONSchemaProps, s conversion.Scope) error { + out.ID = in.ID + out.Schema = apiextensions.JSONSchemaURL(in.Schema) + out.Ref = (*string)(unsafe.Pointer(in.Ref)) + out.Description = in.Description + out.Type = in.Type + out.Format = in.Format + out.Title = in.Title + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(apiextensions.JSON) + if err := Convert_v1_JSON_To_apiextensions_JSON(*in, *out, s); err != nil { + return err + } + } else { + out.Default = nil + } + out.Maximum = (*float64)(unsafe.Pointer(in.Maximum)) + out.ExclusiveMaximum = in.ExclusiveMaximum + out.Minimum = (*float64)(unsafe.Pointer(in.Minimum)) + out.ExclusiveMinimum = in.ExclusiveMinimum + out.MaxLength = (*int64)(unsafe.Pointer(in.MaxLength)) + out.MinLength = (*int64)(unsafe.Pointer(in.MinLength)) + out.Pattern = in.Pattern + out.MaxItems = (*int64)(unsafe.Pointer(in.MaxItems)) + out.MinItems = (*int64)(unsafe.Pointer(in.MinItems)) + out.UniqueItems = in.UniqueItems + out.MultipleOf = (*float64)(unsafe.Pointer(in.MultipleOf)) + if in.Enum != nil { + in, out := &in.Enum, &out.Enum + *out = make([]apiextensions.JSON, len(*in)) + for i := range *in { + if err := Convert_v1_JSON_To_apiextensions_JSON(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Enum = nil + } + out.MaxProperties = (*int64)(unsafe.Pointer(in.MaxProperties)) + out.MinProperties = (*int64)(unsafe.Pointer(in.MinProperties)) + out.Required = *(*[]string)(unsafe.Pointer(&in.Required)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = new(apiextensions.JSONSchemaPropsOrArray) + if err := Convert_v1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(*in, *out, s); err != nil { + return err + } + } else { + out.Items = nil + } + if in.AllOf != nil { + in, out := &in.AllOf, &out.AllOf + *out = make([]apiextensions.JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.AllOf = nil + } + if in.OneOf != nil { + in, out := &in.OneOf, &out.OneOf + 
*out = make([]apiextensions.JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.OneOf = nil + } + if in.AnyOf != nil { + in, out := &in.AnyOf, &out.AnyOf + *out = make([]apiextensions.JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.AnyOf = nil + } + if in.Not != nil { + in, out := &in.Not, &out.Not + *out = new(apiextensions.JSONSchemaProps) + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Not = nil + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]apiextensions.JSONSchemaProps, len(*in)) + for key, val := range *in { + newVal := new(apiextensions.JSONSchemaProps) + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.Properties = nil + } + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = new(apiextensions.JSONSchemaPropsOrBool) + if err := Convert_v1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(*in, *out, s); err != nil { + return err + } + } else { + out.AdditionalProperties = nil + } + if in.PatternProperties != nil { + in, out := &in.PatternProperties, &out.PatternProperties + *out = make(map[string]apiextensions.JSONSchemaProps, len(*in)) + for key, val := range *in { + newVal := new(apiextensions.JSONSchemaProps) + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.PatternProperties = nil + } + if in.Dependencies != nil { + in, out := &in.Dependencies, &out.Dependencies + *out = make(apiextensions.JSONSchemaDependencies, len(*in)) + for key, val := range *in { + newVal := new(apiextensions.JSONSchemaPropsOrStringArray) + if err := Convert_v1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.Dependencies = nil + } + if in.AdditionalItems != nil { + in, out := &in.AdditionalItems, &out.AdditionalItems + *out = new(apiextensions.JSONSchemaPropsOrBool) + if err := Convert_v1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(*in, *out, s); err != nil { + return err + } + } else { + out.AdditionalItems = nil + } + if in.Definitions != nil { + in, out := &in.Definitions, &out.Definitions + *out = make(apiextensions.JSONSchemaDefinitions, len(*in)) + for key, val := range *in { + newVal := new(apiextensions.JSONSchemaProps) + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.Definitions = nil + } + out.ExternalDocs = (*apiextensions.ExternalDocumentation)(unsafe.Pointer(in.ExternalDocs)) + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = new(apiextensions.JSON) + if err := Convert_v1_JSON_To_apiextensions_JSON(*in, *out, s); err != nil { + return err + } + } else { + out.Example = nil + } + out.Nullable = in.Nullable + out.XPreserveUnknownFields = (*bool)(unsafe.Pointer(in.XPreserveUnknownFields)) + 
out.XEmbeddedResource = in.XEmbeddedResource + out.XIntOrString = in.XIntOrString + out.XListMapKeys = *(*[]string)(unsafe.Pointer(&in.XListMapKeys)) + out.XListType = (*string)(unsafe.Pointer(in.XListType)) + return nil +} + +// Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps is an autogenerated conversion function. +func Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(in *JSONSchemaProps, out *apiextensions.JSONSchemaProps, s conversion.Scope) error { + return autoConvert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(in, out, s) +} + +func autoConvert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(in *apiextensions.JSONSchemaProps, out *JSONSchemaProps, s conversion.Scope) error { + out.ID = in.ID + out.Schema = JSONSchemaURL(in.Schema) + out.Ref = (*string)(unsafe.Pointer(in.Ref)) + out.Description = in.Description + out.Type = in.Type + out.Nullable = in.Nullable + out.Format = in.Format + out.Title = in.Title + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(JSON) + if err := Convert_apiextensions_JSON_To_v1_JSON(*in, *out, s); err != nil { + return err + } + } else { + out.Default = nil + } + out.Maximum = (*float64)(unsafe.Pointer(in.Maximum)) + out.ExclusiveMaximum = in.ExclusiveMaximum + out.Minimum = (*float64)(unsafe.Pointer(in.Minimum)) + out.ExclusiveMinimum = in.ExclusiveMinimum + out.MaxLength = (*int64)(unsafe.Pointer(in.MaxLength)) + out.MinLength = (*int64)(unsafe.Pointer(in.MinLength)) + out.Pattern = in.Pattern + out.MaxItems = (*int64)(unsafe.Pointer(in.MaxItems)) + out.MinItems = (*int64)(unsafe.Pointer(in.MinItems)) + out.UniqueItems = in.UniqueItems + out.MultipleOf = (*float64)(unsafe.Pointer(in.MultipleOf)) + if in.Enum != nil { + in, out := &in.Enum, &out.Enum + *out = make([]JSON, len(*in)) + for i := range *in { + if err := Convert_apiextensions_JSON_To_v1_JSON(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Enum = nil + } + out.MaxProperties = (*int64)(unsafe.Pointer(in.MaxProperties)) + out.MinProperties = (*int64)(unsafe.Pointer(in.MinProperties)) + out.Required = *(*[]string)(unsafe.Pointer(&in.Required)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = new(JSONSchemaPropsOrArray) + if err := Convert_apiextensions_JSONSchemaPropsOrArray_To_v1_JSONSchemaPropsOrArray(*in, *out, s); err != nil { + return err + } + } else { + out.Items = nil + } + if in.AllOf != nil { + in, out := &in.AllOf, &out.AllOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.AllOf = nil + } + if in.OneOf != nil { + in, out := &in.OneOf, &out.OneOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.OneOf = nil + } + if in.AnyOf != nil { + in, out := &in.AnyOf, &out.AnyOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.AnyOf = nil + } + if in.Not != nil { + in, out := &in.Not, &out.Not + *out = new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Not = nil + } + if in.Properties != nil { + 
in, out := &in.Properties, &out.Properties + *out = make(map[string]JSONSchemaProps, len(*in)) + for key, val := range *in { + newVal := new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.Properties = nil + } + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = new(JSONSchemaPropsOrBool) + if err := Convert_apiextensions_JSONSchemaPropsOrBool_To_v1_JSONSchemaPropsOrBool(*in, *out, s); err != nil { + return err + } + } else { + out.AdditionalProperties = nil + } + if in.PatternProperties != nil { + in, out := &in.PatternProperties, &out.PatternProperties + *out = make(map[string]JSONSchemaProps, len(*in)) + for key, val := range *in { + newVal := new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.PatternProperties = nil + } + if in.Dependencies != nil { + in, out := &in.Dependencies, &out.Dependencies + *out = make(JSONSchemaDependencies, len(*in)) + for key, val := range *in { + newVal := new(JSONSchemaPropsOrStringArray) + if err := Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1_JSONSchemaPropsOrStringArray(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.Dependencies = nil + } + if in.AdditionalItems != nil { + in, out := &in.AdditionalItems, &out.AdditionalItems + *out = new(JSONSchemaPropsOrBool) + if err := Convert_apiextensions_JSONSchemaPropsOrBool_To_v1_JSONSchemaPropsOrBool(*in, *out, s); err != nil { + return err + } + } else { + out.AdditionalItems = nil + } + if in.Definitions != nil { + in, out := &in.Definitions, &out.Definitions + *out = make(JSONSchemaDefinitions, len(*in)) + for key, val := range *in { + newVal := new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.Definitions = nil + } + out.ExternalDocs = (*ExternalDocumentation)(unsafe.Pointer(in.ExternalDocs)) + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = new(JSON) + if err := Convert_apiextensions_JSON_To_v1_JSON(*in, *out, s); err != nil { + return err + } + } else { + out.Example = nil + } + out.XPreserveUnknownFields = (*bool)(unsafe.Pointer(in.XPreserveUnknownFields)) + out.XEmbeddedResource = in.XEmbeddedResource + out.XIntOrString = in.XIntOrString + out.XListMapKeys = *(*[]string)(unsafe.Pointer(&in.XListMapKeys)) + out.XListType = (*string)(unsafe.Pointer(in.XListType)) + return nil +} + +func autoConvert_v1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(in *JSONSchemaPropsOrArray, out *apiextensions.JSONSchemaPropsOrArray, s conversion.Scope) error { + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(apiextensions.JSONSchemaProps) + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + if in.JSONSchemas != nil { + in, out := &in.JSONSchemas, &out.JSONSchemas + *out = make([]apiextensions.JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.JSONSchemas = nil + } + return nil +} + +// 
Convert_v1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray is an autogenerated conversion function. +func Convert_v1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(in *JSONSchemaPropsOrArray, out *apiextensions.JSONSchemaPropsOrArray, s conversion.Scope) error { + return autoConvert_v1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(in, out, s) +} + +func autoConvert_apiextensions_JSONSchemaPropsOrArray_To_v1_JSONSchemaPropsOrArray(in *apiextensions.JSONSchemaPropsOrArray, out *JSONSchemaPropsOrArray, s conversion.Scope) error { + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + if in.JSONSchemas != nil { + in, out := &in.JSONSchemas, &out.JSONSchemas + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.JSONSchemas = nil + } + return nil +} + +// Convert_apiextensions_JSONSchemaPropsOrArray_To_v1_JSONSchemaPropsOrArray is an autogenerated conversion function. +func Convert_apiextensions_JSONSchemaPropsOrArray_To_v1_JSONSchemaPropsOrArray(in *apiextensions.JSONSchemaPropsOrArray, out *JSONSchemaPropsOrArray, s conversion.Scope) error { + return autoConvert_apiextensions_JSONSchemaPropsOrArray_To_v1_JSONSchemaPropsOrArray(in, out, s) +} + +func autoConvert_v1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(in *JSONSchemaPropsOrBool, out *apiextensions.JSONSchemaPropsOrBool, s conversion.Scope) error { + out.Allows = in.Allows + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(apiextensions.JSONSchemaProps) + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + return nil +} + +// Convert_v1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool is an autogenerated conversion function. +func Convert_v1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(in *JSONSchemaPropsOrBool, out *apiextensions.JSONSchemaPropsOrBool, s conversion.Scope) error { + return autoConvert_v1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(in, out, s) +} + +func autoConvert_apiextensions_JSONSchemaPropsOrBool_To_v1_JSONSchemaPropsOrBool(in *apiextensions.JSONSchemaPropsOrBool, out *JSONSchemaPropsOrBool, s conversion.Scope) error { + out.Allows = in.Allows + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + return nil +} + +// Convert_apiextensions_JSONSchemaPropsOrBool_To_v1_JSONSchemaPropsOrBool is an autogenerated conversion function. 
+func Convert_apiextensions_JSONSchemaPropsOrBool_To_v1_JSONSchemaPropsOrBool(in *apiextensions.JSONSchemaPropsOrBool, out *JSONSchemaPropsOrBool, s conversion.Scope) error { + return autoConvert_apiextensions_JSONSchemaPropsOrBool_To_v1_JSONSchemaPropsOrBool(in, out, s) +} + +func autoConvert_v1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(in *JSONSchemaPropsOrStringArray, out *apiextensions.JSONSchemaPropsOrStringArray, s conversion.Scope) error { + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(apiextensions.JSONSchemaProps) + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + out.Property = *(*[]string)(unsafe.Pointer(&in.Property)) + return nil +} + +// Convert_v1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray is an autogenerated conversion function. +func Convert_v1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(in *JSONSchemaPropsOrStringArray, out *apiextensions.JSONSchemaPropsOrStringArray, s conversion.Scope) error { + return autoConvert_v1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(in, out, s) +} + +func autoConvert_apiextensions_JSONSchemaPropsOrStringArray_To_v1_JSONSchemaPropsOrStringArray(in *apiextensions.JSONSchemaPropsOrStringArray, out *JSONSchemaPropsOrStringArray, s conversion.Scope) error { + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + out.Property = *(*[]string)(unsafe.Pointer(&in.Property)) + return nil +} + +// Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1_JSONSchemaPropsOrStringArray is an autogenerated conversion function. +func Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1_JSONSchemaPropsOrStringArray(in *apiextensions.JSONSchemaPropsOrStringArray, out *JSONSchemaPropsOrStringArray, s conversion.Scope) error { + return autoConvert_apiextensions_JSONSchemaPropsOrStringArray_To_v1_JSONSchemaPropsOrStringArray(in, out, s) +} + +func autoConvert_v1_ServiceReference_To_apiextensions_ServiceReference(in *ServiceReference, out *apiextensions.ServiceReference, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Name = in.Name + out.Path = (*string)(unsafe.Pointer(in.Path)) + if err := metav1.Convert_Pointer_int32_To_int32(&in.Port, &out.Port, s); err != nil { + return err + } + return nil +} + +// Convert_v1_ServiceReference_To_apiextensions_ServiceReference is an autogenerated conversion function. +func Convert_v1_ServiceReference_To_apiextensions_ServiceReference(in *ServiceReference, out *apiextensions.ServiceReference, s conversion.Scope) error { + return autoConvert_v1_ServiceReference_To_apiextensions_ServiceReference(in, out, s) +} + +func autoConvert_apiextensions_ServiceReference_To_v1_ServiceReference(in *apiextensions.ServiceReference, out *ServiceReference, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Name = in.Name + out.Path = (*string)(unsafe.Pointer(in.Path)) + if err := metav1.Convert_int32_To_Pointer_int32(&in.Port, &out.Port, s); err != nil { + return err + } + return nil +} + +// Convert_apiextensions_ServiceReference_To_v1_ServiceReference is an autogenerated conversion function. 
+func Convert_apiextensions_ServiceReference_To_v1_ServiceReference(in *apiextensions.ServiceReference, out *ServiceReference, s conversion.Scope) error { + return autoConvert_apiextensions_ServiceReference_To_v1_ServiceReference(in, out, s) +} + +func autoConvert_v1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in *WebhookClientConfig, out *apiextensions.WebhookClientConfig, s conversion.Scope) error { + out.URL = (*string)(unsafe.Pointer(in.URL)) + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(apiextensions.ServiceReference) + if err := Convert_v1_ServiceReference_To_apiextensions_ServiceReference(*in, *out, s); err != nil { + return err + } + } else { + out.Service = nil + } + out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) + return nil +} + +// Convert_v1_WebhookClientConfig_To_apiextensions_WebhookClientConfig is an autogenerated conversion function. +func Convert_v1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in *WebhookClientConfig, out *apiextensions.WebhookClientConfig, s conversion.Scope) error { + return autoConvert_v1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in, out, s) +} + +func autoConvert_apiextensions_WebhookClientConfig_To_v1_WebhookClientConfig(in *apiextensions.WebhookClientConfig, out *WebhookClientConfig, s conversion.Scope) error { + out.URL = (*string)(unsafe.Pointer(in.URL)) + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + if err := Convert_apiextensions_ServiceReference_To_v1_ServiceReference(*in, *out, s); err != nil { + return err + } + } else { + out.Service = nil + } + out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) + return nil +} + +// Convert_apiextensions_WebhookClientConfig_To_v1_WebhookClientConfig is an autogenerated conversion function. +func Convert_apiextensions_WebhookClientConfig_To_v1_WebhookClientConfig(in *apiextensions.WebhookClientConfig, out *WebhookClientConfig, s conversion.Scope) error { + return autoConvert_apiextensions_WebhookClientConfig_To_v1_WebhookClientConfig(in, out, s) +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..5fa854585 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go @@ -0,0 +1,663 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConversionRequest) DeepCopyInto(out *ConversionRequest) { + *out = *in + if in.Objects != nil { + in, out := &in.Objects, &out.Objects + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversionRequest. +func (in *ConversionRequest) DeepCopy() *ConversionRequest { + if in == nil { + return nil + } + out := new(ConversionRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConversionResponse) DeepCopyInto(out *ConversionResponse) { + *out = *in + if in.ConvertedObjects != nil { + in, out := &in.ConvertedObjects, &out.ConvertedObjects + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Result.DeepCopyInto(&out.Result) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversionResponse. +func (in *ConversionResponse) DeepCopy() *ConversionResponse { + if in == nil { + return nil + } + out := new(ConversionResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConversionReview) DeepCopyInto(out *ConversionReview) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = new(ConversionRequest) + (*in).DeepCopyInto(*out) + } + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = new(ConversionResponse) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversionReview. +func (in *ConversionReview) DeepCopy() *ConversionReview { + if in == nil { + return nil + } + out := new(ConversionReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConversionReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceColumnDefinition) DeepCopyInto(out *CustomResourceColumnDefinition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceColumnDefinition. +func (in *CustomResourceColumnDefinition) DeepCopy() *CustomResourceColumnDefinition { + if in == nil { + return nil + } + out := new(CustomResourceColumnDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceConversion) DeepCopyInto(out *CustomResourceConversion) { + *out = *in + if in.Webhook != nil { + in, out := &in.Webhook, &out.Webhook + *out = new(WebhookConversion) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceConversion. 
+func (in *CustomResourceConversion) DeepCopy() *CustomResourceConversion { + if in == nil { + return nil + } + out := new(CustomResourceConversion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinition) DeepCopyInto(out *CustomResourceDefinition) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinition. +func (in *CustomResourceDefinition) DeepCopy() *CustomResourceDefinition { + if in == nil { + return nil + } + out := new(CustomResourceDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CustomResourceDefinition) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinitionCondition) DeepCopyInto(out *CustomResourceDefinitionCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionCondition. +func (in *CustomResourceDefinitionCondition) DeepCopy() *CustomResourceDefinitionCondition { + if in == nil { + return nil + } + out := new(CustomResourceDefinitionCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinitionList) DeepCopyInto(out *CustomResourceDefinitionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomResourceDefinition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionList. +func (in *CustomResourceDefinitionList) DeepCopy() *CustomResourceDefinitionList { + if in == nil { + return nil + } + out := new(CustomResourceDefinitionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CustomResourceDefinitionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinitionNames) DeepCopyInto(out *CustomResourceDefinitionNames) { + *out = *in + if in.ShortNames != nil { + in, out := &in.ShortNames, &out.ShortNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Categories != nil { + in, out := &in.Categories, &out.Categories + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionNames. 
+func (in *CustomResourceDefinitionNames) DeepCopy() *CustomResourceDefinitionNames { + if in == nil { + return nil + } + out := new(CustomResourceDefinitionNames) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinitionSpec) DeepCopyInto(out *CustomResourceDefinitionSpec) { + *out = *in + in.Names.DeepCopyInto(&out.Names) + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]CustomResourceDefinitionVersion, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conversion != nil { + in, out := &in.Conversion, &out.Conversion + *out = new(CustomResourceConversion) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionSpec. +func (in *CustomResourceDefinitionSpec) DeepCopy() *CustomResourceDefinitionSpec { + if in == nil { + return nil + } + out := new(CustomResourceDefinitionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinitionStatus) DeepCopyInto(out *CustomResourceDefinitionStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]CustomResourceDefinitionCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.AcceptedNames.DeepCopyInto(&out.AcceptedNames) + if in.StoredVersions != nil { + in, out := &in.StoredVersions, &out.StoredVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionStatus. +func (in *CustomResourceDefinitionStatus) DeepCopy() *CustomResourceDefinitionStatus { + if in == nil { + return nil + } + out := new(CustomResourceDefinitionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinitionVersion) DeepCopyInto(out *CustomResourceDefinitionVersion) { + *out = *in + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(CustomResourceValidation) + (*in).DeepCopyInto(*out) + } + if in.Subresources != nil { + in, out := &in.Subresources, &out.Subresources + *out = new(CustomResourceSubresources) + (*in).DeepCopyInto(*out) + } + if in.AdditionalPrinterColumns != nil { + in, out := &in.AdditionalPrinterColumns, &out.AdditionalPrinterColumns + *out = make([]CustomResourceColumnDefinition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionVersion. +func (in *CustomResourceDefinitionVersion) DeepCopy() *CustomResourceDefinitionVersion { + if in == nil { + return nil + } + out := new(CustomResourceDefinitionVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomResourceSubresourceScale) DeepCopyInto(out *CustomResourceSubresourceScale) { + *out = *in + if in.LabelSelectorPath != nil { + in, out := &in.LabelSelectorPath, &out.LabelSelectorPath + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresourceScale. +func (in *CustomResourceSubresourceScale) DeepCopy() *CustomResourceSubresourceScale { + if in == nil { + return nil + } + out := new(CustomResourceSubresourceScale) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceSubresourceStatus) DeepCopyInto(out *CustomResourceSubresourceStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresourceStatus. +func (in *CustomResourceSubresourceStatus) DeepCopy() *CustomResourceSubresourceStatus { + if in == nil { + return nil + } + out := new(CustomResourceSubresourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceSubresources) DeepCopyInto(out *CustomResourceSubresources) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(CustomResourceSubresourceStatus) + **out = **in + } + if in.Scale != nil { + in, out := &in.Scale, &out.Scale + *out = new(CustomResourceSubresourceScale) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresources. +func (in *CustomResourceSubresources) DeepCopy() *CustomResourceSubresources { + if in == nil { + return nil + } + out := new(CustomResourceSubresources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceValidation) DeepCopyInto(out *CustomResourceValidation) { + *out = *in + if in.OpenAPIV3Schema != nil { + in, out := &in.OpenAPIV3Schema, &out.OpenAPIV3Schema + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceValidation. +func (in *CustomResourceValidation) DeepCopy() *CustomResourceValidation { + if in == nil { + return nil + } + out := new(CustomResourceValidation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalDocumentation) DeepCopyInto(out *ExternalDocumentation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalDocumentation. +func (in *ExternalDocumentation) DeepCopy() *ExternalDocumentation { + if in == nil { + return nil + } + out := new(ExternalDocumentation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSON) DeepCopyInto(out *JSON) { + *out = *in + if in.Raw != nil { + in, out := &in.Raw, &out.Raw + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSON. 
+func (in *JSON) DeepCopy() *JSON { + if in == nil { + return nil + } + out := new(JSON) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in JSONSchemaDefinitions) DeepCopyInto(out *JSONSchemaDefinitions) { + { + in := &in + *out = make(JSONSchemaDefinitions, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaDefinitions. +func (in JSONSchemaDefinitions) DeepCopy() JSONSchemaDefinitions { + if in == nil { + return nil + } + out := new(JSONSchemaDefinitions) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in JSONSchemaDependencies) DeepCopyInto(out *JSONSchemaDependencies) { + { + in := &in + *out = make(JSONSchemaDependencies, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaDependencies. +func (in JSONSchemaDependencies) DeepCopy() JSONSchemaDependencies { + if in == nil { + return nil + } + out := new(JSONSchemaDependencies) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONSchemaProps) DeepCopyInto(out *JSONSchemaProps) { + clone := in.DeepCopy() + *out = *clone + return +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONSchemaPropsOrArray) DeepCopyInto(out *JSONSchemaPropsOrArray) { + *out = *in + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = (*in).DeepCopy() + } + if in.JSONSchemas != nil { + in, out := &in.JSONSchemas, &out.JSONSchemas + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrArray. +func (in *JSONSchemaPropsOrArray) DeepCopy() *JSONSchemaPropsOrArray { + if in == nil { + return nil + } + out := new(JSONSchemaPropsOrArray) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONSchemaPropsOrBool) DeepCopyInto(out *JSONSchemaPropsOrBool) { + *out = *in + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrBool. +func (in *JSONSchemaPropsOrBool) DeepCopy() *JSONSchemaPropsOrBool { + if in == nil { + return nil + } + out := new(JSONSchemaPropsOrBool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JSONSchemaPropsOrStringArray) DeepCopyInto(out *JSONSchemaPropsOrStringArray) { + *out = *in + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = (*in).DeepCopy() + } + if in.Property != nil { + in, out := &in.Property, &out.Property + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrStringArray. +func (in *JSONSchemaPropsOrStringArray) DeepCopy() *JSONSchemaPropsOrStringArray { + if in == nil { + return nil + } + out := new(JSONSchemaPropsOrStringArray) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. +func (in *ServiceReference) DeepCopy() *ServiceReference { + if in == nil { + return nil + } + out := new(ServiceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + (*in).DeepCopyInto(*out) + } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig. +func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig { + if in == nil { + return nil + } + out := new(WebhookClientConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookConversion) DeepCopyInto(out *WebhookConversion) { + *out = *in + if in.ClientConfig != nil { + in, out := &in.ClientConfig, &out.ClientConfig + *out = new(WebhookClientConfig) + (*in).DeepCopyInto(*out) + } + if in.ConversionReviewVersions != nil { + in, out := &in.ConversionReviewVersions, &out.ConversionReviewVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConversion. +func (in *WebhookConversion) DeepCopy() *WebhookConversion { + if in == nil { + return nil + } + out := new(WebhookConversion) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.defaults.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.defaults.go new file mode 100644 index 000000000..ed03cdd88 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.defaults.go @@ -0,0 +1,57 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&CustomResourceDefinition{}, func(obj interface{}) { SetObjectDefaults_CustomResourceDefinition(obj.(*CustomResourceDefinition)) }) + scheme.AddTypeDefaultingFunc(&CustomResourceDefinitionList{}, func(obj interface{}) { + SetObjectDefaults_CustomResourceDefinitionList(obj.(*CustomResourceDefinitionList)) + }) + return nil +} + +func SetObjectDefaults_CustomResourceDefinition(in *CustomResourceDefinition) { + SetDefaults_CustomResourceDefinition(in) + SetDefaults_CustomResourceDefinitionSpec(&in.Spec) + if in.Spec.Conversion != nil { + if in.Spec.Conversion.Webhook != nil { + if in.Spec.Conversion.Webhook.ClientConfig != nil { + if in.Spec.Conversion.Webhook.ClientConfig.Service != nil { + SetDefaults_ServiceReference(in.Spec.Conversion.Webhook.ClientConfig.Service) + } + } + } + } +} + +func SetObjectDefaults_CustomResourceDefinitionList(in *CustomResourceDefinitionList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_CustomResourceDefinition(a) + } +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go index f6a114e2b..a4560dc5f 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go @@ -234,5 +234,31 @@ func (in *JSONSchemaProps) DeepCopy() *JSONSchemaProps { } } + if in.XPreserveUnknownFields != nil { + in, out := &in.XPreserveUnknownFields, &out.XPreserveUnknownFields + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + + if in.XListMapKeys != nil { + in, out := &in.XListMapKeys, &out.XListMapKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } + + if in.XListType != nil { + in, out := &in.XListType, &out.XListType + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return out } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go index 7bea2d698..1a9c2238e 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go @@ -20,13 +20,11 @@ import ( "strings" "k8s.io/apimachinery/pkg/runtime" + utilpointer "k8s.io/utils/pointer" ) func addDefaultingFuncs(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&CustomResourceDefinition{}, func(obj interface{}) { SetDefaults_CustomResourceDefinition(obj.(*CustomResourceDefinition)) }) - // TODO figure out why I can't seem to get my defaulter generated - // 
return RegisterDefaults(scheme) - return nil + return RegisterDefaults(scheme) } func SetDefaults_CustomResourceDefinition(obj *CustomResourceDefinition) { @@ -71,14 +69,14 @@ func SetDefaults_CustomResourceDefinitionSpec(obj *CustomResourceDefinitionSpec) if obj.Conversion.Strategy == WebhookConverter && len(obj.Conversion.ConversionReviewVersions) == 0 { obj.Conversion.ConversionReviewVersions = []string{SchemeGroupVersion.Version} } + if obj.PreserveUnknownFields == nil { + obj.PreserveUnknownFields = utilpointer.BoolPtr(true) + } } -// hasPerVersionColumns returns true if a CRD uses per-version columns. -func hasPerVersionColumns(versions []CustomResourceDefinitionVersion) bool { - for _, v := range versions { - if len(v.AdditionalPrinterColumns) > 0 { - return true - } +// SetDefaults_ServiceReference sets defaults for Webhook's ServiceReference +func SetDefaults_ServiceReference(obj *ServiceReference) { + if obj.Port == nil { + obj.Port = utilpointer.Int32Ptr(443) } - return false } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go index ea46688ec..c28384c22 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go @@ -17,55 +17,25 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto - - It has these top-level messages: - ConversionRequest - ConversionResponse - ConversionReview - CustomResourceColumnDefinition - CustomResourceConversion - CustomResourceDefinition - CustomResourceDefinitionCondition - CustomResourceDefinitionList - CustomResourceDefinitionNames - CustomResourceDefinitionSpec - CustomResourceDefinitionStatus - CustomResourceDefinitionVersion - CustomResourceSubresourceScale - CustomResourceSubresourceStatus - CustomResourceSubresources - CustomResourceValidation - ExternalDocumentation - JSON - JSONSchemaProps - JSONSchemaPropsOrArray - JSONSchemaPropsOrBool - JSONSchemaPropsOrStringArray - ServiceReference - WebhookClientConfig -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + encoding_binary "encoding/binary" + fmt "fmt" -import k8s_io_apimachinery_pkg_runtime "k8s.io/apimachinery/pkg/runtime" + io "io" -import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + runtime "k8s.io/apimachinery/pkg/runtime" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" -import encoding_binary "encoding/binary" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import strings "strings" -import reflect "reflect" - -import io "io" + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -78,129 +48,677 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ConversionRequest) Reset() { *m = ConversionRequest{} } -func (*ConversionRequest) ProtoMessage() {} -func (*ConversionRequest) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ConversionRequest) Reset() { *m = ConversionRequest{} } +func (*ConversionRequest) ProtoMessage() {} +func (*ConversionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{0} +} +func (m *ConversionRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConversionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConversionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConversionRequest.Merge(m, src) +} +func (m *ConversionRequest) XXX_Size() int { + return m.Size() +} +func (m *ConversionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ConversionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ConversionRequest proto.InternalMessageInfo + +func (m *ConversionResponse) Reset() { *m = ConversionResponse{} } +func (*ConversionResponse) ProtoMessage() {} +func (*ConversionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{1} +} +func (m *ConversionResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConversionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConversionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConversionResponse.Merge(m, src) +} +func (m *ConversionResponse) XXX_Size() int { + return m.Size() +} +func (m *ConversionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ConversionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ConversionResponse proto.InternalMessageInfo -func (m *ConversionResponse) Reset() { *m = ConversionResponse{} } -func (*ConversionResponse) ProtoMessage() {} -func (*ConversionResponse) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *ConversionReview) Reset() { *m = ConversionReview{} } +func (*ConversionReview) ProtoMessage() {} +func (*ConversionReview) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{2} +} +func (m *ConversionReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConversionReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConversionReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConversionReview.Merge(m, src) +} +func (m *ConversionReview) XXX_Size() int { + return m.Size() +} +func (m *ConversionReview) XXX_DiscardUnknown() { + xxx_messageInfo_ConversionReview.DiscardUnknown(m) +} -func (m *ConversionReview) Reset() { *m = ConversionReview{} } -func (*ConversionReview) ProtoMessage() {} -func (*ConversionReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_ConversionReview proto.InternalMessageInfo func (m *CustomResourceColumnDefinition) Reset() { *m = CustomResourceColumnDefinition{} } func (*CustomResourceColumnDefinition) ProtoMessage() {} func (*CustomResourceColumnDefinition) Descriptor() ([]byte, []int) { 
- return fileDescriptorGenerated, []int{3} + return fileDescriptor_98a4cc6918394e53, []int{3} +} +func (m *CustomResourceColumnDefinition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceColumnDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceColumnDefinition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceColumnDefinition.Merge(m, src) +} +func (m *CustomResourceColumnDefinition) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceColumnDefinition) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceColumnDefinition.DiscardUnknown(m) } +var xxx_messageInfo_CustomResourceColumnDefinition proto.InternalMessageInfo + func (m *CustomResourceConversion) Reset() { *m = CustomResourceConversion{} } func (*CustomResourceConversion) ProtoMessage() {} func (*CustomResourceConversion) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} + return fileDescriptor_98a4cc6918394e53, []int{4} +} +func (m *CustomResourceConversion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceConversion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } +func (m *CustomResourceConversion) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceConversion.Merge(m, src) +} +func (m *CustomResourceConversion) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceConversion) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceConversion.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceConversion proto.InternalMessageInfo func (m *CustomResourceDefinition) Reset() { *m = CustomResourceDefinition{} } func (*CustomResourceDefinition) ProtoMessage() {} func (*CustomResourceDefinition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} + return fileDescriptor_98a4cc6918394e53, []int{5} +} +func (m *CustomResourceDefinition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinition.Merge(m, src) +} +func (m *CustomResourceDefinition) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinition) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinition.DiscardUnknown(m) } +var xxx_messageInfo_CustomResourceDefinition proto.InternalMessageInfo + func (m *CustomResourceDefinitionCondition) Reset() { *m = CustomResourceDefinitionCondition{} } func (*CustomResourceDefinitionCondition) ProtoMessage() {} func (*CustomResourceDefinitionCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} + return fileDescriptor_98a4cc6918394e53, []int{6} +} +func (m *CustomResourceDefinitionCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionCondition) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionCondition.Merge(m, src) +} +func (m *CustomResourceDefinitionCondition) XXX_Size() int { + return m.Size() } +func (m *CustomResourceDefinitionCondition) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionCondition proto.InternalMessageInfo func (m *CustomResourceDefinitionList) Reset() { *m = CustomResourceDefinitionList{} } func (*CustomResourceDefinitionList) ProtoMessage() {} func (*CustomResourceDefinitionList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptor_98a4cc6918394e53, []int{7} +} +func (m *CustomResourceDefinitionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionList.Merge(m, src) +} +func (m *CustomResourceDefinitionList) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionList) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionList.DiscardUnknown(m) } +var xxx_messageInfo_CustomResourceDefinitionList proto.InternalMessageInfo + func (m *CustomResourceDefinitionNames) Reset() { *m = CustomResourceDefinitionNames{} } func (*CustomResourceDefinitionNames) ProtoMessage() {} func (*CustomResourceDefinitionNames) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{8} + return fileDescriptor_98a4cc6918394e53, []int{8} +} +func (m *CustomResourceDefinitionNames) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } +func (m *CustomResourceDefinitionNames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionNames) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionNames.Merge(m, src) +} +func (m *CustomResourceDefinitionNames) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionNames) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionNames.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionNames proto.InternalMessageInfo func (m *CustomResourceDefinitionSpec) Reset() { *m = CustomResourceDefinitionSpec{} } func (*CustomResourceDefinitionSpec) ProtoMessage() {} func (*CustomResourceDefinitionSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{9} + return fileDescriptor_98a4cc6918394e53, []int{9} +} +func (m *CustomResourceDefinitionSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionSpec.Merge(m, src) +} +func (m *CustomResourceDefinitionSpec) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionSpec.DiscardUnknown(m) } +var xxx_messageInfo_CustomResourceDefinitionSpec proto.InternalMessageInfo + 
func (m *CustomResourceDefinitionStatus) Reset() { *m = CustomResourceDefinitionStatus{} } func (*CustomResourceDefinitionStatus) ProtoMessage() {} func (*CustomResourceDefinitionStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{10} + return fileDescriptor_98a4cc6918394e53, []int{10} +} +func (m *CustomResourceDefinitionStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionStatus.Merge(m, src) +} +func (m *CustomResourceDefinitionStatus) XXX_Size() int { + return m.Size() } +func (m *CustomResourceDefinitionStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionStatus proto.InternalMessageInfo func (m *CustomResourceDefinitionVersion) Reset() { *m = CustomResourceDefinitionVersion{} } func (*CustomResourceDefinitionVersion) ProtoMessage() {} func (*CustomResourceDefinitionVersion) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{11} + return fileDescriptor_98a4cc6918394e53, []int{11} +} +func (m *CustomResourceDefinitionVersion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionVersion.Merge(m, src) +} +func (m *CustomResourceDefinitionVersion) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionVersion) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionVersion.DiscardUnknown(m) } +var xxx_messageInfo_CustomResourceDefinitionVersion proto.InternalMessageInfo + func (m *CustomResourceSubresourceScale) Reset() { *m = CustomResourceSubresourceScale{} } func (*CustomResourceSubresourceScale) ProtoMessage() {} func (*CustomResourceSubresourceScale) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{12} + return fileDescriptor_98a4cc6918394e53, []int{12} +} +func (m *CustomResourceSubresourceScale) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceSubresourceScale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } +func (m *CustomResourceSubresourceScale) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceSubresourceScale.Merge(m, src) +} +func (m *CustomResourceSubresourceScale) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceSubresourceScale) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceSubresourceScale.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceSubresourceScale proto.InternalMessageInfo func (m *CustomResourceSubresourceStatus) Reset() { *m = CustomResourceSubresourceStatus{} } func (*CustomResourceSubresourceStatus) ProtoMessage() {} func (*CustomResourceSubresourceStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{13} + return fileDescriptor_98a4cc6918394e53, []int{13} +} +func (m 
*CustomResourceSubresourceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceSubresourceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceSubresourceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceSubresourceStatus.Merge(m, src) +} +func (m *CustomResourceSubresourceStatus) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceSubresourceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceSubresourceStatus.DiscardUnknown(m) } +var xxx_messageInfo_CustomResourceSubresourceStatus proto.InternalMessageInfo + func (m *CustomResourceSubresources) Reset() { *m = CustomResourceSubresources{} } func (*CustomResourceSubresources) ProtoMessage() {} func (*CustomResourceSubresources) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{14} + return fileDescriptor_98a4cc6918394e53, []int{14} +} +func (m *CustomResourceSubresources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceSubresources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceSubresources) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceSubresources.Merge(m, src) +} +func (m *CustomResourceSubresources) XXX_Size() int { + return m.Size() } +func (m *CustomResourceSubresources) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceSubresources.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceSubresources proto.InternalMessageInfo func (m *CustomResourceValidation) Reset() { *m = CustomResourceValidation{} } func (*CustomResourceValidation) ProtoMessage() {} func (*CustomResourceValidation) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{15} + return fileDescriptor_98a4cc6918394e53, []int{15} +} +func (m *CustomResourceValidation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceValidation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceValidation) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceValidation.Merge(m, src) +} +func (m *CustomResourceValidation) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceValidation) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceValidation.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceValidation proto.InternalMessageInfo + +func (m *ExternalDocumentation) Reset() { *m = ExternalDocumentation{} } +func (*ExternalDocumentation) ProtoMessage() {} +func (*ExternalDocumentation) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{16} +} +func (m *ExternalDocumentation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalDocumentation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalDocumentation) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalDocumentation.Merge(m, src) +} +func (m *ExternalDocumentation) XXX_Size() int { + return m.Size() +} +func (m *ExternalDocumentation) 
XXX_DiscardUnknown() { + xxx_messageInfo_ExternalDocumentation.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalDocumentation proto.InternalMessageInfo + +func (m *JSON) Reset() { *m = JSON{} } +func (*JSON) ProtoMessage() {} +func (*JSON) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{17} +} +func (m *JSON) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSON) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSON) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSON.Merge(m, src) +} +func (m *JSON) XXX_Size() int { + return m.Size() +} +func (m *JSON) XXX_DiscardUnknown() { + xxx_messageInfo_JSON.DiscardUnknown(m) +} + +var xxx_messageInfo_JSON proto.InternalMessageInfo + +func (m *JSONSchemaProps) Reset() { *m = JSONSchemaProps{} } +func (*JSONSchemaProps) ProtoMessage() {} +func (*JSONSchemaProps) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{18} +} +func (m *JSONSchemaProps) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaProps) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaProps) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaProps.Merge(m, src) +} +func (m *JSONSchemaProps) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaProps) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaProps.DiscardUnknown(m) } -func (m *ExternalDocumentation) Reset() { *m = ExternalDocumentation{} } -func (*ExternalDocumentation) ProtoMessage() {} -func (*ExternalDocumentation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +var xxx_messageInfo_JSONSchemaProps proto.InternalMessageInfo -func (m *JSON) Reset() { *m = JSON{} } -func (*JSON) ProtoMessage() {} -func (*JSON) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (m *JSONSchemaPropsOrArray) Reset() { *m = JSONSchemaPropsOrArray{} } +func (*JSONSchemaPropsOrArray) ProtoMessage() {} +func (*JSONSchemaPropsOrArray) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{19} +} +func (m *JSONSchemaPropsOrArray) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaPropsOrArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaPropsOrArray) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaPropsOrArray.Merge(m, src) +} +func (m *JSONSchemaPropsOrArray) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaPropsOrArray) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaPropsOrArray.DiscardUnknown(m) +} -func (m *JSONSchemaProps) Reset() { *m = JSONSchemaProps{} } -func (*JSONSchemaProps) ProtoMessage() {} -func (*JSONSchemaProps) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +var xxx_messageInfo_JSONSchemaPropsOrArray proto.InternalMessageInfo -func (m *JSONSchemaPropsOrArray) Reset() { *m = JSONSchemaPropsOrArray{} } -func (*JSONSchemaPropsOrArray) ProtoMessage() {} -func (*JSONSchemaPropsOrArray) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (m *JSONSchemaPropsOrBool) Reset() { *m = 
JSONSchemaPropsOrBool{} } +func (*JSONSchemaPropsOrBool) ProtoMessage() {} +func (*JSONSchemaPropsOrBool) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{20} +} +func (m *JSONSchemaPropsOrBool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaPropsOrBool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaPropsOrBool) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaPropsOrBool.Merge(m, src) +} +func (m *JSONSchemaPropsOrBool) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaPropsOrBool) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaPropsOrBool.DiscardUnknown(m) +} -func (m *JSONSchemaPropsOrBool) Reset() { *m = JSONSchemaPropsOrBool{} } -func (*JSONSchemaPropsOrBool) ProtoMessage() {} -func (*JSONSchemaPropsOrBool) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +var xxx_messageInfo_JSONSchemaPropsOrBool proto.InternalMessageInfo func (m *JSONSchemaPropsOrStringArray) Reset() { *m = JSONSchemaPropsOrStringArray{} } func (*JSONSchemaPropsOrStringArray) ProtoMessage() {} func (*JSONSchemaPropsOrStringArray) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{21} + return fileDescriptor_98a4cc6918394e53, []int{21} +} +func (m *JSONSchemaPropsOrStringArray) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaPropsOrStringArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaPropsOrStringArray) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaPropsOrStringArray.Merge(m, src) +} +func (m *JSONSchemaPropsOrStringArray) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaPropsOrStringArray) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaPropsOrStringArray.DiscardUnknown(m) +} + +var xxx_messageInfo_JSONSchemaPropsOrStringArray proto.InternalMessageInfo + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{22} +} +func (m *ServiceReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceReference.Merge(m, src) +} +func (m *ServiceReference) XXX_Size() int { + return m.Size() +} +func (m *ServiceReference) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceReference.DiscardUnknown(m) } -func (m *ServiceReference) Reset() { *m = ServiceReference{} } -func (*ServiceReference) ProtoMessage() {} -func (*ServiceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +var xxx_messageInfo_ServiceReference proto.InternalMessageInfo + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{23} +} +func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookClientConfig.Merge(m, src) +} +func (m *WebhookClientConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookClientConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) +} -func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } -func (*WebhookClientConfig) ProtoMessage() {} -func (*WebhookClientConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo func init() { proto.RegisterType((*ConversionRequest)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ConversionRequest") @@ -222,16 +740,214 @@ func init() { proto.RegisterType((*ExternalDocumentation)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ExternalDocumentation") proto.RegisterType((*JSON)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSON") proto.RegisterType((*JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps") + proto.RegisterMapType((JSONSchemaDefinitions)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps.DefinitionsEntry") + proto.RegisterMapType((JSONSchemaDependencies)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps.DependenciesEntry") + proto.RegisterMapType((map[string]JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps.PatternPropertiesEntry") + proto.RegisterMapType((map[string]JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps.PropertiesEntry") proto.RegisterType((*JSONSchemaPropsOrArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrArray") proto.RegisterType((*JSONSchemaPropsOrBool)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrBool") proto.RegisterType((*JSONSchemaPropsOrStringArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrStringArray") proto.RegisterType((*ServiceReference)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ServiceReference") proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.WebhookClientConfig") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto", fileDescriptor_98a4cc6918394e53) +} + +var fileDescriptor_98a4cc6918394e53 = []byte{ + // 2955 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcf, 0x73, 0x23, 0x47, + 0xf5, 0xdf, 0x91, 0x2c, 0x5b, 0x6e, 0xdb, 0x6b, 0xbb, 0x77, 0xed, 0xcc, 0x3a, 0x1b, 0xcb, 0xab, + 0x7c, 0xb3, 0x5f, 0x27, 0xec, 0xca, 0xc9, 0x92, 0x90, 0x90, 0x2a, 0x8a, 0xb2, 0x6c, 0x27, 0x38, + 0x59, 0x5b, 0xa6, 0xb5, 0x9b, 0x18, 0xf2, 0xb3, 0xad, 0x69, 0xc9, 0xb3, 0x9e, 0x5f, 0x3b, 0x3d, + 0x23, 0xdb, 0x15, 0xa0, 0xf8, 0x51, 0x29, 0x28, 0x0a, 0x08, 0x45, 0x72, 0xa1, 0x0a, 0x0e, 0x81, + 0xe2, 0xc2, 0x01, 0x0e, 0x70, 0x83, 0x3f, 0x20, 0xc7, 0x14, 0xa7, 0x1c, 0x28, 0x15, 0xab, 0x5c, + 0x39, 0x52, 0x05, 0xe5, 0x13, 0xd5, 
0x3f, 0xa6, 0x67, 0x34, 0x92, 0x76, 0x5d, 0x59, 0x29, 0xcb, + 0xcd, 0x7a, 0xbf, 0x3e, 0xaf, 0x5f, 0xbf, 0x7e, 0xfd, 0xfa, 0x8d, 0x41, 0xfd, 0xe0, 0x39, 0x5a, + 0x32, 0xdd, 0x95, 0x83, 0x70, 0x8f, 0xf8, 0x0e, 0x09, 0x08, 0x5d, 0x69, 0x12, 0xc7, 0x70, 0xfd, + 0x15, 0xc9, 0xc0, 0x9e, 0x49, 0x8e, 0x02, 0xe2, 0x50, 0xd3, 0x75, 0xe8, 0x55, 0xec, 0x99, 0x94, + 0xf8, 0x4d, 0xe2, 0xaf, 0x78, 0x07, 0x0d, 0xc6, 0xa3, 0x9d, 0x02, 0x2b, 0xcd, 0xa7, 0xf6, 0x48, + 0x80, 0x9f, 0x5a, 0x69, 0x10, 0x87, 0xf8, 0x38, 0x20, 0x46, 0xc9, 0xf3, 0xdd, 0xc0, 0x85, 0x5f, + 0x11, 0xe6, 0x4a, 0x1d, 0xd2, 0x6f, 0x29, 0x73, 0x25, 0xef, 0xa0, 0xc1, 0x78, 0xb4, 0x53, 0xa0, + 0x24, 0xcd, 0x2d, 0x5c, 0x6d, 0x98, 0xc1, 0x7e, 0xb8, 0x57, 0xaa, 0xb9, 0xf6, 0x4a, 0xc3, 0x6d, + 0xb8, 0x2b, 0xdc, 0xea, 0x5e, 0x58, 0xe7, 0xbf, 0xf8, 0x0f, 0xfe, 0x97, 0x40, 0x5b, 0x78, 0x3a, + 0x76, 0xde, 0xc6, 0xb5, 0x7d, 0xd3, 0x21, 0xfe, 0x71, 0xec, 0xb1, 0x4d, 0x02, 0xbc, 0xd2, 0xec, + 0xf2, 0x71, 0x61, 0xa5, 0x9f, 0x96, 0x1f, 0x3a, 0x81, 0x69, 0x93, 0x2e, 0x85, 0x2f, 0xdd, 0x4b, + 0x81, 0xd6, 0xf6, 0x89, 0x8d, 0xd3, 0x7a, 0xc5, 0x13, 0x0d, 0xcc, 0xae, 0xb9, 0x4e, 0x93, 0xf8, + 0x6c, 0x95, 0x88, 0xdc, 0x0e, 0x09, 0x0d, 0x60, 0x19, 0x64, 0x43, 0xd3, 0xd0, 0xb5, 0x25, 0x6d, + 0x79, 0xbc, 0xfc, 0xe4, 0x47, 0xad, 0xc2, 0x99, 0x76, 0xab, 0x90, 0xbd, 0xb9, 0xb9, 0x7e, 0xd2, + 0x2a, 0x5c, 0xea, 0x87, 0x14, 0x1c, 0x7b, 0x84, 0x96, 0x6e, 0x6e, 0xae, 0x23, 0xa6, 0x0c, 0x5f, + 0x04, 0xb3, 0x06, 0xa1, 0xa6, 0x4f, 0x8c, 0xd5, 0x9d, 0xcd, 0x57, 0x84, 0x7d, 0x3d, 0xc3, 0x2d, + 0x5e, 0x90, 0x16, 0x67, 0xd7, 0xd3, 0x02, 0xa8, 0x5b, 0x07, 0xee, 0x82, 0x31, 0x77, 0xef, 0x16, + 0xa9, 0x05, 0x54, 0xcf, 0x2e, 0x65, 0x97, 0x27, 0xae, 0x5d, 0x2d, 0xc5, 0x3b, 0xa8, 0x5c, 0xe0, + 0xdb, 0x26, 0x17, 0x5b, 0x42, 0xf8, 0x70, 0x23, 0xda, 0xb9, 0xf2, 0xb4, 0x44, 0x1b, 0xab, 0x08, + 0x2b, 0x28, 0x32, 0x57, 0xfc, 0x6d, 0x06, 0xc0, 0xe4, 0xe2, 0xa9, 0xe7, 0x3a, 0x94, 0x0c, 0x64, + 0xf5, 0x14, 0xcc, 0xd4, 0xb8, 0xe5, 0x80, 0x18, 0x12, 0x57, 0xcf, 0x7c, 0x16, 0xef, 0x75, 0x89, + 0x3f, 0xb3, 0x96, 0x32, 0x87, 0xba, 0x00, 0xe0, 0x0d, 0x30, 0xea, 0x13, 0x1a, 0x5a, 0x81, 0x9e, + 0x5d, 0xd2, 0x96, 0x27, 0xae, 0x5d, 0xe9, 0x0b, 0xc5, 0xf3, 0x9b, 0x25, 0x5f, 0xa9, 0xf9, 0x54, + 0xa9, 0x1a, 0xe0, 0x20, 0xa4, 0xe5, 0xb3, 0x12, 0x69, 0x14, 0x71, 0x1b, 0x48, 0xda, 0x2a, 0xfe, + 0x28, 0x03, 0x66, 0x92, 0x51, 0x6a, 0x9a, 0xe4, 0x10, 0x1e, 0x82, 0x31, 0x5f, 0x24, 0x0b, 0x8f, + 0xd3, 0xc4, 0xb5, 0x9d, 0xd2, 0x7d, 0x1d, 0xab, 0x52, 0x57, 0x12, 0x96, 0x27, 0xd8, 0x9e, 0xc9, + 0x1f, 0x28, 0x42, 0x83, 0xef, 0x80, 0xbc, 0x2f, 0x37, 0x8a, 0x67, 0xd3, 0xc4, 0xb5, 0xaf, 0x0f, + 0x10, 0x59, 0x18, 0x2e, 0x4f, 0xb6, 0x5b, 0x85, 0x7c, 0xf4, 0x0b, 0x29, 0xc0, 0xe2, 0xfb, 0x19, + 0xb0, 0xb8, 0x16, 0xd2, 0xc0, 0xb5, 0x11, 0xa1, 0x6e, 0xe8, 0xd7, 0xc8, 0x9a, 0x6b, 0x85, 0xb6, + 0xb3, 0x4e, 0xea, 0xa6, 0x63, 0x06, 0x2c, 0x5b, 0x97, 0xc0, 0x88, 0x83, 0x6d, 0x22, 0xb3, 0x67, + 0x52, 0xc6, 0x74, 0x64, 0x1b, 0xdb, 0x04, 0x71, 0x0e, 0x93, 0x60, 0xc9, 0x22, 0xcf, 0x82, 0x92, + 0xb8, 0x71, 0xec, 0x11, 0xc4, 0x39, 0xf0, 0x32, 0x18, 0xad, 0xbb, 0xbe, 0x8d, 0xc5, 0x3e, 0x8e, + 0xc7, 0x3b, 0xf3, 0x02, 0xa7, 0x22, 0xc9, 0x85, 0xcf, 0x80, 0x09, 0x83, 0xd0, 0x9a, 0x6f, 0x7a, + 0x0c, 0x5a, 0x1f, 0xe1, 0xc2, 0xe7, 0xa4, 0xf0, 0xc4, 0x7a, 0xcc, 0x42, 0x49, 0x39, 0x78, 0x05, + 0xe4, 0x3d, 0xdf, 0x74, 0x7d, 0x33, 0x38, 0xd6, 0x73, 0x4b, 0xda, 0x72, 0xae, 0x3c, 0x23, 0x75, + 0xf2, 0x3b, 0x92, 0x8e, 0x94, 0x04, 0x5c, 0x02, 0xf9, 0x97, 0xaa, 0x95, 0xed, 0x1d, 0x1c, 0xec, + 0xeb, 0xa3, 0x1c, 0x61, 0x84, 0x49, 0xa3, 0xfc, 0x2d, 0x49, 
0x2d, 0xfe, 0x3d, 0x03, 0xf4, 0x74, + 0x54, 0xa2, 0x90, 0xc2, 0x17, 0x40, 0x9e, 0x06, 0xac, 0xe2, 0x34, 0x8e, 0x65, 0x4c, 0x9e, 0x88, + 0xc0, 0xaa, 0x92, 0x7e, 0xd2, 0x2a, 0xcc, 0xc7, 0x1a, 0x11, 0x95, 0xc7, 0x43, 0xe9, 0xc2, 0x5f, + 0x6b, 0xe0, 0xdc, 0x21, 0xd9, 0xdb, 0x77, 0xdd, 0x83, 0x35, 0xcb, 0x24, 0x4e, 0xb0, 0xe6, 0x3a, + 0x75, 0xb3, 0x21, 0x73, 0x00, 0xdd, 0x67, 0x0e, 0xbc, 0xda, 0x6d, 0xb9, 0xfc, 0x50, 0xbb, 0x55, + 0x38, 0xd7, 0x83, 0x81, 0x7a, 0xf9, 0x01, 0x77, 0x81, 0x5e, 0x4b, 0x1d, 0x12, 0x59, 0xc0, 0x44, + 0xd9, 0x1a, 0x2f, 0x5f, 0x6c, 0xb7, 0x0a, 0xfa, 0x5a, 0x1f, 0x19, 0xd4, 0x57, 0xbb, 0xf8, 0x83, + 0x6c, 0x3a, 0xbc, 0x89, 0x74, 0x7b, 0x1b, 0xe4, 0xd9, 0x31, 0x36, 0x70, 0x80, 0xe5, 0x41, 0x7c, + 0xf2, 0x74, 0x87, 0x5e, 0xd4, 0x8c, 0x2d, 0x12, 0xe0, 0x32, 0x94, 0x1b, 0x02, 0x62, 0x1a, 0x52, + 0x56, 0xe1, 0xb7, 0xc1, 0x08, 0xf5, 0x48, 0x4d, 0x06, 0xfa, 0xb5, 0xfb, 0x3d, 0x6c, 0x7d, 0x16, + 0x52, 0xf5, 0x48, 0x2d, 0x3e, 0x0b, 0xec, 0x17, 0xe2, 0xb0, 0xf0, 0x5d, 0x0d, 0x8c, 0x52, 0x5e, + 0xa0, 0x64, 0x51, 0x7b, 0x63, 0x58, 0x1e, 0xa4, 0xaa, 0xa0, 0xf8, 0x8d, 0x24, 0x78, 0xf1, 0x5f, + 0x19, 0x70, 0xa9, 0x9f, 0xea, 0x9a, 0xeb, 0x18, 0x62, 0x3b, 0x36, 0xe5, 0xd9, 0x16, 0x99, 0xfe, + 0x4c, 0xf2, 0x6c, 0x9f, 0xb4, 0x0a, 0x8f, 0xdd, 0xd3, 0x40, 0xa2, 0x08, 0x7c, 0x59, 0xad, 0x5b, + 0x14, 0x8a, 0x4b, 0x9d, 0x8e, 0x9d, 0xb4, 0x0a, 0xd3, 0x4a, 0xad, 0xd3, 0x57, 0xd8, 0x04, 0xd0, + 0xc2, 0x34, 0xb8, 0xe1, 0x63, 0x87, 0x0a, 0xb3, 0xa6, 0x4d, 0x64, 0xf8, 0x9e, 0x38, 0x5d, 0x7a, + 0x30, 0x8d, 0xf2, 0x82, 0x84, 0x84, 0xd7, 0xbb, 0xac, 0xa1, 0x1e, 0x08, 0xac, 0x6e, 0xf9, 0x04, + 0x53, 0x55, 0x8a, 0x12, 0x37, 0x0a, 0xa3, 0x22, 0xc9, 0x85, 0x8f, 0x83, 0x31, 0x9b, 0x50, 0x8a, + 0x1b, 0x84, 0xd7, 0x9f, 0xf1, 0xf8, 0x8a, 0xde, 0x12, 0x64, 0x14, 0xf1, 0x59, 0x7f, 0x72, 0xb1, + 0x5f, 0xd4, 0xae, 0x9b, 0x34, 0x80, 0xaf, 0x77, 0x1d, 0x80, 0xd2, 0xe9, 0x56, 0xc8, 0xb4, 0x79, + 0xfa, 0xab, 0xe2, 0x17, 0x51, 0x12, 0xc9, 0xff, 0x2d, 0x90, 0x33, 0x03, 0x62, 0x47, 0x77, 0xf7, + 0xab, 0x43, 0xca, 0xbd, 0xf2, 0x94, 0xf4, 0x21, 0xb7, 0xc9, 0xd0, 0x90, 0x00, 0x2d, 0xfe, 0x2e, + 0x03, 0x1e, 0xe9, 0xa7, 0xc2, 0x2e, 0x14, 0xca, 0x22, 0xee, 0x59, 0xa1, 0x8f, 0x2d, 0x99, 0x71, + 0x2a, 0xe2, 0x3b, 0x9c, 0x8a, 0x24, 0x97, 0x95, 0x7c, 0x6a, 0x3a, 0x8d, 0xd0, 0xc2, 0xbe, 0x4c, + 0x27, 0xb5, 0xea, 0xaa, 0xa4, 0x23, 0x25, 0x01, 0x4b, 0x00, 0xd0, 0x7d, 0xd7, 0x0f, 0x38, 0x86, + 0xac, 0x5e, 0x67, 0x59, 0x81, 0xa8, 0x2a, 0x2a, 0x4a, 0x48, 0xb0, 0x1b, 0xed, 0xc0, 0x74, 0x0c, + 0xb9, 0xeb, 0xea, 0x14, 0xbf, 0x6c, 0x3a, 0x06, 0xe2, 0x1c, 0x86, 0x6f, 0x99, 0x34, 0x60, 0x14, + 0xb9, 0xe5, 0x1d, 0x51, 0xe7, 0x92, 0x4a, 0x82, 0xe1, 0xd7, 0x58, 0xd5, 0x77, 0x7d, 0x93, 0x50, + 0x7d, 0x34, 0xc6, 0x5f, 0x53, 0x54, 0x94, 0x90, 0x28, 0xfe, 0x33, 0xdf, 0x3f, 0x49, 0x58, 0x29, + 0x81, 0x8f, 0x82, 0x5c, 0xc3, 0x77, 0x43, 0x4f, 0x46, 0x49, 0x45, 0xfb, 0x45, 0x46, 0x44, 0x82, + 0xc7, 0xb2, 0xb2, 0xd9, 0xd1, 0xa6, 0xaa, 0xac, 0x8c, 0x9a, 0xd3, 0x88, 0x0f, 0xbf, 0xa7, 0x81, + 0x9c, 0x23, 0x83, 0xc3, 0x52, 0xee, 0xf5, 0x21, 0xe5, 0x05, 0x0f, 0x6f, 0xec, 0xae, 0x88, 0xbc, + 0x40, 0x86, 0x4f, 0x83, 0x1c, 0xad, 0xb9, 0x1e, 0x91, 0x51, 0x5f, 0x8c, 0x84, 0xaa, 0x8c, 0x78, + 0xd2, 0x2a, 0x4c, 0x45, 0xe6, 0x38, 0x01, 0x09, 0x61, 0xf8, 0x43, 0x0d, 0x80, 0x26, 0xb6, 0x4c, + 0x03, 0xf3, 0x96, 0x21, 0xc7, 0xdd, 0x1f, 0x6c, 0x5a, 0xbf, 0xa2, 0xcc, 0x8b, 0x4d, 0x8b, 0x7f, + 0xa3, 0x04, 0x34, 0x7c, 0x4f, 0x03, 0x93, 0x34, 0xdc, 0xf3, 0xa5, 0x16, 0xe5, 0xcd, 0xc5, 0xc4, + 0xb5, 0x6f, 0x0c, 0xd4, 0x97, 0x6a, 0x02, 0xa0, 0x3c, 0xd3, 0x6e, 0x15, 0x26, 0x93, 
0x14, 0xd4, + 0xe1, 0x00, 0xfc, 0x89, 0x06, 0xf2, 0xcd, 0xe8, 0xce, 0x1e, 0xe3, 0x07, 0xfe, 0xcd, 0x21, 0x6d, + 0xac, 0xcc, 0xa8, 0xf8, 0x14, 0xa8, 0x3e, 0x40, 0x79, 0x00, 0xff, 0xa2, 0x01, 0x1d, 0x1b, 0xa2, + 0xc0, 0x63, 0x6b, 0xc7, 0x37, 0x9d, 0x80, 0xf8, 0xa2, 0xdf, 0xa4, 0x7a, 0x9e, 0xbb, 0x37, 0xd8, + 0xbb, 0x30, 0xdd, 0xcb, 0x96, 0x97, 0xa4, 0x77, 0xfa, 0x6a, 0x1f, 0x37, 0x50, 0x5f, 0x07, 0x79, + 0xa2, 0xc5, 0x2d, 0x8d, 0x3e, 0x3e, 0x84, 0x44, 0x8b, 0x7b, 0x29, 0x59, 0x1d, 0xe2, 0x0e, 0x2a, + 0x01, 0x0d, 0x2b, 0x60, 0xce, 0xf3, 0x09, 0x07, 0xb8, 0xe9, 0x1c, 0x38, 0xee, 0xa1, 0xf3, 0x82, + 0x49, 0x2c, 0x83, 0xea, 0x60, 0x49, 0x5b, 0xce, 0x97, 0x2f, 0xb4, 0x5b, 0x85, 0xb9, 0x9d, 0x5e, + 0x02, 0xa8, 0xb7, 0x5e, 0xf1, 0xbd, 0x6c, 0xfa, 0x15, 0x90, 0xee, 0x22, 0xe0, 0x07, 0x62, 0xf5, + 0x22, 0x36, 0x54, 0xd7, 0xf8, 0x6e, 0xbd, 0x3d, 0xa4, 0x64, 0x52, 0x6d, 0x40, 0xdc, 0xc9, 0x29, + 0x12, 0x45, 0x09, 0x3f, 0xe0, 0x2f, 0x35, 0x30, 0x85, 0x6b, 0x35, 0xe2, 0x05, 0xc4, 0x10, 0xc5, + 0x3d, 0xf3, 0x39, 0xd4, 0xaf, 0x39, 0xe9, 0xd5, 0xd4, 0x6a, 0x12, 0x1a, 0x75, 0x7a, 0x02, 0x9f, + 0x07, 0x67, 0x69, 0xe0, 0xfa, 0xc4, 0x48, 0xb5, 0xcd, 0xb0, 0xdd, 0x2a, 0x9c, 0xad, 0x76, 0x70, + 0x50, 0x4a, 0xb2, 0xf8, 0xe9, 0x08, 0x28, 0xdc, 0xe3, 0xa8, 0x9d, 0xe2, 0x61, 0x76, 0x19, 0x8c, + 0xf2, 0xe5, 0x1a, 0x3c, 0x2a, 0xf9, 0x44, 0x2b, 0xc8, 0xa9, 0x48, 0x72, 0xd9, 0x45, 0xc1, 0xf0, + 0x59, 0xfb, 0x92, 0xe5, 0x82, 0xea, 0xa2, 0xa8, 0x0a, 0x32, 0x8a, 0xf8, 0xf0, 0x1d, 0x30, 0x2a, + 0x06, 0x2f, 0xbc, 0x4a, 0x0f, 0xb1, 0xd2, 0x02, 0xee, 0x27, 0x87, 0x42, 0x12, 0xb2, 0xbb, 0xc2, + 0xe6, 0x1e, 0x74, 0x85, 0xbd, 0x6b, 0x49, 0x1b, 0xfd, 0x1f, 0x2f, 0x69, 0xc5, 0x7f, 0x6b, 0xe9, + 0x73, 0x9f, 0x58, 0x6a, 0xb5, 0x86, 0x2d, 0x02, 0xd7, 0xc1, 0x0c, 0x7b, 0xb5, 0x20, 0xe2, 0x59, + 0x66, 0x0d, 0x53, 0xfe, 0x68, 0x16, 0x09, 0xa7, 0xe6, 0x38, 0xd5, 0x14, 0x1f, 0x75, 0x69, 0xc0, + 0x97, 0x00, 0x14, 0x9d, 0x7c, 0x87, 0x1d, 0xd1, 0x94, 0xa8, 0x9e, 0xbc, 0xda, 0x25, 0x81, 0x7a, + 0x68, 0xc1, 0x35, 0x30, 0x6b, 0xe1, 0x3d, 0x62, 0x55, 0x89, 0x45, 0x6a, 0x81, 0xeb, 0x73, 0x53, + 0x62, 0xac, 0x30, 0xd7, 0x6e, 0x15, 0x66, 0xaf, 0xa7, 0x99, 0xa8, 0x5b, 0xbe, 0x78, 0x29, 0x7d, + 0xbc, 0x92, 0x0b, 0x17, 0xef, 0xa3, 0x0f, 0x33, 0x60, 0xa1, 0x7f, 0x66, 0xc0, 0xef, 0xc7, 0xcf, + 0x38, 0xd1, 0xa5, 0xbf, 0x39, 0xac, 0x2c, 0x94, 0xef, 0x38, 0xd0, 0xfd, 0x86, 0x83, 0xdf, 0x61, + 0x2d, 0x13, 0xb6, 0xa2, 0xc1, 0xd1, 0x1b, 0x43, 0x73, 0x81, 0x81, 0x94, 0xc7, 0x45, 0x37, 0x86, + 0x2d, 0xde, 0x7c, 0x61, 0x8b, 0x14, 0x7f, 0xaf, 0xa5, 0x5f, 0xf2, 0xf1, 0x09, 0x86, 0x3f, 0xd5, + 0xc0, 0xb4, 0xeb, 0x11, 0x67, 0x75, 0x67, 0xf3, 0x95, 0x2f, 0x8a, 0x93, 0x2c, 0x43, 0xb5, 0x7d, + 0x9f, 0x7e, 0xbe, 0x54, 0xad, 0x6c, 0x0b, 0x83, 0x3b, 0xbe, 0xeb, 0xd1, 0xf2, 0xb9, 0x76, 0xab, + 0x30, 0x5d, 0xe9, 0x84, 0x42, 0x69, 0xec, 0xa2, 0x0d, 0xe6, 0x36, 0x8e, 0x02, 0xe2, 0x3b, 0xd8, + 0x5a, 0x77, 0x6b, 0xa1, 0x4d, 0x9c, 0x40, 0x38, 0x9a, 0x9a, 0x3a, 0x69, 0xa7, 0x9c, 0x3a, 0x3d, + 0x02, 0xb2, 0xa1, 0x6f, 0xc9, 0x2c, 0x9e, 0x50, 0x53, 0x55, 0x74, 0x1d, 0x31, 0x7a, 0xf1, 0x12, + 0x18, 0x61, 0x7e, 0xc2, 0x0b, 0x20, 0xeb, 0xe3, 0x43, 0x6e, 0x75, 0xb2, 0x3c, 0xc6, 0x44, 0x10, + 0x3e, 0x44, 0x8c, 0x56, 0xfc, 0x4f, 0x01, 0x4c, 0xa7, 0xd6, 0x02, 0x17, 0x40, 0x46, 0x8d, 0x6a, + 0x81, 0x34, 0x9a, 0xd9, 0x5c, 0x47, 0x19, 0xd3, 0x80, 0xcf, 0xaa, 0xe2, 0x2b, 0x40, 0x0b, 0xaa, + 0x9e, 0x73, 0x2a, 0xeb, 0x91, 0x63, 0x73, 0xcc, 0x91, 0xa8, 0x70, 0x32, 0x1f, 0x48, 0x5d, 0x9e, + 0x12, 0xe1, 0x03, 0xa9, 0x23, 0x46, 0xfb, 0xac, 0x23, 0xb7, 0x68, 0xe6, 0x97, 0x3b, 0xc5, 0xcc, + 0x6f, 0xf4, 
0xae, 0x33, 0xbf, 0x47, 0x41, 0x2e, 0x30, 0x03, 0x8b, 0xe8, 0x63, 0x9d, 0x4f, 0x99, + 0x1b, 0x8c, 0x88, 0x04, 0x0f, 0xde, 0x02, 0x63, 0x06, 0xa9, 0xe3, 0xd0, 0x0a, 0xf4, 0x3c, 0x4f, + 0xa1, 0xb5, 0x01, 0xa4, 0x90, 0x18, 0xc8, 0xae, 0x0b, 0xbb, 0x28, 0x02, 0x80, 0x8f, 0x81, 0x31, + 0x1b, 0x1f, 0x99, 0x76, 0x68, 0xf3, 0x26, 0x4f, 0x13, 0x62, 0x5b, 0x82, 0x84, 0x22, 0x1e, 0xab, + 0x8c, 0xe4, 0xa8, 0x66, 0x85, 0xd4, 0x6c, 0x12, 0xc9, 0x94, 0x0d, 0x98, 0xaa, 0x8c, 0x1b, 0x29, + 0x3e, 0xea, 0xd2, 0xe0, 0x60, 0xa6, 0xc3, 0x95, 0x27, 0x12, 0x60, 0x82, 0x84, 0x22, 0x5e, 0x27, + 0x98, 0x94, 0x9f, 0xec, 0x07, 0x26, 0x95, 0xbb, 0x34, 0xe0, 0x17, 0xc0, 0xb8, 0x8d, 0x8f, 0xae, + 0x13, 0xa7, 0x11, 0xec, 0xeb, 0x53, 0x4b, 0xda, 0x72, 0xb6, 0x3c, 0xd5, 0x6e, 0x15, 0xc6, 0xb7, + 0x22, 0x22, 0x8a, 0xf9, 0x5c, 0xd8, 0x74, 0xa4, 0xf0, 0xd9, 0x84, 0x70, 0x44, 0x44, 0x31, 0x9f, + 0x75, 0x10, 0x1e, 0x0e, 0xd8, 0xe1, 0xd2, 0xa7, 0x3b, 0x9f, 0x9a, 0x3b, 0x82, 0x8c, 0x22, 0x3e, + 0x5c, 0x06, 0x79, 0x1b, 0x1f, 0xf1, 0xb1, 0x80, 0x3e, 0xc3, 0xcd, 0xf2, 0xe1, 0xf4, 0x96, 0xa4, + 0x21, 0xc5, 0xe5, 0x92, 0xa6, 0x23, 0x24, 0x67, 0x13, 0x92, 0x92, 0x86, 0x14, 0x97, 0x25, 0x71, + 0xe8, 0x98, 0xb7, 0x43, 0x22, 0x84, 0x21, 0x8f, 0x8c, 0x4a, 0xe2, 0x9b, 0x31, 0x0b, 0x25, 0xe5, + 0xd8, 0xb3, 0xdc, 0x0e, 0xad, 0xc0, 0xf4, 0x2c, 0x52, 0xa9, 0xeb, 0xe7, 0x78, 0xfc, 0x79, 0xe3, + 0xbd, 0xa5, 0xa8, 0x28, 0x21, 0x01, 0x09, 0x18, 0x21, 0x4e, 0x68, 0xeb, 0xe7, 0xf9, 0xc5, 0x3e, + 0x90, 0x14, 0x54, 0x27, 0x67, 0xc3, 0x09, 0x6d, 0xc4, 0xcd, 0xc3, 0x67, 0xc1, 0x94, 0x8d, 0x8f, + 0x58, 0x39, 0x20, 0x7e, 0x60, 0x12, 0xaa, 0xcf, 0xf1, 0xc5, 0xcf, 0xb2, 0x8e, 0x73, 0x2b, 0xc9, + 0x40, 0x9d, 0x72, 0x5c, 0xd1, 0x74, 0x12, 0x8a, 0xf3, 0x09, 0xc5, 0x24, 0x03, 0x75, 0xca, 0xb1, + 0x48, 0xfb, 0xe4, 0x76, 0x68, 0xfa, 0xc4, 0xd0, 0x1f, 0xe2, 0x4d, 0xaa, 0xfc, 0x60, 0x20, 0x68, + 0x48, 0x71, 0x61, 0x33, 0x9a, 0x1f, 0xe9, 0xfc, 0x18, 0xde, 0x1c, 0x6c, 0x25, 0xaf, 0xf8, 0xab, + 0xbe, 0x8f, 0x8f, 0xc5, 0x4d, 0x93, 0x9c, 0x1c, 0x41, 0x0a, 0x72, 0xd8, 0xb2, 0x2a, 0x75, 0xfd, + 0x02, 0x8f, 0xfd, 0xa0, 0x6f, 0x10, 0x55, 0x75, 0x56, 0x19, 0x08, 0x12, 0x58, 0x0c, 0xd4, 0x75, + 0x58, 0x6a, 0x2c, 0x0c, 0x17, 0xb4, 0xc2, 0x40, 0x90, 0xc0, 0xe2, 0x2b, 0x75, 0x8e, 0x2b, 0x75, + 0xfd, 0xe1, 0x21, 0xaf, 0x94, 0x81, 0x20, 0x81, 0x05, 0x4d, 0x90, 0x75, 0xdc, 0x40, 0xbf, 0x38, + 0x94, 0xeb, 0x99, 0x5f, 0x38, 0xdb, 0x6e, 0x80, 0x18, 0x06, 0xfc, 0x85, 0x06, 0x80, 0x17, 0xa7, + 0xe8, 0x23, 0x03, 0x19, 0x4b, 0xa4, 0x20, 0x4b, 0x71, 0x6e, 0x6f, 0x38, 0x81, 0x7f, 0x1c, 0xbf, + 0x23, 0x13, 0x67, 0x20, 0xe1, 0x05, 0xfc, 0x8d, 0x06, 0xce, 0x27, 0xdb, 0x64, 0xe5, 0xde, 0x22, + 0x8f, 0xc8, 0x8d, 0x41, 0xa7, 0x79, 0xd9, 0x75, 0xad, 0xb2, 0xde, 0x6e, 0x15, 0xce, 0xaf, 0xf6, + 0x40, 0x45, 0x3d, 0x7d, 0x81, 0x7f, 0xd0, 0xc0, 0xac, 0xac, 0xa2, 0x09, 0x0f, 0x0b, 0x3c, 0x80, + 0x64, 0xd0, 0x01, 0x4c, 0xe3, 0x88, 0x38, 0xaa, 0x0f, 0xdd, 0x5d, 0x7c, 0xd4, 0xed, 0x1a, 0xfc, + 0xb3, 0x06, 0x26, 0x0d, 0xe2, 0x11, 0xc7, 0x20, 0x4e, 0x8d, 0xf9, 0xba, 0x34, 0x90, 0xb1, 0x41, + 0xda, 0xd7, 0xf5, 0x04, 0x84, 0x70, 0xb3, 0x24, 0xdd, 0x9c, 0x4c, 0xb2, 0x4e, 0x5a, 0x85, 0xf9, + 0x58, 0x35, 0xc9, 0x41, 0x1d, 0x5e, 0xc2, 0xf7, 0x35, 0x30, 0x1d, 0x6f, 0x80, 0xb8, 0x52, 0x2e, + 0x0d, 0x31, 0x0f, 0x78, 0xfb, 0xba, 0xda, 0x09, 0x88, 0xd2, 0x1e, 0xc0, 0x3f, 0x6a, 0xac, 0x53, + 0x8b, 0xde, 0x7d, 0x54, 0x2f, 0xf2, 0x58, 0xbe, 0x35, 0xf0, 0x58, 0x2a, 0x04, 0x11, 0xca, 0x2b, + 0x71, 0x2b, 0xa8, 0x38, 0x27, 0xad, 0xc2, 0x5c, 0x32, 0x92, 0x8a, 0x81, 0x92, 0x1e, 0xc2, 0x1f, + 0x6b, 0x60, 0x92, 0xc4, 0x1d, 0x37, 
0xd5, 0x1f, 0x1d, 0x48, 0x10, 0x7b, 0x36, 0xf1, 0xe2, 0xa5, + 0x9e, 0x60, 0x51, 0xd4, 0x81, 0xcd, 0x3a, 0x48, 0x72, 0x84, 0x6d, 0xcf, 0x22, 0xfa, 0xff, 0x0d, + 0xb8, 0x83, 0xdc, 0x10, 0x76, 0x51, 0x04, 0x00, 0xaf, 0x80, 0xbc, 0x13, 0x5a, 0x16, 0xde, 0xb3, + 0x88, 0xfe, 0x18, 0xef, 0x45, 0xd4, 0x58, 0x74, 0x5b, 0xd2, 0x91, 0x92, 0x80, 0x75, 0xb0, 0x74, + 0xf4, 0xb2, 0xfa, 0x17, 0xa1, 0x9e, 0x83, 0x3b, 0xfd, 0x32, 0xb7, 0xb2, 0xd0, 0x6e, 0x15, 0xe6, + 0x77, 0x7b, 0x8f, 0xf6, 0xee, 0x69, 0x03, 0xbe, 0x06, 0x1e, 0x4e, 0xc8, 0x6c, 0xd8, 0x7b, 0xc4, + 0x30, 0x88, 0x11, 0x3d, 0xdc, 0xf4, 0xff, 0x17, 0xc3, 0xc3, 0xe8, 0x80, 0xef, 0xa6, 0x05, 0xd0, + 0xdd, 0xb4, 0xe1, 0x75, 0x30, 0x9f, 0x60, 0x6f, 0x3a, 0x41, 0xc5, 0xaf, 0x06, 0xbe, 0xe9, 0x34, + 0xf4, 0x65, 0x6e, 0xf7, 0x7c, 0x74, 0x22, 0x77, 0x13, 0x3c, 0xd4, 0x47, 0x07, 0x7e, 0xad, 0xc3, + 0x1a, 0xff, 0x8c, 0x85, 0xbd, 0x97, 0xc9, 0x31, 0xd5, 0x1f, 0xe7, 0xdd, 0x09, 0xdf, 0xec, 0xdd, + 0x04, 0x1d, 0xf5, 0x91, 0x87, 0x5f, 0x05, 0xe7, 0x52, 0x1c, 0xf6, 0x44, 0xd1, 0x9f, 0x10, 0x6f, + 0x0d, 0xd6, 0xcf, 0xee, 0x46, 0x44, 0xd4, 0x4b, 0x72, 0x81, 0xbd, 0x62, 0x53, 0x55, 0x10, 0xce, + 0x80, 0xec, 0x01, 0x91, 0x5f, 0xff, 0x11, 0xfb, 0x13, 0x1a, 0x20, 0xd7, 0xc4, 0x56, 0x18, 0x3d, + 0xc4, 0x07, 0x7c, 0x83, 0x22, 0x61, 0xfc, 0xf9, 0xcc, 0x73, 0xda, 0xc2, 0x07, 0x1a, 0x98, 0xef, + 0x5d, 0x9c, 0x1f, 0xa8, 0x5b, 0xbf, 0xd2, 0xc0, 0x6c, 0x57, 0x1d, 0xee, 0xe1, 0xd1, 0xed, 0x4e, + 0x8f, 0x5e, 0x1b, 0x74, 0x41, 0x15, 0x09, 0xc4, 0xbb, 0xc8, 0xa4, 0x7b, 0x3f, 0xd3, 0xc0, 0x4c, + 0xba, 0xb4, 0x3d, 0xc8, 0x78, 0x15, 0x3f, 0xc8, 0x80, 0xf9, 0xde, 0xcd, 0x2f, 0xf4, 0xd5, 0x2b, + 0x7f, 0x38, 0xd3, 0x92, 0x5e, 0x93, 0xd5, 0x77, 0x35, 0x30, 0x71, 0x4b, 0xc9, 0x45, 0x5f, 0x87, + 0x07, 0x3e, 0xa7, 0x89, 0xee, 0x92, 0x98, 0x41, 0x51, 0x12, 0xb7, 0xf8, 0x27, 0x0d, 0xcc, 0xf5, + 0xbc, 0x24, 0xe1, 0x65, 0x30, 0x8a, 0x2d, 0xcb, 0x3d, 0x14, 0xe3, 0xb6, 0xc4, 0x2c, 0x7b, 0x95, + 0x53, 0x91, 0xe4, 0x26, 0xa2, 0x97, 0xf9, 0xbc, 0xa2, 0x57, 0xfc, 0xab, 0x06, 0x2e, 0xde, 0x2d, + 0x13, 0x1f, 0xc8, 0x96, 0x2e, 0x83, 0xbc, 0x6c, 0x70, 0x8f, 0xf9, 0x76, 0xca, 0x37, 0x9d, 0x2c, + 0x1a, 0xfc, 0x1f, 0xa2, 0xc4, 0x5f, 0xc5, 0x0f, 0x35, 0x30, 0x53, 0x25, 0x7e, 0xd3, 0xac, 0x11, + 0x44, 0xea, 0xc4, 0x27, 0x4e, 0x8d, 0xc0, 0x15, 0x30, 0xce, 0x3f, 0xcb, 0x7a, 0xb8, 0x16, 0x7d, + 0x62, 0x98, 0x95, 0x21, 0x1f, 0xdf, 0x8e, 0x18, 0x28, 0x96, 0x51, 0x9f, 0x23, 0x32, 0x7d, 0x3f, + 0x47, 0x5c, 0x04, 0x23, 0x5e, 0x3c, 0xac, 0xcd, 0x33, 0x2e, 0x9f, 0xcf, 0x72, 0x2a, 0xe7, 0xba, + 0x7e, 0xc0, 0x27, 0x50, 0x39, 0xc9, 0x75, 0xfd, 0x00, 0x71, 0x6a, 0xf1, 0x6f, 0x1a, 0xe8, 0xf5, + 0xaf, 0x4b, 0xf0, 0x82, 0x18, 0xc2, 0x25, 0x26, 0x5b, 0xd1, 0x00, 0x0e, 0x36, 0xc1, 0x18, 0x15, + 0xab, 0x92, 0x51, 0xaf, 0xdc, 0x67, 0xd4, 0xd3, 0x31, 0x12, 0xb7, 0x7f, 0x44, 0x8d, 0xc0, 0x58, + 0xe0, 0x6b, 0xb8, 0x1c, 0x3a, 0x86, 0x9c, 0xcb, 0x4e, 0x8a, 0xc0, 0xaf, 0xad, 0x0a, 0x1a, 0x52, + 0xdc, 0xf2, 0xd5, 0x8f, 0xee, 0x2c, 0x9e, 0xf9, 0xf8, 0xce, 0xe2, 0x99, 0x4f, 0xee, 0x2c, 0x9e, + 0xf9, 0x6e, 0x7b, 0x51, 0xfb, 0xa8, 0xbd, 0xa8, 0x7d, 0xdc, 0x5e, 0xd4, 0x3e, 0x69, 0x2f, 0x6a, + 0xff, 0x68, 0x2f, 0x6a, 0x3f, 0xff, 0x74, 0xf1, 0xcc, 0x37, 0xc7, 0x24, 0xfe, 0x7f, 0x03, 0x00, + 0x00, 0xff, 0xff, 0x80, 0x3e, 0x52, 0x72, 0x50, 0x2c, 0x00, 0x00, +} + func (m *ConversionRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -239,37 +955,46 @@ func (m *ConversionRequest) Marshal() (dAtA 
[]byte, err error) { } func (m *ConversionRequest) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConversionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DesiredAPIVersion))) - i += copy(dAtA[i:], m.DesiredAPIVersion) if len(m.Objects) > 0 { - for _, msg := range m.Objects { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Objects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Objects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x1a } } - return i, nil + i -= len(m.DesiredAPIVersion) + copy(dAtA[i:], m.DesiredAPIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DesiredAPIVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ConversionResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -277,41 +1002,51 @@ func (m *ConversionResponse) Marshal() (dAtA []byte, err error) { } func (m *ConversionResponse) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConversionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - if len(m.ConvertedObjects) > 0 { - for _, msg := range m.ConvertedObjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Result.Size())) - n1, err := m.Result.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.ConvertedObjects) > 0 { + for iNdEx := len(m.ConvertedObjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ConvertedObjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - i += n1 - return i, nil + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ConversionReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -319,37 +1054,46 @@ func (m *ConversionReview) Marshal() (dAtA []byte, err error) { } func (m *ConversionReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*ConversionReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Request != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Request.Size())) - n2, err := m.Request.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } if m.Response != nil { + { + size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Response.Size())) - n3, err := m.Response.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.Request != nil { + { + size, err := m.Request.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *CustomResourceColumnDefinition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -357,40 +1101,50 @@ func (m *CustomResourceColumnDefinition) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceColumnDefinition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceColumnDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) - i += copy(dAtA[i:], m.Format) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Priority)) - dAtA[i] = 0x32 - i++ + i -= len(m.JSONPath) + copy(dAtA[i:], m.JSONPath) i = encodeVarintGenerated(dAtA, i, uint64(len(m.JSONPath))) - i += copy(dAtA[i:], m.JSONPath) - return i, nil + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x28 + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + i -= len(m.Format) + copy(dAtA[i:], m.Format) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) + i-- + dAtA[i] = 0x1a + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CustomResourceConversion) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -398,46 +1152,48 @@ func (m *CustomResourceConversion) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceConversion) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceConversion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - 
dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strategy))) - i += copy(dAtA[i:], m.Strategy) - if m.WebhookClientConfig != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.WebhookClientConfig.Size())) - n4, err := m.WebhookClientConfig.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } if len(m.ConversionReviewVersions) > 0 { - for _, s := range m.ConversionReviewVersions { + for iNdEx := len(m.ConversionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ConversionReviewVersions[iNdEx]) + copy(dAtA[i:], m.ConversionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConversionReviewVersions[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + } + } + if m.WebhookClientConfig != nil { + { + size, err := m.WebhookClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Strategy) + copy(dAtA[i:], m.Strategy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strategy))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CustomResourceDefinition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -445,41 +1201,52 @@ func (m *CustomResourceDefinition) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceDefinition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n5, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n6, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n7, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CustomResourceDefinitionCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -487,41 +1254,52 @@ func (m *CustomResourceDefinitionCondition) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceDefinitionCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionCondition) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n8, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CustomResourceDefinitionList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -529,37 +1307,46 @@ func (m *CustomResourceDefinitionList) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceDefinitionList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CustomResourceDefinitionNames) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -567,63 +1354,60 @@ func (m *CustomResourceDefinitionNames) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceDefinitionNames) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionNames) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int 
_ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Plural))) - i += copy(dAtA[i:], m.Plural) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Singular))) - i += copy(dAtA[i:], m.Singular) - if len(m.ShortNames) > 0 { - for _, s := range m.ShortNames { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.Categories) > 0 { + for iNdEx := len(m.Categories) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Categories[iNdEx]) + copy(dAtA[i:], m.Categories[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Categories[iNdEx]))) + i-- + dAtA[i] = 0x32 } } - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x2a - i++ + i -= len(m.ListKind) + copy(dAtA[i:], m.ListKind) i = encodeVarintGenerated(dAtA, i, uint64(len(m.ListKind))) - i += copy(dAtA[i:], m.ListKind) - if len(m.Categories) > 0 { - for _, s := range m.Categories { - dAtA[i] = 0x32 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x2a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x22 + if len(m.ShortNames) > 0 { + for iNdEx := len(m.ShortNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ShortNames[iNdEx]) + copy(dAtA[i:], m.ShortNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShortNames[iNdEx]))) + i-- + dAtA[i] = 0x1a } } - return i, nil + i -= len(m.Singular) + copy(dAtA[i:], m.Singular) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Singular))) + i-- + dAtA[i] = 0x12 + i -= len(m.Plural) + copy(dAtA[i:], m.Plural) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Plural))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CustomResourceDefinitionSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -631,91 +1415,121 @@ func (m *CustomResourceDefinitionSpec) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceDefinitionSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Names.Size())) - n10, err := m.Names.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.PreserveUnknownFields != nil { + i-- + if *m.PreserveUnknownFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 } - i += n10 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scope))) - i += copy(dAtA[i:], m.Scope) - if m.Validation != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Validation.Size())) - n11, err := m.Validation.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Conversion != nil { + { + size, err := m.Conversion.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + 
} + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 + i-- + dAtA[i] = 0x4a } - if m.Subresources != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Subresources.Size())) - n12, err := m.Subresources.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.AdditionalPrinterColumns) > 0 { + for iNdEx := len(m.AdditionalPrinterColumns) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AdditionalPrinterColumns[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 } - i += n12 } if len(m.Versions) > 0 { - for _, msg := range m.Versions { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + } + } + if m.Subresources != nil { + { + size, err := m.Subresources.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x32 } - if len(m.AdditionalPrinterColumns) > 0 { - for _, msg := range m.AdditionalPrinterColumns { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.Validation != nil { + { + size, err := m.Validation.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2a } - if m.Conversion != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Conversion.Size())) - n13, err := m.Conversion.MarshalTo(dAtA[i:]) + i -= len(m.Scope) + copy(dAtA[i:], m.Scope) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scope))) + i-- + dAtA[i] = 0x22 + { + size, err := m.Names.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n13 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CustomResourceDefinitionStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -723,122 +1537,137 @@ func (m *CustomResourceDefinitionStatus) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceDefinitionStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AcceptedNames.Size())) - n14, err := m.AcceptedNames.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - 
} - i += n14 if len(m.StoredVersions) > 0 { - for _, s := range m.StoredVersions { + for iNdEx := len(m.StoredVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.StoredVersions[iNdEx]) + copy(dAtA[i:], m.StoredVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoredVersions[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil -} - -func (m *CustomResourceDefinitionVersion) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } + { + size, err := m.AcceptedNames.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionVersion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } return dAtA[:n], nil } func (m *CustomResourceDefinitionVersion) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x10 - i++ - if m.Served { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x18 - i++ - if m.Storage { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.Schema != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Schema.Size())) - n15, err := m.Schema.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.AdditionalPrinterColumns) > 0 { + for iNdEx := len(m.AdditionalPrinterColumns) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AdditionalPrinterColumns[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } - i += n15 } if m.Subresources != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Subresources.Size())) - n16, err := m.Subresources.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Subresources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n16 + i-- + dAtA[i] = 0x2a } - if len(m.AdditionalPrinterColumns) > 0 { - for _, msg := range m.AdditionalPrinterColumns { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 + } + i-- + if m.Storage { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x18 + i-- + if m.Served { + dAtA[i] = 1 + } else { + dAtA[i] = 0 
+ } + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CustomResourceSubresourceScale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -846,31 +1675,39 @@ func (m *CustomResourceSubresourceScale) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceSubresourceScale) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceSubresourceScale) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SpecReplicasPath))) - i += copy(dAtA[i:], m.SpecReplicasPath) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StatusReplicasPath))) - i += copy(dAtA[i:], m.StatusReplicasPath) if m.LabelSelectorPath != nil { - dAtA[i] = 0x1a - i++ + i -= len(*m.LabelSelectorPath) + copy(dAtA[i:], *m.LabelSelectorPath) i = encodeVarintGenerated(dAtA, i, uint64(len(*m.LabelSelectorPath))) - i += copy(dAtA[i:], *m.LabelSelectorPath) + i-- + dAtA[i] = 0x1a } - return i, nil + i -= len(m.StatusReplicasPath) + copy(dAtA[i:], m.StatusReplicasPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StatusReplicasPath))) + i-- + dAtA[i] = 0x12 + i -= len(m.SpecReplicasPath) + copy(dAtA[i:], m.SpecReplicasPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SpecReplicasPath))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CustomResourceSubresourceStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -878,17 +1715,22 @@ func (m *CustomResourceSubresourceStatus) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceSubresourceStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceSubresourceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - return i, nil + return len(dAtA) - i, nil } func (m *CustomResourceSubresources) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -896,37 +1738,46 @@ func (m *CustomResourceSubresources) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceSubresources) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceSubresources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Status != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n17, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n17 - } if m.Scale != nil { + { + size, err := m.Scale.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Scale.Size())) - n18, err := m.Scale.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err 
+ } + if m.Status != nil { + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n18 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *CustomResourceValidation) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -934,27 +1785,34 @@ func (m *CustomResourceValidation) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceValidation) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceValidation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.OpenAPIV3Schema != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.OpenAPIV3Schema.Size())) - n19, err := m.OpenAPIV3Schema.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.OpenAPIV3Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n19 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *ExternalDocumentation) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -962,25 +1820,32 @@ func (m *ExternalDocumentation) Marshal() (dAtA []byte, err error) { } func (m *ExternalDocumentation) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalDocumentation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - dAtA[i] = 0x12 - i++ + i -= len(m.URL) + copy(dAtA[i:], m.URL) i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) - i += copy(dAtA[i:], m.URL) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *JSON) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -988,23 +1853,29 @@ func (m *JSON) Marshal() (dAtA []byte, err error) { } func (m *JSON) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSON) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.Raw != nil { - dAtA[i] = 0xa - i++ + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) - i += copy(dAtA[i:], m.Raw) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *JSONSchemaProps) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1012,234 +1883,225 @@ func (m *JSONSchemaProps) Marshal() (dAtA []byte, err error) { } func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { - var i int + 
size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaProps) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ID))) - i += copy(dAtA[i:], m.ID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schema))) - i += copy(dAtA[i:], m.Schema) - if m.Ref != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Ref))) - i += copy(dAtA[i:], *m.Ref) - } - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) - i += copy(dAtA[i:], m.Format) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Title))) - i += copy(dAtA[i:], m.Title) - if m.Default != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Default.Size())) - n20, err := m.Default.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.XListType != nil { + i -= len(*m.XListType) + copy(dAtA[i:], *m.XListType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.XListType))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xd2 + } + if len(m.XListMapKeys) > 0 { + for iNdEx := len(m.XListMapKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.XListMapKeys[iNdEx]) + copy(dAtA[i:], m.XListMapKeys[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.XListMapKeys[iNdEx]))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xca } - i += n20 } - if m.Maximum != nil { - dAtA[i] = 0x49 - i++ - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Maximum)))) - i += 8 - } - dAtA[i] = 0x50 - i++ - if m.ExclusiveMaximum { + i-- + if m.XIntOrString { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if m.Minimum != nil { - dAtA[i] = 0x59 - i++ - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Minimum)))) - i += 8 - } - dAtA[i] = 0x60 - i++ - if m.ExclusiveMinimum { + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc0 + i-- + if m.XEmbeddedResource { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if m.MaxLength != nil { - dAtA[i] = 0x68 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxLength)) - } - if m.MinLength != nil { - dAtA[i] = 0x70 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.MinLength)) - } - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pattern))) - i += copy(dAtA[i:], m.Pattern) - if m.MaxItems != nil { - dAtA[i] = 0x80 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxItems)) - } - if m.MinItems != nil { - dAtA[i] = 0x88 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.MinItems)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb8 + if m.XPreserveUnknownFields != nil { + i-- + if *m.XPreserveUnknownFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb0 } - dAtA[i] = 0x90 - i++ - dAtA[i] = 0x1 - i++ - if m.UniqueItems { + i-- + if m.Nullable { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if m.MultipleOf != nil { - dAtA[i] = 0x99 - i++ - dAtA[i] = 0x1 - i++ - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.MultipleOf)))) - i += 8 - } - if len(m.Enum) > 0 { - for _, msg := range m.Enum { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa8 + if m.Example != nil { + { + size, err := m.Example.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 } - if m.MaxProperties != nil { - dAtA[i] = 0xa8 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxProperties)) - } - if m.MinProperties != nil { - dAtA[i] = 0xb0 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.MinProperties)) - } - if len(m.Required) > 0 { - for _, s := range m.Required { - dAtA[i] = 0xba - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.ExternalDocs != nil { + { + size, err := m.ExternalDocs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a } - if m.Items != nil { - dAtA[i] = 0xc2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Items.Size())) - n21, err := m.Items.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Definitions) > 0 { + keysForDefinitions := make([]string, 0, len(m.Definitions)) + for k := range m.Definitions { + keysForDefinitions = append(keysForDefinitions, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefinitions) + for iNdEx := len(keysForDefinitions) - 1; iNdEx >= 0; iNdEx-- { + v := m.Definitions[string(keysForDefinitions[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForDefinitions[iNdEx]) + copy(dAtA[i:], keysForDefinitions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDefinitions[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 } - i += n21 } - if len(m.AllOf) > 0 { - for _, msg := range m.AllOf { - dAtA[i] = 0xca - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.AdditionalItems != nil { + { + size, err := m.AdditionalItems.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if len(m.OneOf) > 0 { - for _, msg := range m.OneOf { - dAtA[i] = 0xd2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a + } + if len(m.Dependencies) > 0 { + keysForDependencies := make([]string, 0, len(m.Dependencies)) + for k := range m.Dependencies { + keysForDependencies = append(keysForDependencies, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDependencies) + for iNdEx := len(keysForDependencies) - 1; iNdEx >= 0; iNdEx-- { + v := m.Dependencies[string(keysForDependencies[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + i -= len(keysForDependencies[iNdEx]) + copy(dAtA[i:], keysForDependencies[iNdEx]) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(keysForDependencies[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 } } - if len(m.AnyOf) > 0 { - for _, msg := range m.AnyOf { - dAtA[i] = 0xda - i++ + if len(m.PatternProperties) > 0 { + keysForPatternProperties := make([]string, 0, len(m.PatternProperties)) + for k := range m.PatternProperties { + keysForPatternProperties = append(keysForPatternProperties, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPatternProperties) + for iNdEx := len(keysForPatternProperties) - 1; iNdEx >= 0; iNdEx-- { + v := m.PatternProperties[string(keysForPatternProperties[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForPatternProperties[iNdEx]) + copy(dAtA[i:], keysForPatternProperties[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPatternProperties[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0xfa + } + } + if m.AdditionalProperties != nil { + { + size, err := m.AdditionalProperties.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if m.Not != nil { - dAtA[i] = 0xe2 - i++ + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Not.Size())) - n22, err := m.Not.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n22 + i-- + dAtA[i] = 0xf2 } if len(m.Properties) > 0 { keysForProperties := make([]string, 0, len(m.Properties)) @@ -1247,197 +2109,279 @@ func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { keysForProperties = append(keysForProperties, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForProperties) - for _, k := range keysForProperties { - dAtA[i] = 0xea - i++ - dAtA[i] = 0x1 - i++ - v := m.Properties[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + for iNdEx := len(keysForProperties) - 1; iNdEx >= 0; iNdEx-- { + v := m.Properties[string(keysForProperties[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n23, err := (&v).MarshalTo(dAtA[i:]) + i -= len(keysForProperties[iNdEx]) + copy(dAtA[i:], keysForProperties[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForProperties[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea + } + } + if m.Not != nil { + { + size, err := m.Not.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n23 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if m.AdditionalProperties != nil { - dAtA[i] = 0xf2 - i++ + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.AdditionalProperties.Size())) - n24, err := m.AdditionalProperties.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n24 + i-- + dAtA[i] = 0xe2 } - if len(m.PatternProperties) > 0 { - keysForPatternProperties := make([]string, 0, len(m.PatternProperties)) - for k := range m.PatternProperties { - keysForPatternProperties = append(keysForPatternProperties, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForPatternProperties) - for _, k := range keysForPatternProperties { - dAtA[i] = 0xfa - i++ - dAtA[i] = 0x1 - i++ - v := m.PatternProperties[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n25, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.AnyOf) > 0 { + for iNdEx := len(m.AnyOf) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AnyOf[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n25 + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda } } - if len(m.Dependencies) > 0 { - keysForDependencies := make([]string, 0, len(m.Dependencies)) - for k := range m.Dependencies { - keysForDependencies = append(keysForDependencies, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDependencies) - for _, k := range keysForDependencies { - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x2 - i++ - v := m.Dependencies[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n26, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.OneOf) > 0 { + for iNdEx := len(m.OneOf) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.OneOf[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n26 + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 } } - if m.AdditionalItems != nil { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x2 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AdditionalItems.Size())) - n27, err := m.AdditionalItems.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.AllOf) > 0 { + for iNdEx := len(m.AllOf) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllOf[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca } - i += n27 } - if len(m.Definitions) > 0 { - keysForDefinitions := make([]string, 0, len(m.Definitions)) - for k := range m.Definitions { - keysForDefinitions = append(keysForDefinitions, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDefinitions) - for _, k := range keysForDefinitions { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x2 - i++ - v := m.Definitions[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + 
sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n28, err := (&v).MarshalTo(dAtA[i:]) + if m.Items != nil { + { + size, err := m.Items.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n28 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 } - if m.ExternalDocs != nil { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x2 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ExternalDocs.Size())) - n29, err := m.ExternalDocs.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Required) > 0 { + for iNdEx := len(m.Required) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Required[iNdEx]) + copy(dAtA[i:], m.Required[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Required[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba } - i += n29 } - if m.Example != nil { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x2 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Example.Size())) - n30, err := m.Example.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.MinProperties != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinProperties)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb0 + } + if m.MaxProperties != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxProperties)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if len(m.Enum) > 0 { + for iNdEx := len(m.Enum) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Enum[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 } - i += n30 } - dAtA[i] = 0xa8 - i++ - dAtA[i] = 0x2 - i++ - if m.Nullable { + if m.MultipleOf != nil { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.MultipleOf)))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x99 + } + i-- + if m.UniqueItems { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + if m.MinItems != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinItems)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + } + if m.MaxItems != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxItems)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + i -= len(m.Pattern) + copy(dAtA[i:], m.Pattern) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pattern))) + i-- + dAtA[i] = 0x7a + if m.MinLength != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinLength)) + i-- + dAtA[i] = 0x70 + } + if m.MaxLength != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxLength)) + i-- + dAtA[i] = 0x68 + } + i-- + if m.ExclusiveMinimum { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + if m.Minimum != nil { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Minimum)))) + i-- + dAtA[i] = 0x59 + } + i-- + if m.ExclusiveMaximum { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x50 + if m.Maximum != nil { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Maximum)))) + i-- + dAtA[i] = 0x49 + } + if m.Default != nil { + { + size, err := m.Default.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + i -= len(m.Title) + copy(dAtA[i:], m.Title) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Title))) + i-- + dAtA[i] = 0x3a + i -= len(m.Format) + copy(dAtA[i:], m.Format) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) + i-- + dAtA[i] = 0x32 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x2a + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + if m.Ref != nil { + i -= len(*m.Ref) + copy(dAtA[i:], *m.Ref) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Ref))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Schema) + copy(dAtA[i:], m.Schema) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schema))) + i-- + dAtA[i] = 0x12 + i -= len(m.ID) + copy(dAtA[i:], m.ID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *JSONSchemaPropsOrArray) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1445,39 +2389,48 @@ func (m *JSONSchemaPropsOrArray) Marshal() (dAtA []byte, err error) { } func (m *JSONSchemaPropsOrArray) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaPropsOrArray) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Schema != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Schema.Size())) - n31, err := m.Schema.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n31 - } if len(m.JSONSchemas) > 0 { - for _, msg := range m.JSONSchemas { + for iNdEx := len(m.JSONSchemas) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.JSONSchemas[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + } + } + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *JSONSchemaPropsOrBool) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1485,35 +2438,42 @@ func (m *JSONSchemaPropsOrBool) Marshal() (dAtA []byte, err error) { } func (m *JSONSchemaPropsOrBool) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaPropsOrBool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i-- if m.Allows { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if m.Schema != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Schema.Size())) - n32, err := 
m.Schema.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n32 - } - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *JSONSchemaPropsOrStringArray) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1521,42 +2481,43 @@ func (m *JSONSchemaPropsOrStringArray) Marshal() (dAtA []byte, err error) { } func (m *JSONSchemaPropsOrStringArray) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaPropsOrStringArray) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Schema != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Schema.Size())) - n33, err := m.Schema.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n33 - } if len(m.Property) > 0 { - for _, s := range m.Property { + for iNdEx := len(m.Property) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Property[iNdEx]) + copy(dAtA[i:], m.Property[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Property[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + } + } + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *ServiceReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1564,31 +2525,44 @@ func (m *ServiceReference) Marshal() (dAtA []byte, err error) { } func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x20 + } if m.Path != nil { - dAtA[i] = 0x1a - i++ + i -= len(*m.Path) + copy(dAtA[i:], *m.Path) i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) - i += copy(dAtA[i:], *m.Path) + i-- + dAtA[i] = 0x1a } - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1596,45 +2570,59 @@ func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { } func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Service != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Service.Size())) - n34, err := m.Service.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n34 + if m.URL != nil { + i -= len(*m.URL) + copy(dAtA[i:], *m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i-- + dAtA[i] = 0x1a } if m.CABundle != nil { - dAtA[i] = 0x12 - i++ + i -= len(m.CABundle) + copy(dAtA[i:], m.CABundle) i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) - i += copy(dAtA[i:], m.CABundle) + i-- + dAtA[i] = 0x12 } - if m.URL != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) - i += copy(dAtA[i:], *m.URL) + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *ConversionRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.UID) @@ -1651,6 +2639,9 @@ func (m *ConversionRequest) Size() (n int) { } func (m *ConversionResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.UID) @@ -1667,6 +2658,9 @@ func (m *ConversionResponse) Size() (n int) { } func (m *ConversionReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Request != nil { @@ -1681,6 +2675,9 @@ func (m *ConversionReview) Size() (n int) { } func (m *CustomResourceColumnDefinition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1698,6 +2695,9 @@ func (m *CustomResourceColumnDefinition) Size() (n int) { } func (m *CustomResourceConversion) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Strategy) @@ -1716,6 +2716,9 @@ func (m *CustomResourceConversion) Size() (n int) { } func (m *CustomResourceDefinition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1728,6 +2731,9 @@ func (m *CustomResourceDefinition) Size() (n int) { } func (m *CustomResourceDefinitionCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1744,6 +2750,9 @@ func (m *CustomResourceDefinitionCondition) Size() (n int) { } func (m *CustomResourceDefinitionList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1758,6 +2767,9 @@ func (m *CustomResourceDefinitionList) Size() (n int) { } func (m *CustomResourceDefinitionNames) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Plural) @@ -1784,6 +2796,9 @@ func (m *CustomResourceDefinitionNames) Size() (n int) { } func (m *CustomResourceDefinitionSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Group) @@ -1818,10 +2833,16 @@ func (m *CustomResourceDefinitionSpec) Size() (n int) { l = m.Conversion.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.PreserveUnknownFields != nil { + n += 2 + } return n } func (m *CustomResourceDefinitionStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Conditions) > 0 { @@ -1842,6 +2863,9 @@ func (m 
*CustomResourceDefinitionStatus) Size() (n int) { } func (m *CustomResourceDefinitionVersion) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1866,6 +2890,9 @@ func (m *CustomResourceDefinitionVersion) Size() (n int) { } func (m *CustomResourceSubresourceScale) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.SpecReplicasPath) @@ -1880,12 +2907,18 @@ func (m *CustomResourceSubresourceScale) Size() (n int) { } func (m *CustomResourceSubresourceStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l return n } func (m *CustomResourceSubresources) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Status != nil { @@ -1900,6 +2933,9 @@ func (m *CustomResourceSubresources) Size() (n int) { } func (m *CustomResourceValidation) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.OpenAPIV3Schema != nil { @@ -1910,6 +2946,9 @@ func (m *CustomResourceValidation) Size() (n int) { } func (m *ExternalDocumentation) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Description) @@ -1920,6 +2959,9 @@ func (m *ExternalDocumentation) Size() (n int) { } func (m *JSON) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Raw != nil { @@ -1930,6 +2972,9 @@ func (m *JSON) Size() (n int) { } func (m *JSONSchemaProps) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ID) @@ -2075,10 +3120,28 @@ func (m *JSONSchemaProps) Size() (n int) { n += 2 + l + sovGenerated(uint64(l)) } n += 3 + if m.XPreserveUnknownFields != nil { + n += 3 + } + n += 3 + n += 3 + if len(m.XListMapKeys) > 0 { + for _, s := range m.XListMapKeys { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.XListType != nil { + l = len(*m.XListType) + n += 2 + l + sovGenerated(uint64(l)) + } return n } func (m *JSONSchemaPropsOrArray) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Schema != nil { @@ -2095,6 +3158,9 @@ func (m *JSONSchemaPropsOrArray) Size() (n int) { } func (m *JSONSchemaPropsOrBool) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -2106,6 +3172,9 @@ func (m *JSONSchemaPropsOrBool) Size() (n int) { } func (m *JSONSchemaPropsOrStringArray) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Schema != nil { @@ -2122,6 +3191,9 @@ func (m *JSONSchemaPropsOrStringArray) Size() (n int) { } func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Namespace) @@ -2132,10 +3204,16 @@ func (m *ServiceReference) Size() (n int) { l = len(*m.Path) n += 1 + l + sovGenerated(uint64(l)) } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } return n } func (m *WebhookClientConfig) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Service != nil { @@ -2154,14 +3232,7 @@ func (m *WebhookClientConfig) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -2170,10 +3241,15 @@ func (this *ConversionRequest) String() string { if this == nil { return "nil" } + repeatedStringForObjects := "[]RawExtension{" + for _, f := range this.Objects { + repeatedStringForObjects += fmt.Sprintf("%v", f) + "," + } + repeatedStringForObjects += "}" s := strings.Join([]string{`&ConversionRequest{`, `UID:` + fmt.Sprintf("%v", this.UID) + 
`,`, `DesiredAPIVersion:` + fmt.Sprintf("%v", this.DesiredAPIVersion) + `,`, - `Objects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Objects), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `Objects:` + repeatedStringForObjects + `,`, `}`, }, "") return s @@ -2182,10 +3258,15 @@ func (this *ConversionResponse) String() string { if this == nil { return "nil" } + repeatedStringForConvertedObjects := "[]RawExtension{" + for _, f := range this.ConvertedObjects { + repeatedStringForConvertedObjects += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConvertedObjects += "}" s := strings.Join([]string{`&ConversionResponse{`, `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `ConvertedObjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ConvertedObjects), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `Result:` + strings.Replace(strings.Replace(this.Result.String(), "Status", "k8s_io_apimachinery_pkg_apis_meta_v1.Status", 1), `&`, ``, 1) + `,`, + `ConvertedObjects:` + repeatedStringForConvertedObjects + `,`, + `Result:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "v1.Status", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2195,8 +3276,8 @@ func (this *ConversionReview) String() string { return "nil" } s := strings.Join([]string{`&ConversionReview{`, - `Request:` + strings.Replace(fmt.Sprintf("%v", this.Request), "ConversionRequest", "ConversionRequest", 1) + `,`, - `Response:` + strings.Replace(fmt.Sprintf("%v", this.Response), "ConversionResponse", "ConversionResponse", 1) + `,`, + `Request:` + strings.Replace(this.Request.String(), "ConversionRequest", "ConversionRequest", 1) + `,`, + `Response:` + strings.Replace(this.Response.String(), "ConversionResponse", "ConversionResponse", 1) + `,`, `}`, }, "") return s @@ -2222,7 +3303,7 @@ func (this *CustomResourceConversion) String() string { } s := strings.Join([]string{`&CustomResourceConversion{`, `Strategy:` + fmt.Sprintf("%v", this.Strategy) + `,`, - `WebhookClientConfig:` + strings.Replace(fmt.Sprintf("%v", this.WebhookClientConfig), "WebhookClientConfig", "WebhookClientConfig", 1) + `,`, + `WebhookClientConfig:` + strings.Replace(this.WebhookClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1) + `,`, `ConversionReviewVersions:` + fmt.Sprintf("%v", this.ConversionReviewVersions) + `,`, `}`, }, "") @@ -2233,7 +3314,7 @@ func (this *CustomResourceDefinition) String() string { return "nil" } s := strings.Join([]string{`&CustomResourceDefinition{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CustomResourceDefinitionSpec", "CustomResourceDefinitionSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CustomResourceDefinitionStatus", "CustomResourceDefinitionStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2247,7 +3328,7 @@ func (this *CustomResourceDefinitionCondition) String() string { s := strings.Join([]string{`&CustomResourceDefinitionCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), 
"Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -2258,9 +3339,14 @@ func (this *CustomResourceDefinitionList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]CustomResourceDefinition{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CustomResourceDefinition", "CustomResourceDefinition", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&CustomResourceDefinitionList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CustomResourceDefinition", "CustomResourceDefinition", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2284,16 +3370,27 @@ func (this *CustomResourceDefinitionSpec) String() string { if this == nil { return "nil" } + repeatedStringForVersions := "[]CustomResourceDefinitionVersion{" + for _, f := range this.Versions { + repeatedStringForVersions += strings.Replace(strings.Replace(f.String(), "CustomResourceDefinitionVersion", "CustomResourceDefinitionVersion", 1), `&`, ``, 1) + "," + } + repeatedStringForVersions += "}" + repeatedStringForAdditionalPrinterColumns := "[]CustomResourceColumnDefinition{" + for _, f := range this.AdditionalPrinterColumns { + repeatedStringForAdditionalPrinterColumns += strings.Replace(strings.Replace(f.String(), "CustomResourceColumnDefinition", "CustomResourceColumnDefinition", 1), `&`, ``, 1) + "," + } + repeatedStringForAdditionalPrinterColumns += "}" s := strings.Join([]string{`&CustomResourceDefinitionSpec{`, `Group:` + fmt.Sprintf("%v", this.Group) + `,`, `Version:` + fmt.Sprintf("%v", this.Version) + `,`, `Names:` + strings.Replace(strings.Replace(this.Names.String(), "CustomResourceDefinitionNames", "CustomResourceDefinitionNames", 1), `&`, ``, 1) + `,`, `Scope:` + fmt.Sprintf("%v", this.Scope) + `,`, - `Validation:` + strings.Replace(fmt.Sprintf("%v", this.Validation), "CustomResourceValidation", "CustomResourceValidation", 1) + `,`, - `Subresources:` + strings.Replace(fmt.Sprintf("%v", this.Subresources), "CustomResourceSubresources", "CustomResourceSubresources", 1) + `,`, - `Versions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Versions), "CustomResourceDefinitionVersion", "CustomResourceDefinitionVersion", 1), `&`, ``, 1) + `,`, - `AdditionalPrinterColumns:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AdditionalPrinterColumns), "CustomResourceColumnDefinition", "CustomResourceColumnDefinition", 1), `&`, ``, 1) + `,`, - `Conversion:` + strings.Replace(fmt.Sprintf("%v", this.Conversion), "CustomResourceConversion", "CustomResourceConversion", 1) + `,`, + `Validation:` + strings.Replace(this.Validation.String(), "CustomResourceValidation", "CustomResourceValidation", 1) + `,`, + `Subresources:` + strings.Replace(this.Subresources.String(), "CustomResourceSubresources", "CustomResourceSubresources", 1) + `,`, + `Versions:` + repeatedStringForVersions + `,`, + 
`AdditionalPrinterColumns:` + repeatedStringForAdditionalPrinterColumns + `,`, + `Conversion:` + strings.Replace(this.Conversion.String(), "CustomResourceConversion", "CustomResourceConversion", 1) + `,`, + `PreserveUnknownFields:` + valueToStringGenerated(this.PreserveUnknownFields) + `,`, `}`, }, "") return s @@ -2302,8 +3399,13 @@ func (this *CustomResourceDefinitionStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]CustomResourceDefinitionCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "CustomResourceDefinitionCondition", "CustomResourceDefinitionCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&CustomResourceDefinitionStatus{`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "CustomResourceDefinitionCondition", "CustomResourceDefinitionCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `AcceptedNames:` + strings.Replace(strings.Replace(this.AcceptedNames.String(), "CustomResourceDefinitionNames", "CustomResourceDefinitionNames", 1), `&`, ``, 1) + `,`, `StoredVersions:` + fmt.Sprintf("%v", this.StoredVersions) + `,`, `}`, @@ -2314,13 +3416,18 @@ func (this *CustomResourceDefinitionVersion) String() string { if this == nil { return "nil" } + repeatedStringForAdditionalPrinterColumns := "[]CustomResourceColumnDefinition{" + for _, f := range this.AdditionalPrinterColumns { + repeatedStringForAdditionalPrinterColumns += strings.Replace(strings.Replace(f.String(), "CustomResourceColumnDefinition", "CustomResourceColumnDefinition", 1), `&`, ``, 1) + "," + } + repeatedStringForAdditionalPrinterColumns += "}" s := strings.Join([]string{`&CustomResourceDefinitionVersion{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Served:` + fmt.Sprintf("%v", this.Served) + `,`, `Storage:` + fmt.Sprintf("%v", this.Storage) + `,`, - `Schema:` + strings.Replace(fmt.Sprintf("%v", this.Schema), "CustomResourceValidation", "CustomResourceValidation", 1) + `,`, - `Subresources:` + strings.Replace(fmt.Sprintf("%v", this.Subresources), "CustomResourceSubresources", "CustomResourceSubresources", 1) + `,`, - `AdditionalPrinterColumns:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AdditionalPrinterColumns), "CustomResourceColumnDefinition", "CustomResourceColumnDefinition", 1), `&`, ``, 1) + `,`, + `Schema:` + strings.Replace(this.Schema.String(), "CustomResourceValidation", "CustomResourceValidation", 1) + `,`, + `Subresources:` + strings.Replace(this.Subresources.String(), "CustomResourceSubresources", "CustomResourceSubresources", 1) + `,`, + `AdditionalPrinterColumns:` + repeatedStringForAdditionalPrinterColumns + `,`, `}`, }, "") return s @@ -2351,8 +3458,8 @@ func (this *CustomResourceSubresources) String() string { return "nil" } s := strings.Join([]string{`&CustomResourceSubresources{`, - `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "CustomResourceSubresourceStatus", "CustomResourceSubresourceStatus", 1) + `,`, - `Scale:` + strings.Replace(fmt.Sprintf("%v", this.Scale), "CustomResourceSubresourceScale", "CustomResourceSubresourceScale", 1) + `,`, + `Status:` + strings.Replace(this.Status.String(), "CustomResourceSubresourceStatus", "CustomResourceSubresourceStatus", 1) + `,`, + `Scale:` + strings.Replace(this.Scale.String(), "CustomResourceSubresourceScale", "CustomResourceSubresourceScale", 1) + `,`, `}`, }, "") return s @@ -2362,7 
+3469,7 @@ func (this *CustomResourceValidation) String() string { return "nil" } s := strings.Join([]string{`&CustomResourceValidation{`, - `OpenAPIV3Schema:` + strings.Replace(fmt.Sprintf("%v", this.OpenAPIV3Schema), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `OpenAPIV3Schema:` + strings.Replace(this.OpenAPIV3Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, `}`, }, "") return s @@ -2392,6 +3499,26 @@ func (this *JSONSchemaProps) String() string { if this == nil { return "nil" } + repeatedStringForEnum := "[]JSON{" + for _, f := range this.Enum { + repeatedStringForEnum += strings.Replace(strings.Replace(f.String(), "JSON", "JSON", 1), `&`, ``, 1) + "," + } + repeatedStringForEnum += "}" + repeatedStringForAllOf := "[]JSONSchemaProps{" + for _, f := range this.AllOf { + repeatedStringForAllOf += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," + } + repeatedStringForAllOf += "}" + repeatedStringForOneOf := "[]JSONSchemaProps{" + for _, f := range this.OneOf { + repeatedStringForOneOf += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," + } + repeatedStringForOneOf += "}" + repeatedStringForAnyOf := "[]JSONSchemaProps{" + for _, f := range this.AnyOf { + repeatedStringForAnyOf += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," + } + repeatedStringForAnyOf += "}" keysForProperties := make([]string, 0, len(this.Properties)) for k := range this.Properties { keysForProperties = append(keysForProperties, k) @@ -2440,7 +3567,7 @@ func (this *JSONSchemaProps) String() string { `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Format:` + fmt.Sprintf("%v", this.Format) + `,`, `Title:` + fmt.Sprintf("%v", this.Title) + `,`, - `Default:` + strings.Replace(fmt.Sprintf("%v", this.Default), "JSON", "JSON", 1) + `,`, + `Default:` + strings.Replace(this.Default.String(), "JSON", "JSON", 1) + `,`, `Maximum:` + valueToStringGenerated(this.Maximum) + `,`, `ExclusiveMaximum:` + fmt.Sprintf("%v", this.ExclusiveMaximum) + `,`, `Minimum:` + valueToStringGenerated(this.Minimum) + `,`, @@ -2452,24 +3579,29 @@ func (this *JSONSchemaProps) String() string { `MinItems:` + valueToStringGenerated(this.MinItems) + `,`, `UniqueItems:` + fmt.Sprintf("%v", this.UniqueItems) + `,`, `MultipleOf:` + valueToStringGenerated(this.MultipleOf) + `,`, - `Enum:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Enum), "JSON", "JSON", 1), `&`, ``, 1) + `,`, + `Enum:` + repeatedStringForEnum + `,`, `MaxProperties:` + valueToStringGenerated(this.MaxProperties) + `,`, `MinProperties:` + valueToStringGenerated(this.MinProperties) + `,`, `Required:` + fmt.Sprintf("%v", this.Required) + `,`, - `Items:` + strings.Replace(fmt.Sprintf("%v", this.Items), "JSONSchemaPropsOrArray", "JSONSchemaPropsOrArray", 1) + `,`, - `AllOf:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllOf), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + `,`, - `OneOf:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OneOf), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + `,`, - `AnyOf:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AnyOf), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + `,`, - `Not:` + strings.Replace(fmt.Sprintf("%v", this.Not), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `Items:` + strings.Replace(this.Items.String(), "JSONSchemaPropsOrArray", "JSONSchemaPropsOrArray", 1) + `,`, + `AllOf:` + 
repeatedStringForAllOf + `,`, + `OneOf:` + repeatedStringForOneOf + `,`, + `AnyOf:` + repeatedStringForAnyOf + `,`, + `Not:` + strings.Replace(this.Not.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, `Properties:` + mapStringForProperties + `,`, - `AdditionalProperties:` + strings.Replace(fmt.Sprintf("%v", this.AdditionalProperties), "JSONSchemaPropsOrBool", "JSONSchemaPropsOrBool", 1) + `,`, + `AdditionalProperties:` + strings.Replace(this.AdditionalProperties.String(), "JSONSchemaPropsOrBool", "JSONSchemaPropsOrBool", 1) + `,`, `PatternProperties:` + mapStringForPatternProperties + `,`, `Dependencies:` + mapStringForDependencies + `,`, - `AdditionalItems:` + strings.Replace(fmt.Sprintf("%v", this.AdditionalItems), "JSONSchemaPropsOrBool", "JSONSchemaPropsOrBool", 1) + `,`, + `AdditionalItems:` + strings.Replace(this.AdditionalItems.String(), "JSONSchemaPropsOrBool", "JSONSchemaPropsOrBool", 1) + `,`, `Definitions:` + mapStringForDefinitions + `,`, - `ExternalDocs:` + strings.Replace(fmt.Sprintf("%v", this.ExternalDocs), "ExternalDocumentation", "ExternalDocumentation", 1) + `,`, - `Example:` + strings.Replace(fmt.Sprintf("%v", this.Example), "JSON", "JSON", 1) + `,`, + `ExternalDocs:` + strings.Replace(this.ExternalDocs.String(), "ExternalDocumentation", "ExternalDocumentation", 1) + `,`, + `Example:` + strings.Replace(this.Example.String(), "JSON", "JSON", 1) + `,`, `Nullable:` + fmt.Sprintf("%v", this.Nullable) + `,`, + `XPreserveUnknownFields:` + valueToStringGenerated(this.XPreserveUnknownFields) + `,`, + `XEmbeddedResource:` + fmt.Sprintf("%v", this.XEmbeddedResource) + `,`, + `XIntOrString:` + fmt.Sprintf("%v", this.XIntOrString) + `,`, + `XListMapKeys:` + fmt.Sprintf("%v", this.XListMapKeys) + `,`, + `XListType:` + valueToStringGenerated(this.XListType) + `,`, `}`, }, "") return s @@ -2478,9 +3610,14 @@ func (this *JSONSchemaPropsOrArray) String() string { if this == nil { return "nil" } + repeatedStringForJSONSchemas := "[]JSONSchemaProps{" + for _, f := range this.JSONSchemas { + repeatedStringForJSONSchemas += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," + } + repeatedStringForJSONSchemas += "}" s := strings.Join([]string{`&JSONSchemaPropsOrArray{`, - `Schema:` + strings.Replace(fmt.Sprintf("%v", this.Schema), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, - `JSONSchemas:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.JSONSchemas), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + `,`, + `Schema:` + strings.Replace(this.Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `JSONSchemas:` + repeatedStringForJSONSchemas + `,`, `}`, }, "") return s @@ -2491,7 +3628,7 @@ func (this *JSONSchemaPropsOrBool) String() string { } s := strings.Join([]string{`&JSONSchemaPropsOrBool{`, `Allows:` + fmt.Sprintf("%v", this.Allows) + `,`, - `Schema:` + strings.Replace(fmt.Sprintf("%v", this.Schema), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `Schema:` + strings.Replace(this.Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, `}`, }, "") return s @@ -2501,7 +3638,7 @@ func (this *JSONSchemaPropsOrStringArray) String() string { return "nil" } s := strings.Join([]string{`&JSONSchemaPropsOrStringArray{`, - `Schema:` + strings.Replace(fmt.Sprintf("%v", this.Schema), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `Schema:` + strings.Replace(this.Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, `Property:` + fmt.Sprintf("%v", this.Property) + `,`, `}`, }, "") @@ 
-2515,6 +3652,7 @@ func (this *ServiceReference) String() string { `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Path:` + valueToStringGenerated(this.Path) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, `}`, }, "") return s @@ -2524,7 +3662,7 @@ func (this *WebhookClientConfig) String() string { return "nil" } s := strings.Join([]string{`&WebhookClientConfig{`, - `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "ServiceReference", "ServiceReference", 1) + `,`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, `URL:` + valueToStringGenerated(this.URL) + `,`, `}`, @@ -2554,7 +3692,7 @@ func (m *ConversionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2582,7 +3720,7 @@ func (m *ConversionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2592,6 +3730,9 @@ func (m *ConversionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2611,7 +3752,7 @@ func (m *ConversionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2621,6 +3762,9 @@ func (m *ConversionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2640,7 +3784,7 @@ func (m *ConversionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2649,10 +3793,13 @@ func (m *ConversionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Objects = append(m.Objects, k8s_io_apimachinery_pkg_runtime.RawExtension{}) + m.Objects = append(m.Objects, runtime.RawExtension{}) if err := m.Objects[len(m.Objects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2666,6 +3813,9 @@ func (m *ConversionRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2693,7 +3843,7 @@ func (m *ConversionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2721,7 +3871,7 @@ func (m *ConversionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2731,6 +3881,9 @@ func (m *ConversionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2750,7 +3903,7 @@ func (m 
*ConversionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2759,10 +3912,13 @@ func (m *ConversionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ConvertedObjects = append(m.ConvertedObjects, k8s_io_apimachinery_pkg_runtime.RawExtension{}) + m.ConvertedObjects = append(m.ConvertedObjects, runtime.RawExtension{}) if err := m.ConvertedObjects[len(m.ConvertedObjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2781,7 +3937,7 @@ func (m *ConversionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2790,6 +3946,9 @@ func (m *ConversionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2806,6 +3965,9 @@ func (m *ConversionResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2833,7 +3995,7 @@ func (m *ConversionReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2861,7 +4023,7 @@ func (m *ConversionReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2870,6 +4032,9 @@ func (m *ConversionReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2894,7 +4059,7 @@ func (m *ConversionReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2903,6 +4068,9 @@ func (m *ConversionReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2922,6 +4090,9 @@ func (m *ConversionReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2949,7 +4120,7 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2977,7 +4148,7 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2987,6 +4158,9 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3006,7 +4180,7 @@ func (m *CustomResourceColumnDefinition) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3016,6 +4190,9 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3035,7 +4212,7 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3045,6 +4222,9 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3064,7 +4244,7 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3074,6 +4254,9 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3093,7 +4276,7 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Priority |= (int32(b) & 0x7F) << shift + m.Priority |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3112,7 +4295,7 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3122,6 +4305,9 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3136,6 +4322,9 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3163,7 +4352,7 @@ func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3191,7 +4380,7 @@ func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3201,6 +4390,9 @@ func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3220,7 +4412,7 @@ func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3229,6 +4421,9 @@ func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -3253,7 +4448,7 @@ func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3263,6 +4458,9 @@ func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3277,6 +4475,9 @@ func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3304,7 +4505,7 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3332,7 +4533,7 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3341,6 +4542,9 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3362,7 +4566,7 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3371,6 +4575,9 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3392,7 +4599,7 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3401,6 +4608,9 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3417,6 +4627,9 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3444,7 +4657,7 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3472,7 +4685,7 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3482,6 +4695,9 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3501,7 +4717,7 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << 
shift if b < 0x80 { break } @@ -3511,6 +4727,9 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3530,7 +4749,7 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3539,6 +4758,9 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3560,7 +4782,7 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3570,6 +4792,9 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3589,7 +4814,7 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3599,6 +4824,9 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3613,6 +4841,9 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3640,7 +4871,7 @@ func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3668,7 +4899,7 @@ func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3677,6 +4908,9 @@ func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3698,7 +4932,7 @@ func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3707,6 +4941,9 @@ func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3724,6 +4961,9 @@ func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3751,7 +4991,7 @@ func (m *CustomResourceDefinitionNames) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3779,7 +5019,7 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3789,6 +5029,9 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3808,7 +5051,7 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3818,6 +5061,9 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3837,7 +5083,7 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3847,6 +5093,9 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3866,7 +5115,7 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3876,6 +5125,9 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3895,7 +5147,7 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3905,6 +5157,9 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3924,7 +5179,7 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3934,6 +5189,9 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3948,6 +5206,9 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3975,7 +5236,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= 
uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4003,7 +5264,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4013,6 +5274,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4032,7 +5296,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4042,6 +5306,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4061,7 +5328,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4070,6 +5337,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4091,7 +5361,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4101,6 +5371,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4120,7 +5393,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4129,6 +5402,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4153,7 +5429,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4162,6 +5438,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4186,7 +5465,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4195,6 +5474,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4217,7 +5499,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= 
(int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4226,6 +5508,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4248,7 +5533,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4257,6 +5542,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4267,6 +5555,27 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PreserveUnknownFields", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.PreserveUnknownFields = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4276,6 +5585,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4303,7 +5615,7 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4331,7 +5643,7 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4340,6 +5652,9 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4362,7 +5677,7 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4371,6 +5686,9 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4392,7 +5710,7 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4402,6 +5720,9 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4416,6 +5737,9 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4443,7 +5767,7 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4471,7 +5795,7 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4481,6 +5805,9 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4500,7 +5827,7 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4520,7 +5847,7 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4540,7 +5867,7 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4549,6 +5876,9 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4573,7 +5903,7 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4582,6 +5912,9 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4606,7 +5939,7 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4615,6 +5948,9 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4632,6 +5968,9 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4659,7 +5998,7 @@ func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4687,7 +6026,7 @@ func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4697,6 +6036,9 @@ func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4716,7 +6058,7 @@ func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4726,6 +6068,9 @@ func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4745,7 +6090,7 @@ func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4755,6 +6100,9 @@ func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4770,6 +6118,9 @@ func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4797,7 +6148,7 @@ func (m *CustomResourceSubresourceStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4820,6 +6171,9 @@ func (m *CustomResourceSubresourceStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4847,7 +6201,7 @@ func (m *CustomResourceSubresources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4875,7 +6229,7 @@ func (m *CustomResourceSubresources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4884,6 +6238,9 @@ func (m *CustomResourceSubresources) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4908,7 +6265,7 @@ func (m *CustomResourceSubresources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4917,6 +6274,9 @@ func (m *CustomResourceSubresources) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4936,6 +6296,9 @@ func (m *CustomResourceSubresources) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4963,7 +6326,7 @@ func (m *CustomResourceValidation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4991,7 +6354,7 @@ func (m *CustomResourceValidation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5000,6 +6363,9 @@ func (m *CustomResourceValidation) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5019,6 +6385,9 @@ func (m *CustomResourceValidation) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5046,7 +6415,7 @@ func (m *ExternalDocumentation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5074,7 +6443,7 @@ func (m *ExternalDocumentation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5084,6 +6453,9 @@ func (m *ExternalDocumentation) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5103,7 +6475,7 @@ func (m *ExternalDocumentation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5113,6 +6485,9 @@ func (m *ExternalDocumentation) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5127,6 +6502,9 @@ func (m *ExternalDocumentation) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5154,7 +6532,7 @@ func (m *JSON) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5182,7 +6560,7 @@ func (m *JSON) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5191,6 +6569,9 @@ func (m *JSON) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5208,6 +6589,9 @@ func (m *JSON) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5235,7 +6619,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5263,7 +6647,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5273,6 +6657,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -5292,7 +6679,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5302,6 +6689,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5321,7 +6711,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5331,6 +6721,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5351,7 +6744,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5361,6 +6754,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5380,7 +6776,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5390,6 +6786,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5409,7 +6808,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5419,6 +6818,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5438,7 +6840,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5448,6 +6850,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5467,7 +6872,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5476,6 +6881,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5512,7 +6920,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5544,7 +6952,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5564,7 +6972,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5584,7 +6992,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5604,7 +7012,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5614,6 +7022,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5633,7 +7044,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5653,7 +7064,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5673,7 +7084,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5705,7 +7116,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5714,6 +7125,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5736,7 +7150,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5756,7 +7170,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5776,7 +7190,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5786,6 +7200,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5805,7 +7222,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5814,6 +7231,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5838,7 +7258,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5847,6 +7267,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5869,7 +7292,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5878,6 +7301,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5900,7 +7326,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5909,6 +7335,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5931,7 +7360,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5940,6 +7369,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5964,7 +7396,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5973,6 +7405,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5993,7 +7428,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6010,7 +7445,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6020,6 +7455,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -6036,7 +7474,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6045,7 +7483,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { @@ -6087,7 +7525,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6096,6 +7534,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { 
return io.ErrUnexpectedEOF } @@ -6120,7 +7561,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6129,6 +7570,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6149,7 +7593,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6166,7 +7610,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6176,6 +7620,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -6192,7 +7639,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6201,7 +7648,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { @@ -6243,7 +7690,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6252,6 +7699,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6272,7 +7722,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6289,7 +7739,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6299,6 +7749,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -6315,7 +7768,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6324,7 +7777,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { @@ -6366,7 +7819,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6375,6 +7828,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { 
return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6399,7 +7855,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6408,6 +7864,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6428,7 +7887,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6445,7 +7904,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6455,6 +7914,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -6471,7 +7933,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6480,7 +7942,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { @@ -6522,7 +7984,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6531,6 +7993,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6555,7 +8020,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6564,6 +8029,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6588,12 +8056,138 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Nullable = bool(v != 0) + case 38: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field XPreserveUnknownFields", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.XPreserveUnknownFields = &b + case 39: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field XEmbeddedResource", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.XEmbeddedResource = bool(v != 0) + case 40: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field XIntOrString", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.XIntOrString = bool(v != 0) + case 41: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field XListMapKeys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.XListMapKeys = append(m.XListMapKeys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 42: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field XListType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.XListType = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6603,6 +8197,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6630,7 +8227,7 @@ func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6658,7 +8255,7 @@ func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6667,6 +8264,9 @@ func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6691,7 +8291,7 @@ func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6700,6 +8300,9 @@ func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6717,6 +8320,9 @@ func (m *JSONSchemaPropsOrArray) 
Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6744,7 +8350,7 @@ func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6772,7 +8378,7 @@ func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6792,7 +8398,7 @@ func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6801,6 +8407,9 @@ func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6820,6 +8429,9 @@ func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6847,7 +8459,7 @@ func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6875,7 +8487,7 @@ func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6884,6 +8496,9 @@ func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6908,7 +8523,7 @@ func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6918,6 +8533,9 @@ func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6932,6 +8550,9 @@ func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6959,7 +8580,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6987,7 +8608,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6997,6 +8618,9 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7016,7 +8640,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen 
|= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7026,6 +8650,9 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7045,7 +8672,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7055,12 +8682,35 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) m.Path = &s iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -7070,6 +8720,9 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7097,7 +8750,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7125,7 +8778,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7134,6 +8787,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7158,7 +8814,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7167,6 +8823,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7189,7 +8848,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7199,6 +8858,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7214,6 +8876,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7280,10 +8945,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { 
return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -7312,6 +8980,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -7330,185 +9001,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 2783 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xdf, 0x6f, 0x23, 0x57, - 0xf5, 0xdf, 0xb1, 0xe3, 0xc4, 0xb9, 0x49, 0x36, 0xc9, 0xdd, 0x66, 0x3b, 0x9b, 0x6f, 0x6a, 0x27, - 0xee, 0xb7, 0x55, 0x28, 0xbb, 0x4e, 0xbb, 0xb4, 0xb4, 0x54, 0xe2, 0x21, 0x76, 0xd2, 0x2a, 0x65, - 0xf3, 0x83, 0xeb, 0xdd, 0xb6, 0xd0, 0x9f, 0x37, 0xe3, 0x6b, 0x67, 0x36, 0xf3, 0x6b, 0xe7, 0xce, - 0x38, 0x89, 0x0a, 0x08, 0xa8, 0x2a, 0x10, 0x02, 0x8a, 0xe8, 0xbe, 0x20, 0xe0, 0x01, 0x10, 0x2f, - 0x3c, 0xc0, 0x03, 0xbc, 0xc1, 0x1f, 0xb0, 0x8f, 0x15, 0x4f, 0x15, 0x42, 0x16, 0xeb, 0xfe, 0x0b, - 0x48, 0x48, 0x79, 0x42, 0xf7, 0xc7, 0xdc, 0x19, 0x8f, 0xed, 0xdd, 0xa8, 0x6b, 0x77, 0x79, 0xcb, - 0x9c, 0x73, 0xee, 0xf9, 0x9c, 0x7b, 0xee, 0x39, 0xe7, 0x9e, 0x7b, 0x1c, 0xd0, 0x38, 0x7c, 0x81, - 0x96, 0x4d, 0x77, 0xed, 0x30, 0xdc, 0x27, 0xbe, 0x43, 0x02, 0x42, 0xd7, 0x5a, 0xc4, 0xa9, 0xbb, - 0xfe, 0x9a, 0x64, 0x60, 0xcf, 0x24, 0xc7, 0x01, 0x71, 0xa8, 0xe9, 0x3a, 0xf4, 0x0a, 0xf6, 0x4c, - 0x4a, 0xfc, 0x16, 0xf1, 0xd7, 0xbc, 0xc3, 0x26, 0xe3, 0xd1, 0x6e, 0x81, 0xb5, 0xd6, 0x33, 0xfb, - 0x24, 0xc0, 0xcf, 0xac, 0x35, 0x89, 0x43, 0x7c, 0x1c, 0x90, 0x7a, 0xd9, 0xf3, 0xdd, 0xc0, 0x85, - 0x5f, 0x15, 0xea, 0xca, 0x5d, 0xd2, 0xef, 0x28, 0x75, 0x65, 0xef, 0xb0, 0xc9, 0x78, 0xb4, 0x5b, - 0xa0, 0x2c, 0xd5, 0x2d, 0x5e, 0x69, 0x9a, 0xc1, 0x41, 0xb8, 0x5f, 0x36, 0x5c, 0x7b, 0xad, 0xe9, - 0x36, 0xdd, 0x35, 0xae, 0x75, 0x3f, 0x6c, 0xf0, 0x2f, 0xfe, 0xc1, 0xff, 0x12, 0x68, 0x8b, 0xcf, - 0xc6, 0xc6, 0xdb, 0xd8, 0x38, 0x30, 0x1d, 0xe2, 0x9f, 0xc4, 0x16, 0xdb, 0x24, 0xc0, 0x6b, 0xad, - 0x1e, 0x1b, 0x17, 0xd7, 0x06, 0xad, 0xf2, 0x43, 0x27, 0x30, 0x6d, 0xd2, 0xb3, 0xe0, 0xcb, 0xf7, - 0x5b, 0x40, 0x8d, 0x03, 0x62, 0xe3, 0xf4, 0xba, 0xd2, 0xa9, 0x06, 0xe6, 0xab, 0xae, 0xd3, 0x22, - 0x3e, 0xdb, 0x25, 0x22, 0xb7, 0x42, 0x42, 0x03, 0x58, 0x01, 0xd9, 0xd0, 0xac, 0xeb, 0xda, 0xb2, - 0xb6, 0x3a, 0x59, 0x79, 0xfa, 0x4e, 0xbb, 0x78, 0xae, 0xd3, 0x2e, 0x66, 0x6f, 0x6c, 0x6d, 0x9c, - 0xb6, 0x8b, 0x2b, 0x83, 0x90, 0x82, 0x13, 0x8f, 0xd0, 0xf2, 0x8d, 0xad, 0x0d, 0xc4, 0x16, 0xc3, - 0x97, 0xc1, 0x7c, 0x9d, 0x50, 0xd3, 0x27, 0xf5, 0xf5, 0xbd, 0xad, 0x57, 0x85, 0x7e, 0x3d, 0xc3, - 0x35, 0x5e, 0x92, 0x1a, 0xe7, 0x37, 0xd2, 0x02, 0xa8, 0x77, 0x0d, 0x7c, 0x1d, 0x4c, 0xb8, 0xfb, - 0x37, 0x89, 0x11, 0x50, 0x3d, 0xbb, 0x9c, 0x5d, 0x9d, 0xba, 0x7a, 0xa5, 0x1c, 0x9f, 0xa0, 0x32, - 0x81, 0x1f, 0x9b, 0xdc, 0x6c, 0x19, 0xe1, 0xa3, 0xcd, 0xe8, 0xe4, 0x2a, 0xb3, 0x12, 0x6d, 0x62, - 0x57, 0x68, 0x41, 0x91, 0xba, 0xd2, 0xef, 0x32, 0x00, 0x26, 0x37, 0x4f, 0x3d, 0xd7, 0xa1, 0x64, - 0x28, 0xbb, 0xa7, 0x60, 0xce, 0xe0, 0x9a, 0x03, 0x52, 0x97, 0xb8, 0x7a, 0xe6, 0xb3, 0x58, 0xaf, - 0x4b, 0xfc, 0xb9, 0x6a, 0x4a, 0x1d, 0xea, 0x01, 0x80, 0xd7, 0xc1, 0xb8, 0x4f, 0x68, 0x68, 0x05, - 0x7a, 0x76, 
0x59, 0x5b, 0x9d, 0xba, 0x7a, 0x79, 0x20, 0x14, 0x8f, 0x6f, 0x16, 0x7c, 0xe5, 0xd6, - 0x33, 0xe5, 0x5a, 0x80, 0x83, 0x90, 0x56, 0xce, 0x4b, 0xa4, 0x71, 0xc4, 0x75, 0x20, 0xa9, 0xab, - 0xf4, 0xc3, 0x0c, 0x98, 0x4b, 0x7a, 0xa9, 0x65, 0x92, 0x23, 0x78, 0x04, 0x26, 0x7c, 0x11, 0x2c, - 0xdc, 0x4f, 0x53, 0x57, 0xf7, 0xca, 0x0f, 0x94, 0x56, 0xe5, 0x9e, 0x20, 0xac, 0x4c, 0xb1, 0x33, - 0x93, 0x1f, 0x28, 0x42, 0x83, 0xef, 0x81, 0xbc, 0x2f, 0x0f, 0x8a, 0x47, 0xd3, 0xd4, 0xd5, 0xaf, - 0x0f, 0x11, 0x59, 0x28, 0xae, 0x4c, 0x77, 0xda, 0xc5, 0x7c, 0xf4, 0x85, 0x14, 0x60, 0xe9, 0xa3, - 0x0c, 0x28, 0x54, 0x43, 0x1a, 0xb8, 0x36, 0x22, 0xd4, 0x0d, 0x7d, 0x83, 0x54, 0x5d, 0x2b, 0xb4, - 0x9d, 0x0d, 0xd2, 0x30, 0x1d, 0x33, 0x60, 0xd1, 0xba, 0x0c, 0xc6, 0x1c, 0x6c, 0x13, 0x19, 0x3d, - 0xd3, 0xd2, 0xa7, 0x63, 0x3b, 0xd8, 0x26, 0x88, 0x73, 0x98, 0x04, 0x0b, 0x16, 0x99, 0x0b, 0x4a, - 0xe2, 0xfa, 0x89, 0x47, 0x10, 0xe7, 0xc0, 0x27, 0xc1, 0x78, 0xc3, 0xf5, 0x6d, 0x2c, 0xce, 0x71, - 0x32, 0x3e, 0x99, 0x97, 0x38, 0x15, 0x49, 0x2e, 0x7c, 0x0e, 0x4c, 0xd5, 0x09, 0x35, 0x7c, 0xd3, - 0x63, 0xd0, 0xfa, 0x18, 0x17, 0xbe, 0x20, 0x85, 0xa7, 0x36, 0x62, 0x16, 0x4a, 0xca, 0xc1, 0xcb, - 0x20, 0xef, 0xf9, 0xa6, 0xeb, 0x9b, 0xc1, 0x89, 0x9e, 0x5b, 0xd6, 0x56, 0x73, 0x95, 0x39, 0xb9, - 0x26, 0xbf, 0x27, 0xe9, 0x48, 0x49, 0xc0, 0x65, 0x90, 0x7f, 0xa5, 0xb6, 0xbb, 0xb3, 0x87, 0x83, - 0x03, 0x7d, 0x9c, 0x23, 0x8c, 0x31, 0x69, 0x94, 0xbf, 0x29, 0xa9, 0xa5, 0x7f, 0x66, 0x80, 0x9e, - 0xf6, 0x4a, 0xe4, 0x52, 0xf8, 0x12, 0xc8, 0xd3, 0x80, 0x55, 0x9c, 0xe6, 0x89, 0xf4, 0xc9, 0x53, - 0x11, 0x58, 0x4d, 0xd2, 0x4f, 0xdb, 0xc5, 0x8b, 0xf1, 0x8a, 0x88, 0xca, 0xfd, 0xa1, 0xd6, 0xc2, - 0x5f, 0x6b, 0xe0, 0xc2, 0x11, 0xd9, 0x3f, 0x70, 0xdd, 0xc3, 0xaa, 0x65, 0x12, 0x27, 0xa8, 0xba, - 0x4e, 0xc3, 0x6c, 0xca, 0x18, 0x40, 0x0f, 0x18, 0x03, 0xaf, 0xf5, 0x6a, 0xae, 0x3c, 0xda, 0x69, - 0x17, 0x2f, 0xf4, 0x61, 0xa0, 0x7e, 0x76, 0xc0, 0xd7, 0x81, 0x6e, 0xa4, 0x92, 0x44, 0x16, 0x30, - 0x51, 0xb6, 0x26, 0x2b, 0x4b, 0x9d, 0x76, 0x51, 0xaf, 0x0e, 0x90, 0x41, 0x03, 0x57, 0x97, 0xde, - 0xcf, 0xa6, 0xdd, 0x9b, 0x08, 0xb7, 0x77, 0x41, 0x9e, 0xa5, 0x71, 0x1d, 0x07, 0x58, 0x26, 0xe2, - 0xd3, 0x67, 0x4b, 0x7a, 0x51, 0x33, 0xb6, 0x49, 0x80, 0x2b, 0x50, 0x1e, 0x08, 0x88, 0x69, 0x48, - 0x69, 0x85, 0xdf, 0x06, 0x63, 0xd4, 0x23, 0x86, 0x74, 0xf4, 0x1b, 0x0f, 0x9a, 0x6c, 0x03, 0x36, - 0x52, 0xf3, 0x88, 0x11, 0xe7, 0x02, 0xfb, 0x42, 0x1c, 0x16, 0x7e, 0xa0, 0x81, 0x71, 0xca, 0x0b, - 0x94, 0x2c, 0x6a, 0x6f, 0x8d, 0xca, 0x82, 0x54, 0x15, 0x14, 0xdf, 0x48, 0x82, 0x97, 0xfe, 0x9d, - 0x01, 0x2b, 0x83, 0x96, 0x56, 0x5d, 0xa7, 0x2e, 0x8e, 0x63, 0x4b, 0xe6, 0xb6, 0x88, 0xf4, 0xe7, - 0x92, 0xb9, 0x7d, 0xda, 0x2e, 0x3e, 0x71, 0x5f, 0x05, 0x89, 0x22, 0xf0, 0x15, 0xb5, 0x6f, 0x51, - 0x28, 0x56, 0xba, 0x0d, 0x3b, 0x6d, 0x17, 0x67, 0xd5, 0xb2, 0x6e, 0x5b, 0x61, 0x0b, 0x40, 0x0b, - 0xd3, 0xe0, 0xba, 0x8f, 0x1d, 0x2a, 0xd4, 0x9a, 0x36, 0x91, 0xee, 0x7b, 0xea, 0x6c, 0xe1, 0xc1, - 0x56, 0x54, 0x16, 0x25, 0x24, 0xbc, 0xd6, 0xa3, 0x0d, 0xf5, 0x41, 0x60, 0x75, 0xcb, 0x27, 0x98, - 0xaa, 0x52, 0x94, 0xb8, 0x51, 0x18, 0x15, 0x49, 0x2e, 0xfc, 0x02, 0x98, 0xb0, 0x09, 0xa5, 0xb8, - 0x49, 0x78, 0xfd, 0x99, 0x8c, 0xaf, 0xe8, 0x6d, 0x41, 0x46, 0x11, 0x9f, 0xf5, 0x27, 0x4b, 0x83, - 0xbc, 0x76, 0xcd, 0xa4, 0x01, 0x7c, 0xb3, 0x27, 0x01, 0xca, 0x67, 0xdb, 0x21, 0x5b, 0xcd, 0xc3, - 0x5f, 0x15, 0xbf, 0x88, 0x92, 0x08, 0xfe, 0x6f, 0x81, 0x9c, 0x19, 0x10, 0x3b, 0xba, 0xbb, 0x5f, - 0x1b, 0x51, 0xec, 0x55, 0x66, 0xa4, 0x0d, 0xb9, 0x2d, 0x86, 0x86, 0x04, 0x68, 0xe9, 0xf7, 0x19, - 0xf0, 0xd8, 0xa0, 0x25, 0xec, 0x42, 
0xa1, 0xcc, 0xe3, 0x9e, 0x15, 0xfa, 0xd8, 0x92, 0x11, 0xa7, - 0x3c, 0xbe, 0xc7, 0xa9, 0x48, 0x72, 0x59, 0xc9, 0xa7, 0xa6, 0xd3, 0x0c, 0x2d, 0xec, 0xcb, 0x70, - 0x52, 0xbb, 0xae, 0x49, 0x3a, 0x52, 0x12, 0xb0, 0x0c, 0x00, 0x3d, 0x70, 0xfd, 0x80, 0x63, 0xc8, - 0xea, 0x75, 0x9e, 0x15, 0x88, 0x9a, 0xa2, 0xa2, 0x84, 0x04, 0xbb, 0xd1, 0x0e, 0x4d, 0xa7, 0x2e, - 0x4f, 0x5d, 0x65, 0xf1, 0xd7, 0x4c, 0xa7, 0x8e, 0x38, 0x87, 0xe1, 0x5b, 0x26, 0x0d, 0x18, 0x45, - 0x1e, 0x79, 0x97, 0xd7, 0xb9, 0xa4, 0x92, 0x60, 0xf8, 0x06, 0xab, 0xfa, 0xae, 0x6f, 0x12, 0xaa, - 0x8f, 0xc7, 0xf8, 0x55, 0x45, 0x45, 0x09, 0x89, 0xd2, 0x2f, 0xf3, 0x83, 0x83, 0x84, 0x95, 0x12, - 0xf8, 0x38, 0xc8, 0x35, 0x7d, 0x37, 0xf4, 0xa4, 0x97, 0x94, 0xb7, 0x5f, 0x66, 0x44, 0x24, 0x78, - 0x2c, 0x2a, 0x5b, 0x5d, 0x6d, 0xaa, 0x8a, 0xca, 0xa8, 0x39, 0x8d, 0xf8, 0xf0, 0x7b, 0x1a, 0xc8, - 0x39, 0xd2, 0x39, 0x2c, 0xe4, 0xde, 0x1c, 0x51, 0x5c, 0x70, 0xf7, 0xc6, 0xe6, 0x0a, 0xcf, 0x0b, - 0x64, 0xf8, 0x2c, 0xc8, 0x51, 0xc3, 0xf5, 0x88, 0xf4, 0x7a, 0x21, 0x12, 0xaa, 0x31, 0xe2, 0x69, - 0xbb, 0x38, 0x13, 0xa9, 0xe3, 0x04, 0x24, 0x84, 0xe1, 0x0f, 0x34, 0x00, 0x5a, 0xd8, 0x32, 0xeb, - 0x98, 0xb7, 0x0c, 0x39, 0x6e, 0xfe, 0x70, 0xc3, 0xfa, 0x55, 0xa5, 0x5e, 0x1c, 0x5a, 0xfc, 0x8d, - 0x12, 0xd0, 0xf0, 0x43, 0x0d, 0x4c, 0xd3, 0x70, 0xdf, 0x97, 0xab, 0x28, 0x6f, 0x2e, 0xa6, 0xae, - 0x7e, 0x63, 0xa8, 0xb6, 0xd4, 0x12, 0x00, 0x95, 0xb9, 0x4e, 0xbb, 0x38, 0x9d, 0xa4, 0xa0, 0x2e, - 0x03, 0xe0, 0x8f, 0x35, 0x90, 0x6f, 0x45, 0x77, 0xf6, 0x04, 0x4f, 0xf8, 0xb7, 0x47, 0x74, 0xb0, - 0x32, 0xa2, 0xe2, 0x2c, 0x50, 0x7d, 0x80, 0xb2, 0x00, 0xfe, 0x55, 0x03, 0x3a, 0xae, 0x8b, 0x02, - 0x8f, 0xad, 0x3d, 0xdf, 0x74, 0x02, 0xe2, 0x8b, 0x7e, 0x93, 0xea, 0x79, 0x6e, 0xde, 0x70, 0xef, - 0xc2, 0x74, 0x2f, 0x5b, 0x59, 0x96, 0xd6, 0xe9, 0xeb, 0x03, 0xcc, 0x40, 0x03, 0x0d, 0xe4, 0x81, - 0x16, 0xb7, 0x34, 0xfa, 0xe4, 0x08, 0x02, 0x2d, 0xee, 0xa5, 0x64, 0x75, 0x88, 0x3b, 0xa8, 0x04, - 0x74, 0xe9, 0xc3, 0x6c, 0xba, 0x69, 0x4f, 0x5f, 0xfa, 0xf0, 0xb6, 0x30, 0x56, 0x6c, 0x85, 0xea, - 0x1a, 0x77, 0xee, 0xbb, 0x23, 0x3a, 0x7b, 0x75, 0x6b, 0xc7, 0x8d, 0x97, 0x22, 0x51, 0x94, 0xb0, - 0x03, 0xfe, 0x42, 0x03, 0x33, 0xd8, 0x30, 0x88, 0x17, 0x90, 0xba, 0xa8, 0xc5, 0x99, 0xcf, 0xa1, - 0xdc, 0x2c, 0x48, 0xab, 0x66, 0xd6, 0x93, 0xd0, 0xa8, 0xdb, 0x12, 0xf8, 0x22, 0x38, 0x4f, 0x03, - 0xd7, 0x27, 0xf5, 0x54, 0x97, 0x0b, 0x3b, 0xed, 0xe2, 0xf9, 0x5a, 0x17, 0x07, 0xa5, 0x24, 0x4b, - 0x9f, 0x8e, 0x81, 0xe2, 0x7d, 0x32, 0xe3, 0x0c, 0xef, 0xa8, 0x27, 0xc1, 0x38, 0xdf, 0x6e, 0x9d, - 0x7b, 0x25, 0x9f, 0xe8, 0xdc, 0x38, 0x15, 0x49, 0x2e, 0xab, 0xeb, 0x0c, 0x9f, 0x75, 0x1b, 0x59, - 0x2e, 0xa8, 0xea, 0x7a, 0x4d, 0x90, 0x51, 0xc4, 0x87, 0xef, 0x81, 0x71, 0x31, 0x27, 0xe1, 0x45, - 0x75, 0x84, 0x85, 0x11, 0x70, 0x3b, 0x39, 0x14, 0x92, 0x90, 0xbd, 0x05, 0x31, 0xf7, 0xb0, 0x0b, - 0xe2, 0x3d, 0x2b, 0xd0, 0xf8, 0xff, 0x78, 0x05, 0x2a, 0xfd, 0x47, 0x4b, 0xe7, 0x7d, 0x62, 0xab, - 0x35, 0x03, 0x5b, 0x04, 0x6e, 0x80, 0x39, 0xf6, 0xc8, 0x40, 0xc4, 0xb3, 0x4c, 0x03, 0x53, 0xfe, - 0xc6, 0x15, 0x01, 0xa7, 0xc6, 0x2e, 0xb5, 0x14, 0x1f, 0xf5, 0xac, 0x80, 0xaf, 0x00, 0x28, 0x1a, - 0xef, 0x2e, 0x3d, 0xa2, 0x87, 0x50, 0x2d, 0x74, 0xad, 0x47, 0x02, 0xf5, 0x59, 0x05, 0xab, 0x60, - 0xde, 0xc2, 0xfb, 0xc4, 0xaa, 0x11, 0x8b, 0x18, 0x81, 0xeb, 0x73, 0x55, 0x62, 0x0a, 0xb0, 0xd0, - 0x69, 0x17, 0xe7, 0xaf, 0xa5, 0x99, 0xa8, 0x57, 0xbe, 0xb4, 0x92, 0x4e, 0xaf, 0xe4, 0xc6, 0xc5, - 0x73, 0xe6, 0x37, 0x19, 0xb0, 0x38, 0x38, 0x32, 0xe0, 0xf7, 0xe3, 0x57, 0x97, 0x68, 0xaa, 0xdf, - 0x1e, 0x55, 0x14, 0xca, 0x67, 0x17, 0xe8, 0x7d, 0x72, 0xc1, 
0xef, 0xb0, 0x0e, 0x07, 0x5b, 0xd1, - 0x9c, 0xe7, 0xad, 0x91, 0x99, 0xc0, 0x40, 0x2a, 0x93, 0xa2, 0x79, 0xc2, 0x16, 0xef, 0x95, 0xb0, - 0x45, 0x4a, 0x7f, 0xd0, 0xd2, 0x0f, 0xef, 0x38, 0x83, 0xe1, 0x4f, 0x34, 0x30, 0xeb, 0x7a, 0xc4, - 0x59, 0xdf, 0xdb, 0x7a, 0xf5, 0x4b, 0x22, 0x93, 0xa5, 0xab, 0x76, 0x1e, 0xd0, 0xce, 0x57, 0x6a, - 0xbb, 0x3b, 0x42, 0xe1, 0x9e, 0xef, 0x7a, 0xb4, 0x72, 0xa1, 0xd3, 0x2e, 0xce, 0xee, 0x76, 0x43, - 0xa1, 0x34, 0x76, 0xc9, 0x06, 0x0b, 0x9b, 0xc7, 0x01, 0xf1, 0x1d, 0x6c, 0x6d, 0xb8, 0x46, 0x68, - 0x13, 0x27, 0x10, 0x86, 0xa6, 0x86, 0x44, 0xda, 0x19, 0x87, 0x44, 0x8f, 0x81, 0x6c, 0xe8, 0x5b, - 0x32, 0x8a, 0xa7, 0xd4, 0x10, 0x14, 0x5d, 0x43, 0x8c, 0x5e, 0x5a, 0x01, 0x63, 0xcc, 0x4e, 0x78, - 0x09, 0x64, 0x7d, 0x7c, 0xc4, 0xb5, 0x4e, 0x57, 0x26, 0x98, 0x08, 0xc2, 0x47, 0x88, 0xd1, 0x4a, - 0xff, 0x58, 0x02, 0xb3, 0xa9, 0xbd, 0xc0, 0x45, 0x90, 0x51, 0x93, 0x55, 0x20, 0x95, 0x66, 0xb6, - 0x36, 0x50, 0xc6, 0xac, 0xc3, 0xe7, 0x55, 0xf1, 0x15, 0xa0, 0x45, 0x55, 0xcf, 0x39, 0x95, 0xb5, - 0xb4, 0xb1, 0x3a, 0x66, 0x48, 0x54, 0x38, 0x99, 0x0d, 0xa4, 0x21, 0xb3, 0x44, 0xd8, 0x40, 0x1a, - 0x88, 0xd1, 0x3e, 0xeb, 0x84, 0x2c, 0x1a, 0xd1, 0xe5, 0xce, 0x30, 0xa2, 0x1b, 0xbf, 0xe7, 0x88, - 0xee, 0x71, 0x90, 0x0b, 0xcc, 0xc0, 0x22, 0xfa, 0x44, 0xf7, 0xcb, 0xe3, 0x3a, 0x23, 0x22, 0xc1, - 0x83, 0x37, 0xc1, 0x44, 0x9d, 0x34, 0x70, 0x68, 0x05, 0x7a, 0x9e, 0x87, 0x50, 0x75, 0x08, 0x21, - 0x24, 0xe6, 0xa7, 0x1b, 0x42, 0x2f, 0x8a, 0x00, 0xe0, 0x13, 0x60, 0xc2, 0xc6, 0xc7, 0xa6, 0x1d, - 0xda, 0xbc, 0x27, 0xd3, 0x84, 0xd8, 0xb6, 0x20, 0xa1, 0x88, 0xc7, 0x2a, 0x23, 0x39, 0x36, 0xac, - 0x90, 0x9a, 0x2d, 0x22, 0x99, 0x3a, 0xe0, 0xb7, 0xa7, 0xaa, 0x8c, 0x9b, 0x29, 0x3e, 0xea, 0x59, - 0xc1, 0xc1, 0x4c, 0x87, 0x2f, 0x9e, 0x4a, 0x80, 0x09, 0x12, 0x8a, 0x78, 0xdd, 0x60, 0x52, 0x7e, - 0x7a, 0x10, 0x98, 0x5c, 0xdc, 0xb3, 0x02, 0x7e, 0x11, 0x4c, 0xda, 0xf8, 0xf8, 0x1a, 0x71, 0x9a, - 0xc1, 0x81, 0x3e, 0xb3, 0xac, 0xad, 0x66, 0x2b, 0x33, 0x9d, 0x76, 0x71, 0x72, 0x3b, 0x22, 0xa2, - 0x98, 0xcf, 0x85, 0x4d, 0x47, 0x0a, 0x9f, 0x4f, 0x08, 0x47, 0x44, 0x14, 0xf3, 0x59, 0x07, 0xe1, - 0xe1, 0x80, 0x25, 0x97, 0x3e, 0xdb, 0xfd, 0x32, 0xdc, 0x13, 0x64, 0x14, 0xf1, 0xe1, 0x2a, 0xc8, - 0xdb, 0xf8, 0x98, 0xbf, 0xe2, 0xf5, 0x39, 0xae, 0x96, 0xcf, 0x92, 0xb7, 0x25, 0x0d, 0x29, 0x2e, - 0x97, 0x34, 0x1d, 0x21, 0x39, 0x9f, 0x90, 0x94, 0x34, 0xa4, 0xb8, 0x2c, 0x88, 0x43, 0xc7, 0xbc, - 0x15, 0x12, 0x21, 0x0c, 0xb9, 0x67, 0x54, 0x10, 0xdf, 0x88, 0x59, 0x28, 0x29, 0xc7, 0x5e, 0xd1, - 0x76, 0x68, 0x05, 0xa6, 0x67, 0x91, 0xdd, 0x86, 0x7e, 0x81, 0xfb, 0x9f, 0xf7, 0xc9, 0xdb, 0x8a, - 0x8a, 0x12, 0x12, 0x90, 0x80, 0x31, 0xe2, 0x84, 0xb6, 0xfe, 0x08, 0xbf, 0xd8, 0x87, 0x12, 0x82, - 0x2a, 0x73, 0x36, 0x9d, 0xd0, 0x46, 0x5c, 0x3d, 0x7c, 0x1e, 0xcc, 0xd8, 0xf8, 0x98, 0x95, 0x03, - 0xe2, 0x07, 0xec, 0x7d, 0xbf, 0xc0, 0x37, 0x3f, 0xcf, 0x3a, 0xce, 0xed, 0x24, 0x03, 0x75, 0xcb, - 0xf1, 0x85, 0xa6, 0x93, 0x58, 0x78, 0x31, 0xb1, 0x30, 0xc9, 0x40, 0xdd, 0x72, 0xcc, 0xd3, 0x3e, - 0xb9, 0x15, 0x9a, 0x3e, 0xa9, 0xeb, 0x8f, 0xf2, 0x26, 0x55, 0xce, 0xf7, 0x05, 0x0d, 0x29, 0x2e, - 0x6c, 0x45, 0xe3, 0x1e, 0x9d, 0xa7, 0xe1, 0x8d, 0xe1, 0x56, 0xf2, 0x5d, 0x7f, 0xdd, 0xf7, 0xf1, - 0x89, 0xb8, 0x69, 0x92, 0x83, 0x1e, 0x48, 0x41, 0x0e, 0x5b, 0xd6, 0x6e, 0x43, 0xbf, 0xc4, 0x7d, - 0x3f, 0xec, 0x1b, 0x44, 0x55, 0x9d, 0x75, 0x06, 0x82, 0x04, 0x16, 0x03, 0x75, 0x1d, 0x16, 0x1a, - 0x8b, 0xa3, 0x05, 0xdd, 0x65, 0x20, 0x48, 0x60, 0xf1, 0x9d, 0x3a, 0x27, 0xbb, 0x0d, 0xfd, 0xff, - 0x46, 0xbc, 0x53, 0x06, 0x82, 0x04, 0x16, 0x34, 0x41, 0xd6, 0x71, 0x03, 0x7d, 0x69, 
0x24, 0xd7, - 0x33, 0xbf, 0x70, 0x76, 0xdc, 0x00, 0x31, 0x0c, 0xf8, 0x73, 0x0d, 0x00, 0x2f, 0x0e, 0xd1, 0xc7, - 0x86, 0x32, 0x45, 0x48, 0x41, 0x96, 0xe3, 0xd8, 0xde, 0x74, 0x02, 0xff, 0x24, 0x7e, 0x47, 0x26, - 0x72, 0x20, 0x61, 0x05, 0xfc, 0xad, 0x06, 0x1e, 0x49, 0xb6, 0xc9, 0xca, 0xbc, 0x02, 0xf7, 0xc8, - 0xf5, 0x61, 0x87, 0x79, 0xc5, 0x75, 0xad, 0x8a, 0xde, 0x69, 0x17, 0x1f, 0x59, 0xef, 0x83, 0x8a, - 0xfa, 0xda, 0x02, 0xff, 0xa8, 0x81, 0x79, 0x59, 0x45, 0x13, 0x16, 0x16, 0xb9, 0x03, 0xc9, 0xb0, - 0x1d, 0x98, 0xc6, 0x11, 0x7e, 0x54, 0xbf, 0x4b, 0xf7, 0xf0, 0x51, 0xaf, 0x69, 0xf0, 0x2f, 0x1a, - 0x98, 0xae, 0x13, 0x8f, 0x38, 0x75, 0xe2, 0x18, 0xcc, 0xd6, 0xe5, 0xa1, 0x8c, 0x0d, 0xd2, 0xb6, - 0x6e, 0x24, 0x20, 0x84, 0x99, 0x65, 0x69, 0xe6, 0x74, 0x92, 0x75, 0xda, 0x2e, 0x5e, 0x8c, 0x97, - 0x26, 0x39, 0xa8, 0xcb, 0x4a, 0xf8, 0x91, 0x06, 0x66, 0xe3, 0x03, 0x10, 0x57, 0xca, 0xca, 0x08, - 0xe3, 0x80, 0xb7, 0xaf, 0xeb, 0xdd, 0x80, 0x28, 0x6d, 0x01, 0xfc, 0x93, 0xc6, 0x3a, 0xb5, 0xe8, - 0xdd, 0x47, 0xf5, 0x12, 0xf7, 0xe5, 0x3b, 0x43, 0xf7, 0xa5, 0x42, 0x10, 0xae, 0xbc, 0x1c, 0xb7, - 0x82, 0x8a, 0x73, 0xda, 0x2e, 0x2e, 0x24, 0x3d, 0xa9, 0x18, 0x28, 0x69, 0x21, 0xfc, 0x91, 0x06, - 0xa6, 0x49, 0xdc, 0x71, 0x53, 0xfd, 0xf1, 0xa1, 0x38, 0xb1, 0x6f, 0x13, 0x2f, 0x5e, 0xea, 0x09, - 0x16, 0x45, 0x5d, 0xd8, 0xac, 0x83, 0x24, 0xc7, 0xd8, 0xf6, 0x2c, 0xa2, 0xff, 0xff, 0x90, 0x3b, - 0xc8, 0x4d, 0xa1, 0x17, 0x45, 0x00, 0xf0, 0x32, 0xc8, 0x3b, 0xa1, 0x65, 0xe1, 0x7d, 0x8b, 0xe8, - 0x4f, 0xf0, 0x5e, 0x44, 0x4d, 0x31, 0x77, 0x24, 0x1d, 0x29, 0x89, 0x45, 0xf6, 0x4e, 0x4a, 0xe5, - 0x19, 0x9c, 0x03, 0xd9, 0x43, 0x22, 0x7f, 0x0e, 0x46, 0xec, 0x4f, 0x58, 0x07, 0xb9, 0x16, 0xb6, - 0xc2, 0xe8, 0xa9, 0x37, 0xe4, 0x1a, 0x8d, 0x84, 0xf2, 0x17, 0x33, 0x2f, 0x68, 0x8b, 0xb7, 0x35, - 0x70, 0xb1, 0x7f, 0xfa, 0x3f, 0x54, 0xb3, 0x7e, 0xa5, 0x81, 0xf9, 0x9e, 0x4c, 0xef, 0x63, 0xd1, - 0xad, 0x6e, 0x8b, 0xde, 0x18, 0x76, 0xca, 0xd6, 0x02, 0xdf, 0x74, 0x9a, 0xbc, 0x4f, 0x49, 0x9a, - 0xf7, 0x53, 0x0d, 0xcc, 0xa5, 0x93, 0xe7, 0x61, 0xfa, 0xab, 0x74, 0x3b, 0x03, 0x2e, 0xf6, 0x6f, - 0xaf, 0xa0, 0xaf, 0xde, 0x91, 0xa3, 0x79, 0x8f, 0xf7, 0x9b, 0xdd, 0x7d, 0xa0, 0x81, 0xa9, 0x9b, - 0x4a, 0x2e, 0xfa, 0xb9, 0x70, 0xe8, 0x93, 0x80, 0xa8, 0x5a, 0xc5, 0x0c, 0x8a, 0x92, 0xb8, 0xa5, - 0x3f, 0x6b, 0x60, 0xa1, 0x6f, 0x19, 0x66, 0x0f, 0x56, 0x6c, 0x59, 0xee, 0x91, 0x18, 0xe8, 0x24, - 0xa6, 0xa5, 0xeb, 0x9c, 0x8a, 0x24, 0x37, 0xe1, 0xbd, 0xcc, 0xe7, 0xe5, 0xbd, 0xd2, 0xdf, 0x34, - 0xb0, 0x74, 0xaf, 0x48, 0x7c, 0x28, 0x47, 0xba, 0x0a, 0xf2, 0xb2, 0x85, 0x3a, 0xe1, 0xc7, 0x29, - 0x5f, 0x0d, 0xb2, 0x68, 0xf0, 0xff, 0x90, 0x11, 0x7f, 0x95, 0xde, 0xd7, 0xc0, 0x5c, 0x8d, 0xf8, - 0x2d, 0xd3, 0x20, 0x88, 0x34, 0x88, 0x4f, 0x1c, 0x83, 0xc0, 0x35, 0x30, 0xc9, 0x7f, 0xa7, 0xf3, - 0xb0, 0x11, 0x0d, 0xb1, 0xe7, 0xa5, 0xcb, 0x27, 0x77, 0x22, 0x06, 0x8a, 0x65, 0xd4, 0xc0, 0x3b, - 0x33, 0x70, 0xe0, 0xbd, 0x04, 0xc6, 0xbc, 0x78, 0x1c, 0x98, 0x67, 0x5c, 0x3e, 0x01, 0xe4, 0xd4, - 0xd2, 0xdf, 0x35, 0xd0, 0xef, 0xbf, 0x55, 0x60, 0x0b, 0x4c, 0x50, 0x61, 0x9c, 0x74, 0xde, 0xee, - 0x03, 0x3a, 0x2f, 0xbd, 0x55, 0x71, 0x4d, 0x44, 0xd4, 0x08, 0x8c, 0xf9, 0xcf, 0xc0, 0x95, 0xd0, - 0xa9, 0xcb, 0x01, 0xde, 0xb4, 0xf0, 0x5f, 0x75, 0x5d, 0xd0, 0x90, 0xe2, 0xc2, 0x4b, 0x62, 0xd4, - 0x94, 0x98, 0xdf, 0x44, 0x63, 0xa6, 0xca, 0x95, 0x3b, 0x77, 0x0b, 0xe7, 0x3e, 0xbe, 0x5b, 0x38, - 0xf7, 0xc9, 0xdd, 0xc2, 0xb9, 0xef, 0x76, 0x0a, 0xda, 0x9d, 0x4e, 0x41, 0xfb, 0xb8, 0x53, 0xd0, - 0x3e, 0xe9, 0x14, 0xb4, 0x7f, 0x75, 0x0a, 0xda, 0xcf, 0x3e, 0x2d, 0x9c, 0xfb, 0xe6, 0x84, 0x34, - 0xed, 0xbf, 
0x01, 0x00, 0x00, 0xff, 0xff, 0x16, 0xda, 0x75, 0x14, 0x43, 0x2a, 0x00, 0x00, -} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto index 7e18cb9e3..7f0902efa 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto @@ -30,32 +30,33 @@ option go_package = "v1beta1"; // ConversionRequest describes the conversion request parameters. message ConversionRequest { - // `uid` is an identifier for the individual request/response. It allows us to distinguish instances of requests which are - // otherwise identical (parallel requests, requests when earlier requests did not modify etc) - // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. + // uid is an identifier for the individual request/response. It allows distinguishing instances of requests which are + // otherwise identical (parallel requests, etc). + // The UID is meant to track the round trip (request/response) between the Kubernetes API server and the webhook, not the user request. // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. optional string uid = 1; - // `desiredAPIVersion` is the version to convert given objects to. e.g. "myapi.example.com/v1" + // desiredAPIVersion is the version to convert given objects to. e.g. "myapi.example.com/v1" optional string desiredAPIVersion = 2; - // `objects` is the list of CR objects to be converted. + // objects is the list of custom resource objects to be converted. repeated k8s.io.apimachinery.pkg.runtime.RawExtension objects = 3; } // ConversionResponse describes a conversion response. message ConversionResponse { - // `uid` is an identifier for the individual request/response. - // This should be copied over from the corresponding AdmissionRequest. + // uid is an identifier for the individual request/response. + // This should be copied over from the corresponding `request.uid`. optional string uid = 1; - // `convertedObjects` is the list of converted version of `request.objects` if the `result` is successful otherwise empty. - // The webhook is expected to set apiVersion of these objects to the ConversionRequest.desiredAPIVersion. The list - // must also has the same size as input list with the same objects in the same order(i.e. equal UIDs and object meta) + // convertedObjects is the list of converted version of `request.objects` if the `result` is successful, otherwise empty. + // The webhook is expected to set `apiVersion` of these objects to the `request.desiredAPIVersion`. The list + // must also have the same size as the input list with the same objects in the same order (equal kind, metadata.uid, metadata.name and metadata.namespace). + // The webhook is allowed to mutate labels and annotations. Any other change to the metadata is silently ignored. repeated k8s.io.apimachinery.pkg.runtime.RawExtension convertedObjects = 2; - // `result` contains the result of conversion with extra details if the conversion failed. `result.status` determines if - // the conversion failed or succeeded. The `result.status` field is required and represent the success or failure of the + // result contains the result of conversion with extra details if the conversion failed. 
`result.status` determines if + // the conversion failed or succeeded. The `result.status` field is required and represents the success or failure of the // conversion. A successful conversion must set `result.status` to `Success`. A failed conversion must set // `result.status` to `Failure` and provide more details in `result.message` and return http status 200. The `result.message` // will be used to construct an error message for the end user. @@ -64,11 +65,11 @@ message ConversionResponse { // ConversionReview describes a conversion request/response. message ConversionReview { - // `request` describes the attributes for the conversion request. + // request describes the attributes for the conversion request. // +optional optional ConversionRequest request = 1; - // `response` describes the attributes for the conversion response. + // response describes the attributes for the conversion response. // +optional optional ConversionResponse response = 2; } @@ -79,12 +80,12 @@ message CustomResourceColumnDefinition { optional string name = 1; // type is an OpenAPI type definition for this column. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. optional string type = 2; // format is an optional OpenAPI type definition for this column. The 'name' format is applied // to the primary identifier column to assist in clients identifying column is the resource name. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. // +optional optional string format = 3; @@ -94,68 +95,71 @@ message CustomResourceColumnDefinition { // priority is an integer defining the relative importance of this column compared to others. Lower // numbers are considered higher priority. Columns that may be omitted in limited space scenarios - // should be given a higher priority. + // should be given a priority greater than 0. // +optional optional int32 priority = 5; - // JSONPath is a simple JSON path, i.e. with array notation. + // JSONPath is a simple JSON path (i.e. with array notation) which is evaluated against + // each custom resource to produce the value for this column. optional string JSONPath = 6; } // CustomResourceConversion describes how to convert different versions of a CR. message CustomResourceConversion { - // `strategy` specifies the conversion strategy. Allowed values are: - // - `None`: The converter only change the apiVersion and would not touch any other field in the CR. - // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information is needed for this option. + // strategy specifies how custom resources are converted between versions. Allowed values are: + // - `None`: The converter only change the apiVersion and would not touch any other field in the custom resource. + // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information + // is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhookClientConfig to be set. optional string strategy = 1; - // `webhookClientConfig` is the instructions for how to call the webhook if strategy is `Webhook`. 
This field is - // alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. + // webhookClientConfig is the instructions for how to call the webhook if strategy is `Webhook`. + // Required when `strategy` is set to `Webhook`. // +optional optional WebhookClientConfig webhookClientConfig = 2; - // ConversionReviewVersions is an ordered list of preferred `ConversionReview` - // versions the Webhook expects. API server will try to use first version in + // conversionReviewVersions is an ordered list of preferred `ConversionReview` + // versions the Webhook expects. The API server will use the first version in // the list which it supports. If none of the versions specified in this list - // supported by API server, conversion will fail for this object. + // are supported by API server, conversion will fail for the custom resource. // If a persisted Webhook configuration specifies allowed versions and does not // include any versions known to the API Server, calls to the webhook will fail. - // Default to `['v1beta1']`. + // Defaults to `["v1beta1"]`. // +optional repeated string conversionReviewVersions = 3; } // CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format // <.spec.name>.<.spec.group>. +// Deprecated in v1.16, planned for removal in v1.19. Use apiextensions.k8s.io/v1 CustomResourceDefinition instead. message CustomResourceDefinition { optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec describes how the user wants the resources to appear + // spec describes how the user wants the resources to appear optional CustomResourceDefinitionSpec spec = 2; - // Status indicates the actual state of the CustomResourceDefinition + // status indicates the actual state of the CustomResourceDefinition // +optional optional CustomResourceDefinitionStatus status = 3; } // CustomResourceDefinitionCondition contains details for the current condition of this pod. message CustomResourceDefinitionCondition { - // Type is the type of the condition. + // type is the type of the condition. Types include Established, NamesAccepted and Terminating. optional string type = 1; - // Status is the status of the condition. + // status is the status of the condition. // Can be True, False, Unknown. optional string status = 2; - // Last time the condition transitioned from one status to another. + // lastTransitionTime last time the condition transitioned from one status to another. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; - // Unique, one-word, CamelCase reason for the condition's last transition. + // reason is a unique, one-word, CamelCase reason for the condition's last transition. // +optional optional string reason = 4; - // Human-readable message indicating details about last transition. + // message is a human-readable message indicating details about last transition. // +optional optional string message = 5; } @@ -164,71 +168,81 @@ message CustomResourceDefinitionCondition { message CustomResourceDefinitionList { optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items individual CustomResourceDefinitions + // items list individual CustomResourceDefinition objects repeated CustomResourceDefinition items = 2; } // CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition message CustomResourceDefinitionNames { - // Plural is the plural name of the resource to serve. 
It must match the name of the CustomResourceDefinition-registration - too: plural.group and it must be all lowercase. + // plural is the plural name of the resource to serve. + // The custom resources are served under `/apis/<group>/<version>/.../<plural>`. + // Must match the name of the CustomResourceDefinition (in the form `<names.plural>.<group>`). + // Must be all lowercase. optional string plural = 1; - // Singular is the singular name of the resource. It must be all lowercase Defaults to lowercased + // singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`. // +optional optional string singular = 2; - // ShortNames are short names for the resource. It must be all lowercase. + // shortNames are short names for the resource, exposed in API discovery documents, + // and used by clients to support invocations like `kubectl get <shortname>`. + // It must be all lowercase. // +optional repeated string shortNames = 3; - // Kind is the serialized kind of the resource. It is normally CamelCase and singular. + // kind is the serialized kind of the resource. It is normally CamelCase and singular. + // Custom resource instances will use this value as the `kind` attribute in API calls. optional string kind = 4; - // ListKind is the serialized kind of the list for this resource. Defaults to List. + // listKind is the serialized kind of the list for this resource. Defaults to "`kind`List". // +optional optional string listKind = 5; - // Categories is a list of grouped resources custom resources belong to (e.g. 'all') + // categories is a list of grouped resources this custom resource belongs to (e.g. 'all'). + // This is published in API discovery documents, and used by clients to support invocations like + // `kubectl get all`. // +optional repeated string categories = 6; } // CustomResourceDefinitionSpec describes how a user wants their resource to appear message CustomResourceDefinitionSpec { - // Group is the group this resource belongs in + // group is the API group of the defined custom resource. + // The custom resources are served under `/apis/<group>/...`. + // Must match the name of the CustomResourceDefinition (in the form `<names.plural>.<group>`). optional string group = 1; - // Version is the version this resource belongs in - // Should be always first item in Versions field if provided. - // Optional, but at least one of Version or Versions must be set. - // Deprecated: Please use `Versions`. + // version is the API version of the defined custom resource. + // The custom resources are served under `/apis/<group>/<version>/...`. + // Must match the name of the first item in the `versions` list if `version` and `versions` are both specified. + // Optional if `versions` is specified. + // Deprecated: use `versions` instead. // +optional optional string version = 2; - // Names are the names used to describe this custom resource + // names specify the resource and kind names for the custom resource. optional CustomResourceDefinitionNames names = 3; - // Scope indicates whether this resource is cluster or namespace scoped. Default is namespaced + // scope indicates whether the defined custom resource is cluster- or namespace-scoped. + // Allowed values are `Cluster` and `Namespaced`. Default is `Namespaced`. optional string scope = 4; - // Validation describes the validation methods for CustomResources - // Optional, the global validation schema for all versions. + // validation describes the schema used for validation and pruning of the custom resource. + // If present, this validation schema is used to validate all versions.
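
For illustration, and not part of the vendored upstream files: a minimal sketch of how the naming rules above look with the v1beta1 Go types vendored here, assuming the druid group and Etcd kind as example values.

package example

import (
	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// minimalCRD sketches a CustomResourceDefinition whose metadata.name follows
// the required <names.plural>.<group> form described above.
func minimalCRD() *apiextensionsv1beta1.CustomResourceDefinition {
	return &apiextensionsv1beta1.CustomResourceDefinition{
		ObjectMeta: metav1.ObjectMeta{
			Name: "etcds.druid.gardener.cloud", // <plural>.<group> (example values)
		},
		Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{
			Group:   "druid.gardener.cloud",
			Version: "v1alpha1", // deprecated in favour of versions, still honoured
			Scope:   apiextensionsv1beta1.NamespaceScoped,
			Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
				Plural:   "etcds", // served under /apis/<group>/<version>/namespaces/<ns>/etcds
				Singular: "etcd",
				Kind:     "Etcd",
				ListKind: "EtcdList",
			},
		},
	}
}
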
// Top-level and per-version schemas are mutually exclusive. // +optional optional CustomResourceValidation validation = 5; - // Subresources describes the subresources for CustomResource - // Optional, the global subresources for all versions. + // subresources specify what subresources the defined custom resource has. + // If present, this field configures subresources for all versions. // Top-level and per-version subresources are mutually exclusive. // +optional optional CustomResourceSubresources subresources = 6; - // Versions is the list of all supported versions for this resource. - // If Version field is provided, this field is optional. - // Validation: All versions must use the same validation schema for now. i.e., top - // level Validation field is applied to all of these versions. - // Order: The version name will be used to compute the order. + // versions is the list of all API versions of the defined custom resource. + // Optional if `version` is specified. + // The name of the first item in the `versions` list must match the `version` field if `version` and `versions` are both specified. + // Version names are used to compute the order in which served versions are listed in API discovery. // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first @@ -238,92 +252,106 @@ message CustomResourceDefinitionSpec { // +optional repeated CustomResourceDefinitionVersion versions = 7; - // AdditionalPrinterColumns are additional columns shown e.g. in kubectl next to the name. Defaults to a created-at column. - // Optional, the global columns for all versions. + // additionalPrinterColumns specifies additional columns returned in Table output. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. + // If present, this field configures columns for all versions. // Top-level and per-version columns are mutually exclusive. + // If no top-level or per-version columns are specified, a single column displaying the age of the custom resource is used. // +optional repeated CustomResourceColumnDefinition additionalPrinterColumns = 8; - // `conversion` defines conversion settings for the CRD. + // conversion defines conversion settings for the CRD. // +optional optional CustomResourceConversion conversion = 9; + + // preserveUnknownFields indicates that object fields which are not specified + // in the OpenAPI schema should be preserved when persisting to storage. + // apiVersion, kind, metadata and known fields inside metadata are always preserved. + // If false, schemas must be defined for all versions. + // Defaults to true in v1beta for backwards compatibility. + // Deprecated: will be required to be false in v1. Preservation of unknown fields can be specified + // in the validation schema using the `x-kubernetes-preserve-unknown-fields: true` extension. + // See https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#pruning-versus-preserving-unknown-fields for details. 
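
A hedged sketch of how the new pruning controls surface in the vendored Go API (spec.preserveUnknownFields plus the per-property x-kubernetes-preserve-unknown-fields extension decoded as field 38 above); the "config" property is a made-up example.

package example

import (
	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
)

// pruningSpec opts out of the v1beta1 default of preserving unknown fields,
// while keeping one free-form sub-object un-pruned via
// x-kubernetes-preserve-unknown-fields.
func pruningSpec() apiextensionsv1beta1.CustomResourceDefinitionSpec {
	preserveUnknown := false // spec.preserveUnknownFields: prune unspecified fields
	preserveConfig := true   // x-kubernetes-preserve-unknown-fields on one property
	return apiextensionsv1beta1.CustomResourceDefinitionSpec{
		PreserveUnknownFields: &preserveUnknown,
		Validation: &apiextensionsv1beta1.CustomResourceValidation{
			OpenAPIV3Schema: &apiextensionsv1beta1.JSONSchemaProps{
				Type: "object",
				Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{
					"config": {
						Type:                   "object",
						XPreserveUnknownFields: &preserveConfig,
					},
				},
			},
		},
	}
}
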
+ // +optional + optional bool preserveUnknownFields = 10; } // CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition message CustomResourceDefinitionStatus { - // Conditions indicate state for particular aspects of a CustomResourceDefinition + // conditions indicate state for particular aspects of a CustomResourceDefinition + // +optional repeated CustomResourceDefinitionCondition conditions = 1; - // AcceptedNames are the names that are actually being used to serve discovery + // acceptedNames are the names that are actually being used to serve discovery. // They may be different than the names in spec. optional CustomResourceDefinitionNames acceptedNames = 2; - // StoredVersions are all versions of CustomResources that were ever persisted. Tracking these + // storedVersions lists all versions of CustomResources that were ever persisted. Tracking these // versions allows a migration path for stored versions in etcd. The field is mutable - // so the migration controller can first finish a migration to another version (i.e. - // that no old objects are left in the storage), and then remove the rest of the + // so a migration controller can finish a migration to another version (ensuring + // no old objects are left in storage), and then remove the rest of the // versions from this list. - // None of the versions in this list can be removed from the spec.Versions field. + // Versions may not be removed from `spec.versions` while they exist in this list. repeated string storedVersions = 3; } // CustomResourceDefinitionVersion describes a version for CRD. message CustomResourceDefinitionVersion { - // Name is the version name, e.g. “v1”, “v2beta1”, etc. + // name is the version name, e.g. “v1”, “v2beta1”, etc. + // The custom resources are served under this version at `/apis/<group>/<version>/...` if `served` is true. optional string name = 1; - // Served is a flag enabling/disabling this version from being served via REST APIs + // served is a flag enabling/disabling this version from being served via REST APIs optional bool served = 2; - // Storage flags the version as storage version. There must be exactly one - // flagged as storage version. + // storage indicates this version should be used when persisting custom resources to storage. + // There must be exactly one version with storage=true. optional bool storage = 3; - // Schema describes the schema for CustomResource used in validation, pruning, and defaulting. + // schema describes the schema used for validation and pruning of this version of the custom resource. // Top-level and per-version schemas are mutually exclusive. - // Per-version schemas must not all be set to identical values (top-level validation schema should be used instead) - // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. + // Per-version schemas must not all be set to identical values (top-level validation schema should be used instead). // +optional optional CustomResourceValidation schema = 4; - // Subresources describes the subresources for CustomResource + // subresources specify what subresources this version of the defined custom resource have. // Top-level and per-version subresources are mutually exclusive. - // Per-version subresources must not all be set to identical values (top-level subresources should be used instead) - // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature.
+ // Per-version subresources must not all be set to identical values (top-level subresources should be used instead). // +optional optional CustomResourceSubresources subresources = 5; - // AdditionalPrinterColumns are additional columns shown e.g. in kubectl next to the name. Defaults to a created-at column. + // additionalPrinterColumns specifies additional columns returned in Table output. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. // Top-level and per-version columns are mutually exclusive. - // Per-version columns must not all be set to identical values (top-level columns should be used instead) - // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. - // NOTE: CRDs created prior to 1.13 populated the top-level additionalPrinterColumns field by default. To apply an - // update that changes to per-version additionalPrinterColumns, the top-level additionalPrinterColumns field must - // be explicitly set to null + // Per-version columns must not all be set to identical values (top-level columns should be used instead). + // If no top-level or per-version columns are specified, a single column displaying the age of the custom resource is used. // +optional repeated CustomResourceColumnDefinition additionalPrinterColumns = 6; } // CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources. message CustomResourceSubresourceScale { - // SpecReplicasPath defines the JSON path inside of a CustomResource that corresponds to Scale.Spec.Replicas. + // specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under .spec. - // If there is no value under the given path in the CustomResource, the /scale subresource will return an error on GET. + // Must be a JSON Path under `.spec`. + // If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET. optional string specReplicasPath = 1; - // StatusReplicasPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Replicas. + // statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under .status. - // If there is no value under the given path in the CustomResource, the status replica value in the /scale subresource + // Must be a JSON Path under `.status`. + // If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource // will default to 0. optional string statusReplicasPath = 2; - // LabelSelectorPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Selector. + // labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under .status. - // Must be set to work with HPA. - // If there is no value under the given path in the CustomResource, the status label selector value in the /scale + // Must be a JSON Path under `.status` or `.spec`. + // Must be set to work with HorizontalPodAutoscaler. 
+ // The field pointed by this JSON path must be a string field (not a complex selector struct) + // which contains a serialized label selector in string form. + // More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource + // If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` // subresource will default to the empty string. // +optional optional string labelSelectorPath = 3; @@ -339,18 +367,21 @@ message CustomResourceSubresourceStatus { // CustomResourceSubresources defines the status and scale subresources for CustomResources. message CustomResourceSubresources { - // Status denotes the status subresource for CustomResources + // status indicates the custom resource should serve a `/status` subresource. + // When enabled: + // 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. + // 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object. // +optional optional CustomResourceSubresourceStatus status = 1; - // Scale denotes the scale subresource for CustomResources + // scale indicates the custom resource should serve a `/scale` subresource that returns an `autoscaling/v1` Scale object. // +optional optional CustomResourceSubresourceScale scale = 2; } // CustomResourceValidation is a list of validation methods for CustomResources. message CustomResourceValidation { - // OpenAPIV3Schema is the OpenAPI v3 schema to be validated against. + // openAPIV3Schema is the OpenAPI v3 schema to use for validation and pruning. // +optional optional JSONSchemaProps openAPIV3Schema = 1; } @@ -384,6 +415,9 @@ message JSONSchemaProps { optional string title = 7; + // default is a default value for undefined object fields. + // Defaulting is a beta feature under the CustomResourceDefaulting feature gate. + // CustomResourceDefinitions with defaults must be created using the v1 (or newer) CustomResourceDefinition API. optional JSON default = 8; optional double maximum = 9; @@ -443,6 +477,65 @@ message JSONSchemaProps { optional JSON example = 36; optional bool nullable = 37; + + // x-kubernetes-preserve-unknown-fields stops the API server + // decoding step from pruning fields which are not specified + // in the validation schema. This affects fields recursively, + // but switches back to normal pruning behaviour if nested + // properties or additionalProperties are specified in the schema. + // This can either be true or undefined. False is forbidden. + optional bool xKubernetesPreserveUnknownFields = 38; + + // x-kubernetes-embedded-resource defines that the value is an + // embedded Kubernetes runtime.Object, with TypeMeta and + // ObjectMeta. The type must be object. It is allowed to further + // restrict the embedded object. kind, apiVersion and metadata + // are validated automatically. x-kubernetes-preserve-unknown-fields + // is allowed to be true, but does not have to be if the object + // is fully specified (up to kind, apiVersion, metadata). + optional bool xKubernetesEmbeddedResource = 39; + + // x-kubernetes-int-or-string specifies that this value is + // either an integer or a string. 
If this is true, an empty + // type is allowed and type as child of anyOf is permitted + // if following one of the following patterns: + // + // 1) anyOf: + // - type: integer + // - type: string + // 2) allOf: + // - anyOf: + // - type: integer + // - type: string + // - ... zero or more + optional bool xKubernetesIntOrString = 40; + + // x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used + // as the index of the map. + // + // This tag MUST only be used on lists that have the "x-kubernetes-list-type" + // extension set to "map". Also, the values specified for this attribute must + // be a scalar typed field of the child structure (no nesting is supported). + // + // +optional + repeated string xKubernetesListMapKeys = 41; + + // x-kubernetes-list-type annotates an array to further describe its topology. + // This extension must only be used on lists and may have 3 possible values: + // + // 1) `atomic`: the list is treated as a single entity, like a scalar. + // Atomic lists will be entirely replaced when updated. This extension + // may be used on any type of list (struct, scalar, ...). + // 2) `set`: + // Sets are lists that must not have multiple items with the same value. Each + // value must be a scalar (or another atomic type). + // 3) `map`: + // These lists are like maps in that their elements have a non-index key + // used to identify them. Order is preserved upon merge. The map tag + // must only be used on a list with elements of type object. + // Defaults to atomic for arrays. + // +optional + optional string xKubernetesListType = 42; } // JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps @@ -470,24 +563,28 @@ message JSONSchemaPropsOrStringArray { // ServiceReference holds a reference to Service.legacy.k8s.io message ServiceReference { - // `namespace` is the namespace of the service. + // namespace is the namespace of the service. // Required optional string namespace = 1; - // `name` is the name of the service. + // name is the name of the service. // Required optional string name = 2; - // `path` is an optional URL path which will be sent in any request to - // this service. + // path is an optional URL path at which the webhook will be contacted. // +optional optional string path = 3; + + // port is an optional service port at which the webhook will be contacted. + // `port` should be a valid port number (1-65535, inclusive). + // Defaults to 443 for backward compatibility. + // +optional + optional int32 port = 4; } -// WebhookClientConfig contains the information to make a TLS -// connection with the webhook. It has the same field as admissionregistration.v1beta1.WebhookClientConfig. +// WebhookClientConfig contains the information to make a TLS connection with the webhook. message WebhookClientConfig { - // `url` gives the location of the webhook, in standard URL form + // url gives the location of the webhook, in standard URL form // (`scheme://host:port/path`). Exactly one of `url` or `service` // must be specified. // @@ -516,17 +613,15 @@ message WebhookClientConfig { // +optional optional string url = 3; - // `service` is a reference to the service for this webhook. Either - // `service` or `url` must be specified. + // service is a reference to the service for this webhook. Either + // service or url must be specified. // // If the webhook is running within the cluster, then you should use `service`. // - // Port 443 will be used if it is open, otherwise it is an error. 
- // // +optional optional ServiceReference service = 1; - // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. // If unspecified, system trust roots on the apiserver are used. // +optional optional bytes caBundle = 2; diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go index 973ac0eed..f6c260b68 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go @@ -26,6 +26,11 @@ import ( type ConversionStrategyType string const ( + // KubeAPIApprovedAnnotation is an annotation that must be set to create a CRD for the k8s.io, *.k8s.io, kubernetes.io, or *.kubernetes.io namespaces. + // The value should be a link to a URL where the current spec was approved, so updates to the spec should also update the URL. + // If the API is unapproved, you may set the annotation to a string starting with `"unapproved"`. For instance, `"unapproved, temporarily squatting"` or `"unapproved, experimental-only"`. This is discouraged. + KubeAPIApprovedAnnotation = "api-approved.kubernetes.io" + // NoneConverter is a converter that only sets apiversion of the CR and leave everything else unchanged. NoneConverter ConversionStrategyType = "None" // WebhookConverter is a converter that calls to an external webhook to convert the CR. @@ -34,33 +39,36 @@ const ( // CustomResourceDefinitionSpec describes how a user wants their resource to appear type CustomResourceDefinitionSpec struct { - // Group is the group this resource belongs in + // group is the API group of the defined custom resource. + // The custom resources are served under `/apis/<group>/...`. + // Must match the name of the CustomResourceDefinition (in the form `<names.plural>.<group>`). Group string `json:"group" protobuf:"bytes,1,opt,name=group"` - // Version is the version this resource belongs in - // Should be always first item in Versions field if provided. - // Optional, but at least one of Version or Versions must be set. - // Deprecated: Please use `Versions`. + // version is the API version of the defined custom resource. + // The custom resources are served under `/apis/<group>/<version>/...`. + // Must match the name of the first item in the `versions` list if `version` and `versions` are both specified. + // Optional if `versions` is specified. + // Deprecated: use `versions` instead. // +optional Version string `json:"version,omitempty" protobuf:"bytes,2,opt,name=version"` - // Names are the names used to describe this custom resource + // names specify the resource and kind names for the custom resource. Names CustomResourceDefinitionNames `json:"names" protobuf:"bytes,3,opt,name=names"` - // Scope indicates whether this resource is cluster or namespace scoped. Default is namespaced + // scope indicates whether the defined custom resource is cluster- or namespace-scoped. + // Allowed values are `Cluster` and `Namespaced`. Default is `Namespaced`. Scope ResourceScope `json:"scope" protobuf:"bytes,4,opt,name=scope,casttype=ResourceScope"` - // Validation describes the validation methods for CustomResources - // Optional, the global validation schema for all versions. + // validation describes the schema used for validation and pruning of the custom resource.
+ // If present, this validation schema is used to validate all versions. // Top-level and per-version schemas are mutually exclusive. // +optional Validation *CustomResourceValidation `json:"validation,omitempty" protobuf:"bytes,5,opt,name=validation"` - // Subresources describes the subresources for CustomResource - // Optional, the global subresources for all versions. + // subresources specify what subresources the defined custom resource has. + // If present, this field configures subresources for all versions. // Top-level and per-version subresources are mutually exclusive. // +optional Subresources *CustomResourceSubresources `json:"subresources,omitempty" protobuf:"bytes,6,opt,name=subresources"` - // Versions is the list of all supported versions for this resource. - // If Version field is provided, this field is optional. - // Validation: All versions must use the same validation schema for now. i.e., top - // level Validation field is applied to all of these versions. - // Order: The version name will be used to compute the order. + // versions is the list of all API versions of the defined custom resource. + // Optional if `version` is specified. + // The name of the first item in the `versions` list must match the `version` field if `version` and `versions` are both specified. + // Version names are used to compute the order in which served versions are listed in API discovery. // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first @@ -69,44 +77,57 @@ type CustomResourceDefinitionSpec struct { // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. // +optional Versions []CustomResourceDefinitionVersion `json:"versions,omitempty" protobuf:"bytes,7,rep,name=versions"` - // AdditionalPrinterColumns are additional columns shown e.g. in kubectl next to the name. Defaults to a created-at column. - // Optional, the global columns for all versions. + // additionalPrinterColumns specifies additional columns returned in Table output. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. + // If present, this field configures columns for all versions. // Top-level and per-version columns are mutually exclusive. + // If no top-level or per-version columns are specified, a single column displaying the age of the custom resource is used. // +optional AdditionalPrinterColumns []CustomResourceColumnDefinition `json:"additionalPrinterColumns,omitempty" protobuf:"bytes,8,rep,name=additionalPrinterColumns"` - // `conversion` defines conversion settings for the CRD. + // conversion defines conversion settings for the CRD. // +optional Conversion *CustomResourceConversion `json:"conversion,omitempty" protobuf:"bytes,9,opt,name=conversion"` + + // preserveUnknownFields indicates that object fields which are not specified + // in the OpenAPI schema should be preserved when persisting to storage. + // apiVersion, kind, metadata and known fields inside metadata are always preserved. + // If false, schemas must be defined for all versions. + // Defaults to true in v1beta for backwards compatibility. + // Deprecated: will be required to be false in v1. 
Preservation of unknown fields can be specified + // in the validation schema using the `x-kubernetes-preserve-unknown-fields: true` extension. + // See https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#pruning-versus-preserving-unknown-fields for details. + // +optional + PreserveUnknownFields *bool `json:"preserveUnknownFields,omitempty" protobuf:"varint,10,opt,name=preserveUnknownFields"` } // CustomResourceConversion describes how to convert different versions of a CR. type CustomResourceConversion struct { - // `strategy` specifies the conversion strategy. Allowed values are: - // - `None`: The converter only change the apiVersion and would not touch any other field in the CR. - // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information is needed for this option. + // strategy specifies how custom resources are converted between versions. Allowed values are: + // - `None`: The converter only change the apiVersion and would not touch any other field in the custom resource. + // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information + // is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhookClientConfig to be set. Strategy ConversionStrategyType `json:"strategy" protobuf:"bytes,1,name=strategy"` - // `webhookClientConfig` is the instructions for how to call the webhook if strategy is `Webhook`. This field is - // alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. + // webhookClientConfig is the instructions for how to call the webhook if strategy is `Webhook`. + // Required when `strategy` is set to `Webhook`. // +optional WebhookClientConfig *WebhookClientConfig `json:"webhookClientConfig,omitempty" protobuf:"bytes,2,name=webhookClientConfig"` - // ConversionReviewVersions is an ordered list of preferred `ConversionReview` - // versions the Webhook expects. API server will try to use first version in + // conversionReviewVersions is an ordered list of preferred `ConversionReview` + // versions the Webhook expects. The API server will use the first version in // the list which it supports. If none of the versions specified in this list - // supported by API server, conversion will fail for this object. + // are supported by API server, conversion will fail for the custom resource. // If a persisted Webhook configuration specifies allowed versions and does not // include any versions known to the API Server, calls to the webhook will fail. - // Default to `['v1beta1']`. + // Defaults to `["v1beta1"]`. // +optional ConversionReviewVersions []string `json:"conversionReviewVersions,omitempty" protobuf:"bytes,3,rep,name=conversionReviewVersions"` } -// WebhookClientConfig contains the information to make a TLS -// connection with the webhook. It has the same field as admissionregistration.v1beta1.WebhookClientConfig. +// WebhookClientConfig contains the information to make a TLS connection with the webhook. type WebhookClientConfig struct { - // `url` gives the location of the webhook, in standard URL form + // url gives the location of the webhook, in standard URL form // (`scheme://host:port/path`). Exactly one of `url` or `service` // must be specified. 
// @@ -135,17 +156,15 @@ type WebhookClientConfig struct { // +optional URL *string `json:"url,omitempty" protobuf:"bytes,3,opt,name=url"` - // `service` is a reference to the service for this webhook. Either - // `service` or `url` must be specified. + // service is a reference to the service for this webhook. Either + // service or url must be specified. // // If the webhook is running within the cluster, then you should use `service`. // - // Port 443 will be used if it is open, otherwise it is an error. - // // +optional Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"` - // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. // If unspecified, system trust roots on the apiserver are used. // +optional CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"` @@ -153,47 +172,49 @@ type WebhookClientConfig struct { // ServiceReference holds a reference to Service.legacy.k8s.io type ServiceReference struct { - // `namespace` is the namespace of the service. + // namespace is the namespace of the service. // Required Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` - // `name` is the name of the service. + // name is the name of the service. // Required Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - // `path` is an optional URL path which will be sent in any request to - // this service. + // path is an optional URL path at which the webhook will be contacted. // +optional Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` + + // port is an optional service port at which the webhook will be contacted. + // `port` should be a valid port number (1-65535, inclusive). + // Defaults to 443 for backward compatibility. + // +optional + Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` } // CustomResourceDefinitionVersion describes a version for CRD. type CustomResourceDefinitionVersion struct { - // Name is the version name, e.g. “v1”, “v2beta1”, etc. + // name is the version name, e.g. “v1”, “v2beta1”, etc. + // The custom resources are served under this version at `/apis/<group>/<version>/...` if `served` is true. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Served is a flag enabling/disabling this version from being served via REST APIs + // served is a flag enabling/disabling this version from being served via REST APIs Served bool `json:"served" protobuf:"varint,2,opt,name=served"` - // Storage flags the version as storage version. There must be exactly one - // flagged as storage version. + // storage indicates this version should be used when persisting custom resources to storage. + // There must be exactly one version with storage=true. Storage bool `json:"storage" protobuf:"varint,3,opt,name=storage"` - // Schema describes the schema for CustomResource used in validation, pruning, and defaulting. + // schema describes the schema used for validation and pruning of this version of the custom resource. // Top-level and per-version schemas are mutually exclusive. - // Per-version schemas must not all be set to identical values (top-level validation schema should be used instead) - // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature.
+ // Per-version schemas must not all be set to identical values (top-level validation schema should be used instead). // +optional Schema *CustomResourceValidation `json:"schema,omitempty" protobuf:"bytes,4,opt,name=schema"` - // Subresources describes the subresources for CustomResource + // subresources specify what subresources this version of the defined custom resource have. // Top-level and per-version subresources are mutually exclusive. - // Per-version subresources must not all be set to identical values (top-level subresources should be used instead) - // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. + // Per-version subresources must not all be set to identical values (top-level subresources should be used instead). // +optional Subresources *CustomResourceSubresources `json:"subresources,omitempty" protobuf:"bytes,5,opt,name=subresources"` - // AdditionalPrinterColumns are additional columns shown e.g. in kubectl next to the name. Defaults to a created-at column. + // additionalPrinterColumns specifies additional columns returned in Table output. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. // Top-level and per-version columns are mutually exclusive. - // Per-version columns must not all be set to identical values (top-level columns should be used instead) - // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. - // NOTE: CRDs created prior to 1.13 populated the top-level additionalPrinterColumns field by default. To apply an - // update that changes to per-version additionalPrinterColumns, the top-level additionalPrinterColumns field must - // be explicitly set to null + // Per-version columns must not all be set to identical values (top-level columns should be used instead). + // If no top-level or per-version columns are specified, a single column displaying the age of the custom resource is used. // +optional AdditionalPrinterColumns []CustomResourceColumnDefinition `json:"additionalPrinterColumns,omitempty" protobuf:"bytes,6,rep,name=additionalPrinterColumns"` } @@ -203,11 +224,11 @@ type CustomResourceColumnDefinition struct { // name is a human readable name for the column. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // type is an OpenAPI type definition for this column. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. Type string `json:"type" protobuf:"bytes,2,opt,name=type"` // format is an optional OpenAPI type definition for this column. The 'name' format is applied // to the primary identifier column to assist in clients identifying column is the resource name. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. // +optional Format string `json:"format,omitempty" protobuf:"bytes,3,opt,name=format"` // description is a human readable description of this column. @@ -215,31 +236,38 @@ type CustomResourceColumnDefinition struct { Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` // priority is an integer defining the relative importance of this column compared to others. 
Lower // numbers are considered higher priority. Columns that may be omitted in limited space scenarios - // should be given a higher priority. + // should be given a priority greater than 0. // +optional Priority int32 `json:"priority,omitempty" protobuf:"bytes,5,opt,name=priority"` - - // JSONPath is a simple JSON path, i.e. with array notation. + // JSONPath is a simple JSON path (i.e. with array notation) which is evaluated against + // each custom resource to produce the value for this column. JSONPath string `json:"JSONPath" protobuf:"bytes,6,opt,name=JSONPath"` } // CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition type CustomResourceDefinitionNames struct { - // Plural is the plural name of the resource to serve. It must match the name of the CustomResourceDefinition-registration - // too: plural.group and it must be all lowercase. + // plural is the plural name of the resource to serve. + // The custom resources are served under `/apis/<group>/<version>/.../<plural>`. + // Must match the name of the CustomResourceDefinition (in the form `<names.plural>.<group>`). + // Must be all lowercase. Plural string `json:"plural" protobuf:"bytes,1,opt,name=plural"` - // Singular is the singular name of the resource. It must be all lowercase Defaults to lowercased + // singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`. // +optional Singular string `json:"singular,omitempty" protobuf:"bytes,2,opt,name=singular"` - // ShortNames are short names for the resource. It must be all lowercase. + // shortNames are short names for the resource, exposed in API discovery documents, + // and used by clients to support invocations like `kubectl get <shortname>`. + // It must be all lowercase. // +optional ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,3,opt,name=shortNames"` - // Kind is the serialized kind of the resource. It is normally CamelCase and singular. + // kind is the serialized kind of the resource. It is normally CamelCase and singular. + // Custom resource instances will use this value as the `kind` attribute in API calls. Kind string `json:"kind" protobuf:"bytes,4,opt,name=kind"` - // ListKind is the serialized kind of the list for this resource. Defaults to List. + // listKind is the serialized kind of the list for this resource. Defaults to "`kind`List". // +optional ListKind string `json:"listKind,omitempty" protobuf:"bytes,5,opt,name=listKind"` - // Categories is a list of grouped resources custom resources belong to (e.g. 'all') + // categories is a list of grouped resources this custom resource belongs to (e.g. 'all'). + // This is published in API discovery documents, and used by clients to support invocations like + // `kubectl get all`. // +optional Categories []string `json:"categories,omitempty" protobuf:"bytes,6,rep,name=categories"` } @@ -275,43 +303,65 @@ const ( // NamesAccepted means the names chosen for this CustomResourceDefinition do not conflict with others in // the group and are therefore accepted. NamesAccepted CustomResourceDefinitionConditionType = "NamesAccepted" + // NonStructuralSchema means that one or more OpenAPI schema is not structural.
+ // + // A schema is structural if it specifies types for all values, with the only exceptions of those with + // - x-kubernetes-int-or-string: true — for fields which can be integer or string + // - x-kubernetes-preserve-unknown-fields: true — for raw, unspecified JSON values + // and there is no type, additionalProperties, default, nullable or x-kubernetes-* vendor extenions + // specified under allOf, anyOf, oneOf or not. + // + // Non-structural schemas will not be allowed anymore in v1 API groups. Moreover, new features will not be + // available for non-structural CRDs: + // - pruning + // - defaulting + // - read-only + // - OpenAPI publishing + // - webhook conversion + NonStructuralSchema CustomResourceDefinitionConditionType = "NonStructuralSchema" // Terminating means that the CustomResourceDefinition has been deleted and is cleaning up. Terminating CustomResourceDefinitionConditionType = "Terminating" + // KubernetesAPIApprovalPolicyConformant indicates that an API in *.k8s.io or *.kubernetes.io is or is not approved. For CRDs + // outside those groups, this condition will not be set. For CRDs inside those groups, the condition will + // be true if .metadata.annotations["api-approved.kubernetes.io"] is set to a URL, otherwise it will be false. + // See https://github.com/kubernetes/enhancements/pull/1111 for more details. + KubernetesAPIApprovalPolicyConformant CustomResourceDefinitionConditionType = "KubernetesAPIApprovalPolicyConformant" ) // CustomResourceDefinitionCondition contains details for the current condition of this pod. type CustomResourceDefinitionCondition struct { - // Type is the type of the condition. + // type is the type of the condition. Types include Established, NamesAccepted and Terminating. Type CustomResourceDefinitionConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=CustomResourceDefinitionConditionType"` - // Status is the status of the condition. + // status is the status of the condition. // Can be True, False, Unknown. Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` - // Last time the condition transitioned from one status to another. + // lastTransitionTime last time the condition transitioned from one status to another. // +optional LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` - // Unique, one-word, CamelCase reason for the condition's last transition. + // reason is a unique, one-word, CamelCase reason for the condition's last transition. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // Human-readable message indicating details about last transition. + // message is a human-readable message indicating details about last transition. // +optional Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } // CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition type CustomResourceDefinitionStatus struct { - // Conditions indicate state for particular aspects of a CustomResourceDefinition + // conditions indicate state for particular aspects of a CustomResourceDefinition + // +optional Conditions []CustomResourceDefinitionCondition `json:"conditions" protobuf:"bytes,1,opt,name=conditions"` - // AcceptedNames are the names that are actually being used to serve discovery + // acceptedNames are the names that are actually being used to serve discovery. // They may be different than the names in spec. 
AcceptedNames CustomResourceDefinitionNames `json:"acceptedNames" protobuf:"bytes,2,opt,name=acceptedNames"` - // StoredVersions are all versions of CustomResources that were ever persisted. Tracking these + // storedVersions lists all versions of CustomResources that were ever persisted. Tracking these // versions allows a migration path for stored versions in etcd. The field is mutable - // so the migration controller can first finish a migration to another version (i.e. - // that no old objects are left in the storage), and then remove the rest of the + // so a migration controller can finish a migration to another version (ensuring + // no old objects are left in storage), and then remove the rest of the // versions from this list. - // None of the versions in this list can be removed from the spec.Versions field. + // Versions may not be removed from `spec.versions` while they exist in this list. StoredVersions []string `json:"storedVersions" protobuf:"bytes,3,rep,name=storedVersions"` } @@ -325,13 +375,14 @@ const CustomResourceCleanupFinalizer = "customresourcecleanup.apiextensions.k8s. // CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format // <.spec.name>.<.spec.group>. +// Deprecated in v1.16, planned for removal in v1.19. Use apiextensions.k8s.io/v1 CustomResourceDefinition instead. type CustomResourceDefinition struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec describes how the user wants the resources to appear + // spec describes how the user wants the resources to appear Spec CustomResourceDefinitionSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status indicates the actual state of the CustomResourceDefinition + // status indicates the actual state of the CustomResourceDefinition // +optional Status CustomResourceDefinitionStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -343,23 +394,26 @@ type CustomResourceDefinitionList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items individual CustomResourceDefinitions + // items list individual CustomResourceDefinition objects Items []CustomResourceDefinition `json:"items" protobuf:"bytes,2,rep,name=items"` } // CustomResourceValidation is a list of validation methods for CustomResources. type CustomResourceValidation struct { - // OpenAPIV3Schema is the OpenAPI v3 schema to be validated against. + // openAPIV3Schema is the OpenAPI v3 schema to use for validation and pruning. // +optional OpenAPIV3Schema *JSONSchemaProps `json:"openAPIV3Schema,omitempty" protobuf:"bytes,1,opt,name=openAPIV3Schema"` } // CustomResourceSubresources defines the status and scale subresources for CustomResources. type CustomResourceSubresources struct { - // Status denotes the status subresource for CustomResources + // status indicates the custom resource should serve a `/status` subresource. + // When enabled: + // 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. + // 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object. 
// +optional Status *CustomResourceSubresourceStatus `json:"status,omitempty" protobuf:"bytes,1,opt,name=status"` - // Scale denotes the scale subresource for CustomResources + // scale indicates the custom resource should serve a `/scale` subresource that returns an `autoscaling/v1` Scale object. // +optional Scale *CustomResourceSubresourceScale `json:"scale,omitempty" protobuf:"bytes,2,opt,name=scale"` } @@ -373,22 +427,25 @@ type CustomResourceSubresourceStatus struct{} // CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources. type CustomResourceSubresourceScale struct { - // SpecReplicasPath defines the JSON path inside of a CustomResource that corresponds to Scale.Spec.Replicas. + // specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under .spec. - // If there is no value under the given path in the CustomResource, the /scale subresource will return an error on GET. + // Must be a JSON Path under `.spec`. + // If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET. SpecReplicasPath string `json:"specReplicasPath" protobuf:"bytes,1,name=specReplicasPath"` - // StatusReplicasPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Replicas. + // statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under .status. - // If there is no value under the given path in the CustomResource, the status replica value in the /scale subresource + // Must be a JSON Path under `.status`. + // If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource // will default to 0. StatusReplicasPath string `json:"statusReplicasPath" protobuf:"bytes,2,opt,name=statusReplicasPath"` - // LabelSelectorPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Selector. + // labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under .status. - // Must be set to work with HPA. - // If there is no value under the given path in the CustomResource, the status label selector value in the /scale + // Must be a JSON Path under `.status` or `.spec`. + // Must be set to work with HorizontalPodAutoscaler. + // The field pointed by this JSON path must be a string field (not a complex selector struct) + // which contains a serialized label selector in string form. + // More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource + // If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` // subresource will default to the empty string. // +optional LabelSelectorPath *string `json:"labelSelectorPath,omitempty" protobuf:"bytes,3,opt,name=labelSelectorPath"` @@ -399,38 +456,39 @@ type CustomResourceSubresourceScale struct { // ConversionReview describes a conversion request/response. type ConversionReview struct { metav1.TypeMeta `json:",inline"` - // `request` describes the attributes for the conversion request. 
+ // request describes the attributes for the conversion request. // +optional Request *ConversionRequest `json:"request,omitempty" protobuf:"bytes,1,opt,name=request"` - // `response` describes the attributes for the conversion response. + // response describes the attributes for the conversion response. // +optional Response *ConversionResponse `json:"response,omitempty" protobuf:"bytes,2,opt,name=response"` } // ConversionRequest describes the conversion request parameters. type ConversionRequest struct { - // `uid` is an identifier for the individual request/response. It allows us to distinguish instances of requests which are - // otherwise identical (parallel requests, requests when earlier requests did not modify etc) - // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. + // uid is an identifier for the individual request/response. It allows distinguishing instances of requests which are + // otherwise identical (parallel requests, etc). + // The UID is meant to track the round trip (request/response) between the Kubernetes API server and the webhook, not the user request. // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. UID types.UID `json:"uid" protobuf:"bytes,1,name=uid"` - // `desiredAPIVersion` is the version to convert given objects to. e.g. "myapi.example.com/v1" + // desiredAPIVersion is the version to convert given objects to. e.g. "myapi.example.com/v1" DesiredAPIVersion string `json:"desiredAPIVersion" protobuf:"bytes,2,name=desiredAPIVersion"` - // `objects` is the list of CR objects to be converted. + // objects is the list of custom resource objects to be converted. Objects []runtime.RawExtension `json:"objects" protobuf:"bytes,3,rep,name=objects"` } // ConversionResponse describes a conversion response. type ConversionResponse struct { - // `uid` is an identifier for the individual request/response. - // This should be copied over from the corresponding AdmissionRequest. + // uid is an identifier for the individual request/response. + // This should be copied over from the corresponding `request.uid`. UID types.UID `json:"uid" protobuf:"bytes,1,name=uid"` - // `convertedObjects` is the list of converted version of `request.objects` if the `result` is successful otherwise empty. - // The webhook is expected to set apiVersion of these objects to the ConversionRequest.desiredAPIVersion. The list - // must also has the same size as input list with the same objects in the same order(i.e. equal UIDs and object meta) + // convertedObjects is the list of converted version of `request.objects` if the `result` is successful, otherwise empty. + // The webhook is expected to set `apiVersion` of these objects to the `request.desiredAPIVersion`. The list + // must also have the same size as the input list with the same objects in the same order (equal kind, metadata.uid, metadata.name and metadata.namespace). + // The webhook is allowed to mutate labels and annotations. Any other change to the metadata is silently ignored. ConvertedObjects []runtime.RawExtension `json:"convertedObjects" protobuf:"bytes,2,rep,name=convertedObjects"` - // `result` contains the result of conversion with extra details if the conversion failed. `result.status` determines if - // the conversion failed or succeeded. 
The `result.status` field is required and represent the success or failure of the + // result contains the result of conversion with extra details if the conversion failed. `result.status` determines if + // the conversion failed or succeeded. The `result.status` field is required and represents the success or failure of the // conversion. A successful conversion must set `result.status` to `Success`. A failed conversion must set // `result.status` to `Failure` and provide more details in `result.message` and return http status 200. The `result.message` // will be used to construct an error message for the end user. diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go index 54c0a4ae1..ec1f8022d 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go @@ -18,13 +18,16 @@ package v1beta1 // JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/). type JSONSchemaProps struct { - ID string `json:"id,omitempty" protobuf:"bytes,1,opt,name=id"` - Schema JSONSchemaURL `json:"$schema,omitempty" protobuf:"bytes,2,opt,name=schema"` - Ref *string `json:"$ref,omitempty" protobuf:"bytes,3,opt,name=ref"` - Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - Type string `json:"type,omitempty" protobuf:"bytes,5,opt,name=type"` - Format string `json:"format,omitempty" protobuf:"bytes,6,opt,name=format"` - Title string `json:"title,omitempty" protobuf:"bytes,7,opt,name=title"` + ID string `json:"id,omitempty" protobuf:"bytes,1,opt,name=id"` + Schema JSONSchemaURL `json:"$schema,omitempty" protobuf:"bytes,2,opt,name=schema"` + Ref *string `json:"$ref,omitempty" protobuf:"bytes,3,opt,name=ref"` + Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` + Type string `json:"type,omitempty" protobuf:"bytes,5,opt,name=type"` + Format string `json:"format,omitempty" protobuf:"bytes,6,opt,name=format"` + Title string `json:"title,omitempty" protobuf:"bytes,7,opt,name=title"` + // default is a default value for undefined object fields. + // Defaulting is a beta feature under the CustomResourceDefaulting feature gate. + // CustomResourceDefinitions with defaults must be created using the v1 (or newer) CustomResourceDefinition API. Default *JSON `json:"default,omitempty" protobuf:"bytes,8,opt,name=default"` Maximum *float64 `json:"maximum,omitempty" protobuf:"bytes,9,opt,name=maximum"` ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty" protobuf:"bytes,10,opt,name=exclusiveMaximum"` @@ -55,6 +58,65 @@ type JSONSchemaProps struct { ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty" protobuf:"bytes,35,opt,name=externalDocs"` Example *JSON `json:"example,omitempty" protobuf:"bytes,36,opt,name=example"` Nullable bool `json:"nullable,omitempty" protobuf:"bytes,37,opt,name=nullable"` + + // x-kubernetes-preserve-unknown-fields stops the API server + // decoding step from pruning fields which are not specified + // in the validation schema. This affects fields recursively, + // but switches back to normal pruning behaviour if nested + // properties or additionalProperties are specified in the schema. + // This can either be true or undefined. False is forbidden. 
+ XPreserveUnknownFields *bool `json:"x-kubernetes-preserve-unknown-fields,omitempty" protobuf:"bytes,38,opt,name=xKubernetesPreserveUnknownFields"` + + // x-kubernetes-embedded-resource defines that the value is an + // embedded Kubernetes runtime.Object, with TypeMeta and + // ObjectMeta. The type must be object. It is allowed to further + // restrict the embedded object. kind, apiVersion and metadata + // are validated automatically. x-kubernetes-preserve-unknown-fields + // is allowed to be true, but does not have to be if the object + // is fully specified (up to kind, apiVersion, metadata). + XEmbeddedResource bool `json:"x-kubernetes-embedded-resource,omitempty" protobuf:"bytes,39,opt,name=xKubernetesEmbeddedResource"` + + // x-kubernetes-int-or-string specifies that this value is + // either an integer or a string. If this is true, an empty + // type is allowed and type as child of anyOf is permitted + // if following one of the following patterns: + // + // 1) anyOf: + // - type: integer + // - type: string + // 2) allOf: + // - anyOf: + // - type: integer + // - type: string + // - ... zero or more + XIntOrString bool `json:"x-kubernetes-int-or-string,omitempty" protobuf:"bytes,40,opt,name=xKubernetesIntOrString"` + + // x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used + // as the index of the map. + // + // This tag MUST only be used on lists that have the "x-kubernetes-list-type" + // extension set to "map". Also, the values specified for this attribute must + // be a scalar typed field of the child structure (no nesting is supported). + // + // +optional + XListMapKeys []string `json:"x-kubernetes-list-map-keys,omitempty" protobuf:"bytes,41,rep,name=xKubernetesListMapKeys"` + + // x-kubernetes-list-type annotates an array to further describe its topology. + // This extension must only be used on lists and may have 3 possible values: + // + // 1) `atomic`: the list is treated as a single entity, like a scalar. + // Atomic lists will be entirely replaced when updated. This extension + // may be used on any type of list (struct, scalar, ...). + // 2) `set`: + // Sets are lists that must not have multiple items with the same value. Each + // value must be a scalar (or another atomic type). + // 3) `map`: + // These lists are like maps in that their elements have a non-index key + // used to identify them. Order is preserved upon merge. The map tag + // must only be used on a list with elements of type object. + // Defaults to atomic for arrays. + // +optional + XListType *string `json:"x-kubernetes-list-type,omitempty" protobuf:"bytes,42,opt,name=xKubernetesListType"` } // JSON represents any valid JSON value. 
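For orientation, the new `x-kubernetes-*` fields added to `JSONSchemaProps` above can be combined when building a structural schema programmatically. The following is a minimal sketch, not part of this change; it assumes the vendored `v1beta1` package is imported as `crdv1beta1`, and the property and key names are illustrative only.

package main

import (
	"fmt"

	crdv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
)

func main() {
	preserve := true
	listType := "map"

	// Structural schema fragment: unknown fields are preserved under .config,
	// and .items is a "map" list keyed by the "name" field of its elements.
	schema := crdv1beta1.JSONSchemaProps{
		Type: "object",
		Properties: map[string]crdv1beta1.JSONSchemaProps{
			"config": {
				Type:                   "object",
				XPreserveUnknownFields: &preserve,
			},
			"items": {
				Type:         "array",
				XListType:    &listType,
				XListMapKeys: []string{"name"},
				Items: &crdv1beta1.JSONSchemaPropsOrArray{
					Schema: &crdv1beta1.JSONSchemaProps{
						Type: "object",
						Properties: map[string]crdv1beta1.JSONSchemaProps{
							"name":  {Type: "string"},
							"value": {XIntOrString: true}, // accepts either an integer or a string
						},
					},
				},
			},
		},
	}

	fmt.Printf("schema has %d top-level properties\n", len(schema.Properties))
}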
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go index e7ae745b6..64073df08 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go @@ -24,6 +24,7 @@ import ( unsafe "unsafe" apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -295,7 +296,15 @@ func Convert_apiextensions_CustomResourceColumnDefinition_To_v1beta1_CustomResou func autoConvert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(in *CustomResourceConversion, out *apiextensions.CustomResourceConversion, s conversion.Scope) error { out.Strategy = apiextensions.ConversionStrategyType(in.Strategy) - out.WebhookClientConfig = (*apiextensions.WebhookClientConfig)(unsafe.Pointer(in.WebhookClientConfig)) + if in.WebhookClientConfig != nil { + in, out := &in.WebhookClientConfig, &out.WebhookClientConfig + *out = new(apiextensions.WebhookClientConfig) + if err := Convert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(*in, *out, s); err != nil { + return err + } + } else { + out.WebhookClientConfig = nil + } out.ConversionReviewVersions = *(*[]string)(unsafe.Pointer(&in.ConversionReviewVersions)) return nil } @@ -307,7 +316,15 @@ func Convert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceCon func autoConvert_apiextensions_CustomResourceConversion_To_v1beta1_CustomResourceConversion(in *apiextensions.CustomResourceConversion, out *CustomResourceConversion, s conversion.Scope) error { out.Strategy = ConversionStrategyType(in.Strategy) - out.WebhookClientConfig = (*WebhookClientConfig)(unsafe.Pointer(in.WebhookClientConfig)) + if in.WebhookClientConfig != nil { + in, out := &in.WebhookClientConfig, &out.WebhookClientConfig + *out = new(WebhookClientConfig) + if err := Convert_apiextensions_WebhookClientConfig_To_v1beta1_WebhookClientConfig(*in, *out, s); err != nil { + return err + } + } else { + out.WebhookClientConfig = nil + } out.ConversionReviewVersions = *(*[]string)(unsafe.Pointer(&in.ConversionReviewVersions)) return nil } @@ -478,7 +495,16 @@ func autoConvert_v1beta1_CustomResourceDefinitionSpec_To_apiextensions_CustomRes out.Versions = nil } out.AdditionalPrinterColumns = *(*[]apiextensions.CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) - out.Conversion = (*apiextensions.CustomResourceConversion)(unsafe.Pointer(in.Conversion)) + if in.Conversion != nil { + in, out := &in.Conversion, &out.Conversion + *out = new(apiextensions.CustomResourceConversion) + if err := Convert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(*in, *out, s); err != nil { + return err + } + } else { + out.Conversion = nil + } + out.PreserveUnknownFields = (*bool)(unsafe.Pointer(in.PreserveUnknownFields)) return nil } @@ -516,7 +542,16 @@ func autoConvert_apiextensions_CustomResourceDefinitionSpec_To_v1beta1_CustomRes out.Versions = nil } out.AdditionalPrinterColumns = *(*[]CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) - out.Conversion = (*CustomResourceConversion)(unsafe.Pointer(in.Conversion)) + if in.Conversion != nil { + in, out := &in.Conversion, 
&out.Conversion + *out = new(CustomResourceConversion) + if err := Convert_apiextensions_CustomResourceConversion_To_v1beta1_CustomResourceConversion(*in, *out, s); err != nil { + return err + } + } else { + out.Conversion = nil + } + out.PreserveUnknownFields = (*bool)(unsafe.Pointer(in.PreserveUnknownFields)) return nil } @@ -905,6 +940,11 @@ func autoConvert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(in *JS out.Example = nil } out.Nullable = in.Nullable + out.XPreserveUnknownFields = (*bool)(unsafe.Pointer(in.XPreserveUnknownFields)) + out.XEmbeddedResource = in.XEmbeddedResource + out.XIntOrString = in.XIntOrString + out.XListMapKeys = *(*[]string)(unsafe.Pointer(&in.XListMapKeys)) + out.XListType = (*string)(unsafe.Pointer(in.XListType)) return nil } @@ -1087,6 +1127,11 @@ func autoConvert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(in *ap } else { out.Example = nil } + out.XPreserveUnknownFields = (*bool)(unsafe.Pointer(in.XPreserveUnknownFields)) + out.XEmbeddedResource = in.XEmbeddedResource + out.XIntOrString = in.XIntOrString + out.XListMapKeys = *(*[]string)(unsafe.Pointer(&in.XListMapKeys)) + out.XListType = (*string)(unsafe.Pointer(in.XListType)) return nil } @@ -1228,6 +1273,9 @@ func autoConvert_v1beta1_ServiceReference_To_apiextensions_ServiceReference(in * out.Namespace = in.Namespace out.Name = in.Name out.Path = (*string)(unsafe.Pointer(in.Path)) + if err := v1.Convert_Pointer_int32_To_int32(&in.Port, &out.Port, s); err != nil { + return err + } return nil } @@ -1240,6 +1288,9 @@ func autoConvert_apiextensions_ServiceReference_To_v1beta1_ServiceReference(in * out.Namespace = in.Namespace out.Name = in.Name out.Path = (*string)(unsafe.Pointer(in.Path)) + if err := v1.Convert_int32_To_Pointer_int32(&in.Port, &out.Port, s); err != nil { + return err + } return nil } @@ -1250,7 +1301,15 @@ func Convert_apiextensions_ServiceReference_To_v1beta1_ServiceReference(in *apie func autoConvert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in *WebhookClientConfig, out *apiextensions.WebhookClientConfig, s conversion.Scope) error { out.URL = (*string)(unsafe.Pointer(in.URL)) - out.Service = (*apiextensions.ServiceReference)(unsafe.Pointer(in.Service)) + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(apiextensions.ServiceReference) + if err := Convert_v1beta1_ServiceReference_To_apiextensions_ServiceReference(*in, *out, s); err != nil { + return err + } + } else { + out.Service = nil + } out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) return nil } @@ -1262,7 +1321,15 @@ func Convert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in func autoConvert_apiextensions_WebhookClientConfig_To_v1beta1_WebhookClientConfig(in *apiextensions.WebhookClientConfig, out *WebhookClientConfig, s conversion.Scope) error { out.URL = (*string)(unsafe.Pointer(in.URL)) - out.Service = (*ServiceReference)(unsafe.Pointer(in.Service)) + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + if err := Convert_apiextensions_ServiceReference_To_v1beta1_ServiceReference(*in, *out, s); err != nil { + return err + } + } else { + out.Service = nil + } out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) return nil } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go index 20ded01bf..82bbb2be4 100644 --- 
a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go @@ -197,7 +197,7 @@ func (in *CustomResourceDefinitionCondition) DeepCopy() *CustomResourceDefinitio func (in *CustomResourceDefinitionList) DeepCopyInto(out *CustomResourceDefinitionList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CustomResourceDefinition, len(*in)) @@ -283,6 +283,11 @@ func (in *CustomResourceDefinitionSpec) DeepCopyInto(out *CustomResourceDefiniti *out = new(CustomResourceConversion) (*in).DeepCopyInto(*out) } + if in.PreserveUnknownFields != nil { + in, out := &in.PreserveUnknownFields, &out.PreserveUnknownFields + *out = new(bool) + **out = **in + } return } @@ -607,6 +612,11 @@ func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { *out = new(string) **out = **in } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } return } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.defaults.go index f65f47a03..e1807243f 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.defaults.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.defaults.go @@ -38,6 +38,13 @@ func RegisterDefaults(scheme *runtime.Scheme) error { func SetObjectDefaults_CustomResourceDefinition(in *CustomResourceDefinition) { SetDefaults_CustomResourceDefinition(in) SetDefaults_CustomResourceDefinitionSpec(&in.Spec) + if in.Spec.Conversion != nil { + if in.Spec.Conversion.WebhookClientConfig != nil { + if in.Spec.Conversion.WebhookClientConfig.Service != nil { + SetDefaults_ServiceReference(in.Spec.Conversion.WebhookClientConfig.Service) + } + } + } } func SetObjectDefaults_CustomResourceDefinitionList(in *CustomResourceDefinitionList) { diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go index 667e556ac..682e6fd4c 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go @@ -115,7 +115,7 @@ func (in *CustomResourceDefinitionCondition) DeepCopy() *CustomResourceDefinitio func (in *CustomResourceDefinitionList) DeepCopyInto(out *CustomResourceDefinitionList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CustomResourceDefinition, len(*in)) @@ -201,6 +201,11 @@ func (in *CustomResourceDefinitionSpec) DeepCopyInto(out *CustomResourceDefiniti *out = new(CustomResourceConversion) (*in).DeepCopyInto(*out) } + if in.PreserveUnknownFields != nil { + in, out := &in.PreserveUnknownFields, &out.PreserveUnknownFields + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go index aa2dbcbb0..de6ad042e 100644 --- 
a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go @@ -19,6 +19,9 @@ limitations under the License. package clientset import ( + "fmt" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" discovery "k8s.io/client-go/discovery" rest "k8s.io/client-go/rest" @@ -28,6 +31,7 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface ApiextensionsV1beta1() apiextensionsv1beta1.ApiextensionsV1beta1Interface + ApiextensionsV1() apiextensionsv1.ApiextensionsV1Interface } // Clientset contains the clients for groups. Each group has exactly one @@ -35,6 +39,7 @@ type Interface interface { type Clientset struct { *discovery.DiscoveryClient apiextensionsV1beta1 *apiextensionsv1beta1.ApiextensionsV1beta1Client + apiextensionsV1 *apiextensionsv1.ApiextensionsV1Client } // ApiextensionsV1beta1 retrieves the ApiextensionsV1beta1Client @@ -42,6 +47,11 @@ func (c *Clientset) ApiextensionsV1beta1() apiextensionsv1beta1.ApiextensionsV1b return c.apiextensionsV1beta1 } +// ApiextensionsV1 retrieves the ApiextensionsV1Client +func (c *Clientset) ApiextensionsV1() apiextensionsv1.ApiextensionsV1Interface { + return c.apiextensionsV1 +} + // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { @@ -51,9 +61,14 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface { } // NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. 
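// Editorial note, not part of the vendored change: with the Burst check added
// below, a hand-built *rest.Config that sets QPS but leaves Burst at zero is
// now rejected by NewForConfig. A minimal caller-side sketch (host and values
// are assumptions):
//
//	cfg := &rest.Config{Host: "https://example.invalid", QPS: 5, Burst: 10}
//	cs, err := clientset.NewForConfig(cfg)
//	if err != nil {
//		// e.g. the error returned when Burst <= 0 while QPS > 0
//	}
//	_ = cs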
func NewForConfig(c *rest.Config) (*Clientset, error) { configShallowCopy := *c if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("Burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) } var cs Clientset @@ -62,6 +77,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.apiextensionsV1, err = apiextensionsv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) if err != nil { @@ -75,6 +94,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset cs.apiextensionsV1beta1 = apiextensionsv1beta1.NewForConfigOrDie(c) + cs.apiextensionsV1 = apiextensionsv1.NewForConfigOrDie(c) cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) return &cs @@ -84,6 +104,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { func New(c rest.Interface) *Clientset { var cs Clientset cs.apiextensionsV1beta1 = apiextensionsv1beta1.New(c) + cs.apiextensionsV1 = apiextensionsv1.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go index 4232ce5ce..144c20666 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go @@ -19,6 +19,7 @@ limitations under the License. package scheme import ( + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -32,6 +33,7 @@ var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ apiextensionsv1beta1.AddToScheme, + apiextensionsv1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/apiextensions_client.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/apiextensions_client.go new file mode 100644 index 000000000..8823cb6a9 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/apiextensions_client.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. 
DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + rest "k8s.io/client-go/rest" +) + +type ApiextensionsV1Interface interface { + RESTClient() rest.Interface + CustomResourceDefinitionsGetter +} + +// ApiextensionsV1Client is used to interact with features provided by the apiextensions.k8s.io group. +type ApiextensionsV1Client struct { + restClient rest.Interface +} + +func (c *ApiextensionsV1Client) CustomResourceDefinitions() CustomResourceDefinitionInterface { + return newCustomResourceDefinitions(c) +} + +// NewForConfig creates a new ApiextensionsV1Client for the given config. +func NewForConfig(c *rest.Config) (*ApiextensionsV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &ApiextensionsV1Client{client}, nil +} + +// NewForConfigOrDie creates a new ApiextensionsV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ApiextensionsV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ApiextensionsV1Client for the given RESTClient. +func New(c rest.Interface) *ApiextensionsV1Client { + return &ApiextensionsV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *ApiextensionsV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go new file mode 100644 index 000000000..e3fa43697 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go @@ -0,0 +1,180 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + scheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CustomResourceDefinitionsGetter has a method to return a CustomResourceDefinitionInterface. 
+// A group's client should implement this interface. +type CustomResourceDefinitionsGetter interface { + CustomResourceDefinitions() CustomResourceDefinitionInterface +} + +// CustomResourceDefinitionInterface has methods to work with CustomResourceDefinition resources. +type CustomResourceDefinitionInterface interface { + Create(*v1.CustomResourceDefinition) (*v1.CustomResourceDefinition, error) + Update(*v1.CustomResourceDefinition) (*v1.CustomResourceDefinition, error) + UpdateStatus(*v1.CustomResourceDefinition) (*v1.CustomResourceDefinition, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.CustomResourceDefinition, error) + List(opts metav1.ListOptions) (*v1.CustomResourceDefinitionList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.CustomResourceDefinition, err error) + CustomResourceDefinitionExpansion +} + +// customResourceDefinitions implements CustomResourceDefinitionInterface +type customResourceDefinitions struct { + client rest.Interface +} + +// newCustomResourceDefinitions returns a CustomResourceDefinitions +func newCustomResourceDefinitions(c *ApiextensionsV1Client) *customResourceDefinitions { + return &customResourceDefinitions{ + client: c.RESTClient(), + } +} + +// Get takes name of the customResourceDefinition, and returns the corresponding customResourceDefinition object, and an error if there is any. +func (c *customResourceDefinitions) Get(name string, options metav1.GetOptions) (result *v1.CustomResourceDefinition, err error) { + result = &v1.CustomResourceDefinition{} + err = c.client.Get(). + Resource("customresourcedefinitions"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CustomResourceDefinitions that match those selectors. +func (c *customResourceDefinitions) List(opts metav1.ListOptions) (result *v1.CustomResourceDefinitionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CustomResourceDefinitionList{} + err = c.client.Get(). + Resource("customresourcedefinitions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested customResourceDefinitions. +func (c *customResourceDefinitions) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("customresourcedefinitions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a customResourceDefinition and creates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. +func (c *customResourceDefinitions) Create(customResourceDefinition *v1.CustomResourceDefinition) (result *v1.CustomResourceDefinition, err error) { + result = &v1.CustomResourceDefinition{} + err = c.client.Post(). + Resource("customresourcedefinitions"). + Body(customResourceDefinition). + Do(). 
+ Into(result) + return +} + +// Update takes the representation of a customResourceDefinition and updates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. +func (c *customResourceDefinitions) Update(customResourceDefinition *v1.CustomResourceDefinition) (result *v1.CustomResourceDefinition, err error) { + result = &v1.CustomResourceDefinition{} + err = c.client.Put(). + Resource("customresourcedefinitions"). + Name(customResourceDefinition.Name). + Body(customResourceDefinition). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *customResourceDefinitions) UpdateStatus(customResourceDefinition *v1.CustomResourceDefinition) (result *v1.CustomResourceDefinition, err error) { + result = &v1.CustomResourceDefinition{} + err = c.client.Put(). + Resource("customresourcedefinitions"). + Name(customResourceDefinition.Name). + SubResource("status"). + Body(customResourceDefinition). + Do(). + Into(result) + return +} + +// Delete takes name of the customResourceDefinition and deletes it. Returns an error if one occurs. +func (c *customResourceDefinitions) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("customresourcedefinitions"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *customResourceDefinitions) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("customresourcedefinitions"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched customResourceDefinition. +func (c *customResourceDefinitions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.CustomResourceDefinition, err error) { + result = &v1.CustomResourceDefinition{} + err = c.client.Patch(pt). + Resource("customresourcedefinitions"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/doc.go new file mode 100644 index 000000000..3af5d054f --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
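// An illustrative use of the typed v1 client generated in this package (the
// variable names and the CRD name are assumptions, not generated code):
//
//	crds, err := client.CustomResourceDefinitions().List(metav1.ListOptions{})
//	if err != nil {
//		// handle error
//	}
//	crd, err := client.CustomResourceDefinitions().Get("examples.mygroup.example.com", metav1.GetOptions{})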
+package v1 diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/generated_expansion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/generated_expansion.go new file mode 100644 index 000000000..e594636af --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type CustomResourceDefinitionExpansion interface{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go index a1fd337f9..ff1ec4f25 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go @@ -21,7 +21,6 @@ package v1beta1 import ( v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go b/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go new file mode 100644 index 000000000..f02fa8e43 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go @@ -0,0 +1,49 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package equality + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" +) + +// Semantic can do semantic deep equality checks for api objects. 
+// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true +var Semantic = conversion.EqualitiesOrDie( + func(a, b resource.Quantity) bool { + // Ignore formatting, only care that numeric value stayed the same. + // TODO: if we decide it's important, it should be safe to start comparing the format. + // + // Uninitialized quantities are equivalent to 0 quantities. + return a.Cmp(b) == 0 + }, + func(a, b metav1.MicroTime) bool { + return a.UTC() == b.UTC() + }, + func(a, b metav1.Time) bool { + return a.UTC() == b.UTC() + }, + func(a, b labels.Selector) bool { + return a.String() == b.String() + }, + func(a, b fields.Selector) bool { + return a.String() == b.String() + }, +) diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go index afd97f7ad..95d5c7a35 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go @@ -32,7 +32,9 @@ import ( const ( // StatusTooManyRequests means the server experienced too many requests within a // given window and that the client must wait to perform the action again. - StatusTooManyRequests = 429 + // DEPRECATED: please use http.StatusTooManyRequests, this will be removed in + // the future version. + StatusTooManyRequests = http.StatusTooManyRequests ) // StatusError is an error intended for consumption by a REST API server; it can also be @@ -349,7 +351,7 @@ func NewTimeoutError(message string, retryAfterSeconds int) *StatusError { func NewTooManyRequestsError(message string) *StatusError { return &StatusError{metav1.Status{ Status: metav1.StatusFailure, - Code: StatusTooManyRequests, + Code: http.StatusTooManyRequests, Reason: metav1.StatusReasonTooManyRequests, Message: fmt.Sprintf("Too many requests: %s", message), }} @@ -394,7 +396,11 @@ func NewGenericServerResponse(code int, verb string, qualifiedResource schema.Gr case http.StatusNotAcceptable: reason = metav1.StatusReasonNotAcceptable // the server message has details about what types are acceptable - message = serverMessage + if len(serverMessage) == 0 || serverMessage == "unknown" { + message = "the server was unable to respond with a content type that the client supports" + } else { + message = serverMessage + } case http.StatusUnsupportedMediaType: reason = metav1.StatusReasonUnsupportedMediaType // the server message has details about what types are acceptable @@ -617,3 +623,46 @@ func ReasonForError(err error) metav1.StatusReason { } return metav1.StatusReasonUnknown } + +// ErrorReporter converts generic errors into runtime.Object errors without +// requiring the caller to take a dependency on meta/v1 (where Status lives). +// This prevents circular dependencies in core watch code. +type ErrorReporter struct { + code int + verb string + reason string +} + +// NewClientErrorReporter will respond with valid v1.Status objects that report +// unexpected server responses. Primarily used by watch to report errors when +// we attempt to decode a response from the server and it is not in the form +// we expect. Because watch is a dependency of the core api, we can't return +// meta/v1.Status in that package and so much inject this interface to convert a +// generic error as appropriate. The reason is passed as a unique status cause +// on the returned status, otherwise the generic "ClientError" is returned. 
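// A minimal caller-side sketch (the verb and reason strings are assumed
// values): a client that fails to decode a watch event could surface it as a
// Status object via
//
//	reporter := errors.NewClientErrorReporter(http.StatusInternalServerError, "get", "ClientWatchDecoding")
//	statusObj := reporter.AsObject(fmt.Errorf("unable to decode an event from the watch stream: %v", decodeErr))
//	_ = statusObj // a *metav1.Status describing the unexpected response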
+func NewClientErrorReporter(code int, verb string, reason string) *ErrorReporter { + return &ErrorReporter{ + code: code, + verb: verb, + reason: reason, + } +} + +// AsObject returns a valid error runtime.Object (a v1.Status) for the given +// error, using the code and verb of the reporter type. The error is set to +// indicate that this was an unexpected server response. +func (r *ErrorReporter) AsObject(err error) runtime.Object { + status := NewGenericServerResponse(r.code, r.verb, schema.GroupResource{}, "", err.Error(), 0, true) + if status.ErrStatus.Details == nil { + status.ErrStatus.Details = &metav1.StatusDetails{} + } + reason := r.reason + if len(reason) == 0 { + reason = "ClientError" + } + status.ErrStatus.Details.Causes = append(status.ErrStatus.Details.Causes, metav1.StatusCause{ + Type: metav1.CauseType(reason), + Message: err.Error(), + }) + return &status.ErrStatus +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go index 3425055f6..50468b533 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go @@ -17,30 +17,76 @@ limitations under the License. package meta import ( + "errors" "fmt" "reflect" + "sync" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" ) -// IsListType returns true if the provided Object has a slice called Items +var ( + // isListCache maintains a cache of types that are checked for lists + // which is used by IsListType. + // TODO: remove and replace with an interface check + isListCache = struct { + lock sync.RWMutex + byType map[reflect.Type]bool + }{ + byType: make(map[reflect.Type]bool, 1024), + } +) + +// IsListType returns true if the provided Object has a slice called Items. +// TODO: Replace the code in this check with an interface comparison by +// creating and enforcing that lists implement a list accessor. func IsListType(obj runtime.Object) bool { - // if we're a runtime.Unstructured, check whether this is a list. - // TODO: refactor GetItemsPtr to use an interface that returns []runtime.Object - if unstructured, ok := obj.(runtime.Unstructured); ok { - return unstructured.IsList() + switch t := obj.(type) { + case runtime.Unstructured: + return t.IsList() } + t := reflect.TypeOf(obj) - _, err := GetItemsPtr(obj) - return err == nil + isListCache.lock.RLock() + ok, exists := isListCache.byType[t] + isListCache.lock.RUnlock() + + if !exists { + _, err := getItemsPtr(obj) + ok = err == nil + + // cache only the first 1024 types + isListCache.lock.Lock() + if len(isListCache.byType) < 1024 { + isListCache.byType[t] = ok + } + isListCache.lock.Unlock() + } + + return ok } +var ( + errExpectFieldItems = errors.New("no Items field in this object") + errExpectSliceItems = errors.New("Items field must be a slice of objects") +) + // GetItemsPtr returns a pointer to the list object's Items member. // If 'list' doesn't have an Items member, it's not really a list type // and an error will be returned. // This function will either return a pointer to a slice, or an error, but not both. +// TODO: this will be replaced with an interface in the future func GetItemsPtr(list runtime.Object) (interface{}, error) { + obj, err := getItemsPtr(list) + if err != nil { + return nil, fmt.Errorf("%T is not a list: %v", list, err) + } + return obj, nil +} + +// getItemsPtr returns a pointer to the list object's Items member or an error. 
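// For a conventional typed list (an assumed example, not a real type here)
//
//	type WidgetList struct {
//		metav1.TypeMeta
//		metav1.ListMeta
//		Items []Widget
//	}
//
// the reflect.Slice branch below takes the address of the Items field, so the
// returned interface{} holds a *[]Widget; the Interface/Ptr branch handles
// lists whose Items field is declared as a pointer or interface to a slice.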
+func getItemsPtr(list runtime.Object) (interface{}, error) { v, err := conversion.EnforcePtr(list) if err != nil { return nil, err @@ -48,19 +94,19 @@ func GetItemsPtr(list runtime.Object) (interface{}, error) { items := v.FieldByName("Items") if !items.IsValid() { - return nil, fmt.Errorf("no Items field in %#v", list) + return nil, errExpectFieldItems } switch items.Kind() { case reflect.Interface, reflect.Ptr: target := reflect.TypeOf(items.Interface()).Elem() if target.Kind() != reflect.Slice { - return nil, fmt.Errorf("items: Expected slice, got %s", target.Kind()) + return nil, errExpectSliceItems } return items.Interface(), nil case reflect.Slice: return items.Addr().Interface(), nil default: - return nil, fmt.Errorf("items: Expected slice, got %s", items.Kind()) + return nil, errExpectSliceItems } } diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go index b50337e13..fa4b76731 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go @@ -21,7 +21,6 @@ import ( "reflect" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -114,12 +113,12 @@ func Accessor(obj interface{}) (metav1.Object, error) { // AsPartialObjectMetadata takes the metav1 interface and returns a partial object. // TODO: consider making this solely a conversion action. -func AsPartialObjectMetadata(m metav1.Object) *metav1beta1.PartialObjectMetadata { +func AsPartialObjectMetadata(m metav1.Object) *metav1.PartialObjectMetadata { switch t := m.(type) { case *metav1.ObjectMeta: - return &metav1beta1.PartialObjectMetadata{ObjectMeta: *t} + return &metav1.PartialObjectMetadata{ObjectMeta: *t} default: - return &metav1beta1.PartialObjectMetadata{ + return &metav1.PartialObjectMetadata{ ObjectMeta: metav1.ObjectMeta{ Name: m.GetName(), GenerateName: m.GetGenerateName(), @@ -136,7 +135,6 @@ func AsPartialObjectMetadata(m metav1.Object) *metav1beta1.PartialObjectMetadata OwnerReferences: m.GetOwnerReferences(), Finalizers: m.GetFinalizers(), ClusterName: m.GetClusterName(), - Initializers: m.GetInitializers(), ManagedFields: m.GetManagedFields(), }, } diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go index 9d7835bc2..9fca2e165 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go @@ -17,20 +17,15 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto -/* -Package resource is a generated protocol buffer package. +package resource -It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +import ( + fmt "fmt" -It has these top-level messages: - Quantity -*/ -package resource + math "math" -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" + proto "github.com/gogo/protobuf/proto" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -43,19 +38,38 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *Quantity) Reset() { *m = Quantity{} } -func (*Quantity) ProtoMessage() {} -func (*Quantity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *Quantity) Reset() { *m = Quantity{} } +func (*Quantity) ProtoMessage() {} +func (*Quantity) Descriptor() ([]byte, []int) { + return fileDescriptor_612bba87bd70906c, []int{0} +} +func (m *Quantity) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Quantity.Unmarshal(m, b) +} +func (m *Quantity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Quantity.Marshal(b, m, deterministic) +} +func (m *Quantity) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quantity.Merge(m, src) +} +func (m *Quantity) XXX_Size() int { + return xxx_messageInfo_Quantity.Size(m) +} +func (m *Quantity) XXX_DiscardUnknown() { + xxx_messageInfo_Quantity.DiscardUnknown(m) +} + +var xxx_messageInfo_Quantity proto.InternalMessageInfo func init() { proto.RegisterType((*Quantity)(nil), "k8s.io.apimachinery.pkg.api.resource.Quantity") } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto", fileDescriptorGenerated) + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto", fileDescriptor_612bba87bd70906c) } -var fileDescriptorGenerated = []byte{ +var fileDescriptor_612bba87bd70906c = []byte{ // 237 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8e, 0xb1, 0x4e, 0xc3, 0x30, 0x10, 0x40, 0xcf, 0x0b, 0x2a, 0x19, 0x2b, 0x84, 0x10, 0xc3, 0xa5, 0x42, 0x0c, 0x2c, 0xd8, 0x6b, diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto index acc904445..18a6c7cd6 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto @@ -26,7 +26,7 @@ option go_package = "resource"; // Quantity is a fixed-point representation of a number. // It provides convenient marshaling/unmarshaling in JSON and YAML, -// in addition to String() and Int64() accessors. +// in addition to String() and AsInt64() accessors. // // The serialization format is: // diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/math.go b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go index 72d3880c0..7f63175d3 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/math.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go @@ -194,9 +194,9 @@ func negativeScaleInt64(base int64, scale Scale) (result int64, exact bool) { } if fraction { if base > 0 { - value += 1 + value++ } else { - value += -1 + value-- } } return value, !fraction diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index 54fda5806..516d041da 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -29,7 +29,7 @@ import ( // Quantity is a fixed-point representation of a number. // It provides convenient marshaling/unmarshaling in JSON and YAML, -// in addition to String() and Int64() accessors. +// in addition to String() and AsInt64() accessors. // // The serialization format is: // @@ -584,6 +584,12 @@ func (q *Quantity) Neg() { q.d.Dec.Neg(q.d.Dec) } +// Equal checks equality of two Quantities. 
This is useful for testing with +// cmp.Equal. +func (q Quantity) Equal(v Quantity) bool { + return q.Cmp(v) == 0 +} + // int64QuantityExpectedBytes is the expected width in bytes of the canonical string representation // of most Quantity values. const int64QuantityExpectedBytes = 18 @@ -691,7 +697,9 @@ func (q *Quantity) MilliValue() int64 { return q.ScaledValue(Milli) } -// ScaledValue returns the value of ceil(q * 10^scale); this could overflow an int64. +// ScaledValue returns the value of ceil(q / 10^scale). +// For example, NewQuantity(1, DecimalSI).ScaledValue(Milli) returns 1000. +// This could overflow an int64. // To detect overflow, call Value() first and verify the expected magnitude. func (q *Quantity) ScaledValue(scale Scale) int64 { if q.d.Dec == nil { @@ -718,21 +726,3 @@ func (q *Quantity) SetScaled(value int64, scale Scale) { q.d.Dec = nil q.i = int64Amount{value: value, scale: scale} } - -// Copy is a convenience function that makes a deep copy for you. Non-deep -// copies of quantities share pointers and you will regret that. -func (q *Quantity) Copy() *Quantity { - if q.d.Dec == nil { - return &Quantity{ - s: q.s, - i: q.i, - Format: q.Format, - } - } - tmp := &inf.Dec{} - return &Quantity{ - s: q.s, - d: infDecAmount{tmp.Set(q.d.Dec)}, - Format: q.Format, - } -} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go index 74dfb4e4b..f89ca163c 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go @@ -19,6 +19,7 @@ package resource import ( "fmt" "io" + "math/bits" "github.com/gogo/protobuf/proto" ) @@ -28,7 +29,7 @@ var _ proto.Sizer = &Quantity{} func (m *Quantity) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) - n, err := m.MarshalTo(data) + n, err := m.MarshalToSizedBuffer(data[:size]) if err != nil { return nil, err } @@ -38,30 +39,40 @@ func (m *Quantity) Marshal() (data []byte, err error) { // MarshalTo is a customized version of the generated Protobuf unmarshaler for a struct // with a single string field. func (m *Quantity) MarshalTo(data []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(data[:size]) +} + +// MarshalToSizedBuffer is a customized version of the generated +// Protobuf unmarshaler for a struct with a single string field. 
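// Editorial note on the regenerated marshaller: unlike MarshalTo, which wrote
// forward from index 0, this variant fills the buffer from the end toward the
// front (string bytes, then the varint length, then the 0xa field tag) and
// returns len(data) - i, the number of bytes written. encodeVarintGenerated is
// reworked below to write each varint into a pre-reserved region, matching the
// same back-to-front scheme.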
+func (m *Quantity) MarshalToSizedBuffer(data []byte) (int, error) { + i := len(data) _ = i var l int _ = l - data[i] = 0xa - i++ // BEGIN CUSTOM MARSHAL out := m.String() + i -= len(out) + copy(data[i:], out) i = encodeVarintGenerated(data, i, uint64(len(out))) - i += copy(data[i:], out) // END CUSTOM MARSHAL + i-- + data[i] = 0xa - return i, nil + return len(data) - i, nil } func encodeVarintGenerated(data []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { data[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } data[offset] = uint8(v) - return offset + 1 + return base } func (m *Quantity) Size() (n int) { @@ -77,14 +88,7 @@ func (m *Quantity) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (bits.Len64(x|1) + 6) / 7 } // Unmarshal is a customized version of the generated Protobuf unmarshaler for a struct diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go deleted file mode 100644 index bdb71340a..000000000 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package internalversion - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/conversion" -) - -func Convert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out *metav1.ListOptions, s conversion.Scope) error { - if err := metav1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil { - return err - } - if err := metav1.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil { - return err - } - out.ResourceVersion = in.ResourceVersion - out.TimeoutSeconds = in.TimeoutSeconds - out.Watch = in.Watch - out.Limit = in.Limit - out.Continue = in.Continue - return nil -} - -func Convert_v1_ListOptions_To_internalversion_ListOptions(in *metav1.ListOptions, out *ListOptions, s conversion.Scope) error { - if err := metav1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil { - return err - } - if err := metav1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { - return err - } - out.ResourceVersion = in.ResourceVersion - out.TimeoutSeconds = in.TimeoutSeconds - out.Watch = in.Watch - out.Limit = in.Limit - out.Continue = in.Continue - return nil -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go index 46b8605f4..3fea2c380 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go @@ -66,9 +66,6 @@ func addToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) metav1.Convert_Map_string_To_string_To_v1_LabelSelector, metav1.Convert_v1_LabelSelector_To_Map_string_To_string, - - Convert_internalversion_ListOptions_To_v1_ListOptions, - Convert_v1_ListOptions_To_internalversion_ListOptions, ) if err != nil { return err @@ -89,12 +86,12 @@ func addToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) &metav1beta1.PartialObjectMetadata{}, &metav1beta1.PartialObjectMetadataList{}, ) - scheme.AddKnownTypes(metav1beta1.SchemeGroupVersion, - &metav1beta1.Table{}, - &metav1beta1.TableOptions{}, - &metav1beta1.PartialObjectMetadata{}, - &metav1beta1.PartialObjectMetadataList{}, - ) + if err := metav1beta1.AddMetaToScheme(scheme); err != nil { + return err + } + if err := metav1.AddMetaToScheme(scheme); err != nil { + return err + } // Allow delete options to be decoded across all version in this scheme (we may want to be more clever than this) scheme.AddUnversionedTypes(SchemeGroupVersion, &metav1.DeleteOptions{}, diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go index e434e5055..8d2544168 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go @@ -35,6 +35,15 @@ type ListOptions struct { FieldSelector fields.Selector // If true, watch for changes to this list Watch bool + // allowWatchBookmarks requests watch events with type "BOOKMARK". + // Servers that do not implement bookmarks may ignore this flag and + // bookmarks are sent at the server's discretion. Clients should not + // assume bookmarks are returned at any specific interval, nor may they + // assume the server will send any BOOKMARK event during a session. + // If this is not a watch, this field is ignored. 
+ // If the feature gate WatchBookmarks is not enabled in apiserver, + // this field is ignored. + AllowWatchBookmarks bool // When specified with a watch call, shows changes that occur after that particular version of a resource. // Defaults to changes from the beginning of history. // When specified for list: diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go index 79b756736..35adbca12 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go @@ -55,16 +55,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*ListOptions)(nil), (*v1.ListOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_internalversion_ListOptions_To_v1_ListOptions(a.(*ListOptions), b.(*v1.ListOptions), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1.ListOptions)(nil), (*ListOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ListOptions_To_internalversion_ListOptions(a.(*v1.ListOptions), b.(*ListOptions), scope) - }); err != nil { - return err - } return nil } @@ -118,6 +108,7 @@ func autoConvert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, return err } out.Watch = in.Watch + out.AllowWatchBookmarks = in.AllowWatchBookmarks out.ResourceVersion = in.ResourceVersion out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) out.Limit = in.Limit @@ -125,6 +116,11 @@ func autoConvert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, return nil } +// Convert_internalversion_ListOptions_To_v1_ListOptions is an autogenerated conversion function. +func Convert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out *v1.ListOptions, s conversion.Scope) error { + return autoConvert_internalversion_ListOptions_To_v1_ListOptions(in, out, s) +} + func autoConvert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOptions, out *ListOptions, s conversion.Scope) error { if err := v1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { return err @@ -133,9 +129,15 @@ func autoConvert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOption return err } out.Watch = in.Watch + out.AllowWatchBookmarks = in.AllowWatchBookmarks out.ResourceVersion = in.ResourceVersion out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) out.Limit = in.Limit out.Continue = in.Continue return nil } + +// Convert_v1_ListOptions_To_internalversion_ListOptions is an autogenerated conversion function. 
+func Convert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOptions, out *ListOptions, s conversion.Scope) error { + return autoConvert_v1_ListOptions_To_internalversion_ListOptions(in, out, s) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go index 81d85e96e..d5e4fc680 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go @@ -28,7 +28,7 @@ import ( func (in *List) DeepCopyInto(out *List) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]runtime.Object, len(*in)) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go index 5c36f82c1..d07069ef2 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go @@ -77,6 +77,8 @@ func AddConversionFuncs(scheme *runtime.Scheme) error { Convert_Slice_string_To_Slice_int32, Convert_Slice_string_To_v1_DeletionPropagation, + + Convert_Slice_string_To_v1_IncludeObjectPolicy, ) } @@ -317,3 +319,11 @@ func Convert_Slice_string_To_v1_DeletionPropagation(input *[]string, out *Deleti } return nil } + +// Convert_Slice_string_To_v1_IncludeObjectPolicy allows converting a URL query parameter value +func Convert_Slice_string_To_v1_IncludeObjectPolicy(input *[]string, out *IncludeObjectPolicy, s conversion.Scope) error { + if len(*input) > 0 { + *out = IncludeObjectPolicy((*input)[0]) + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go new file mode 100644 index 000000000..8751d0524 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go @@ -0,0 +1,46 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func (in *TableRow) DeepCopy() *TableRow { + if in == nil { + return nil + } + + out := new(TableRow) + + if in.Cells != nil { + out.Cells = make([]interface{}, len(in.Cells)) + for i := range in.Cells { + out.Cells[i] = runtime.DeepCopyJSONValue(in.Cells[i]) + } + } + + if in.Conditions != nil { + out.Conditions = make([]TableRowCondition, len(in.Conditions)) + for i := range in.Conditions { + in.Conditions[i].DeepCopyInto(&out.Conditions[i]) + } + } + + in.Object.DeepCopyInto(&out.Object) + return out +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index 69e650993..31b1d955e 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -17,73 +17,25 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto - - It has these top-level messages: - APIGroup - APIGroupList - APIResource - APIResourceList - APIVersions - CreateOptions - DeleteOptions - Duration - ExportOptions - Fields - GetOptions - GroupKind - GroupResource - GroupVersion - GroupVersionForDiscovery - GroupVersionKind - GroupVersionResource - Initializer - Initializers - LabelSelector - LabelSelectorRequirement - List - ListMeta - ListOptions - ManagedFieldsEntry - MicroTime - ObjectMeta - OwnerReference - Patch - PatchOptions - Preconditions - RootPaths - ServerAddressByClientCIDR - Status - StatusCause - StatusDetails - Time - Timestamp - TypeMeta - UpdateOptions - Verbs - WatchEvent -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_runtime "k8s.io/apimachinery/pkg/runtime" + io "io" -import time "time" -import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + runtime "k8s.io/apimachinery/pkg/runtime" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + time "time" -import strings "strings" -import reflect "reflect" - -import io "io" + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -97,179 +49,1199 @@ var _ = time.Kitchen // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *APIGroup) Reset() { *m = APIGroup{} } -func (*APIGroup) ProtoMessage() {} -func (*APIGroup) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *APIGroup) Reset() { *m = APIGroup{} } +func (*APIGroup) ProtoMessage() {} +func (*APIGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{0} +} +func (m *APIGroup) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIGroup.Merge(m, src) +} +func (m *APIGroup) XXX_Size() int { + return m.Size() +} +func (m *APIGroup) XXX_DiscardUnknown() { + xxx_messageInfo_APIGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_APIGroup proto.InternalMessageInfo + +func (m *APIGroupList) Reset() { *m = APIGroupList{} } +func (*APIGroupList) ProtoMessage() {} +func (*APIGroupList) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{1} +} +func (m *APIGroupList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIGroupList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIGroupList) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIGroupList.Merge(m, src) +} +func (m *APIGroupList) XXX_Size() int { + return m.Size() +} +func (m *APIGroupList) XXX_DiscardUnknown() { + xxx_messageInfo_APIGroupList.DiscardUnknown(m) +} + +var xxx_messageInfo_APIGroupList proto.InternalMessageInfo + +func (m *APIResource) Reset() { *m = APIResource{} } +func (*APIResource) ProtoMessage() {} +func (*APIResource) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{2} +} +func (m *APIResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIResource.Merge(m, src) +} +func (m *APIResource) XXX_Size() int { + return m.Size() +} +func (m *APIResource) XXX_DiscardUnknown() { + xxx_messageInfo_APIResource.DiscardUnknown(m) +} + +var xxx_messageInfo_APIResource proto.InternalMessageInfo + +func (m *APIResourceList) Reset() { *m = APIResourceList{} } +func (*APIResourceList) ProtoMessage() {} +func (*APIResourceList) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{3} +} +func (m *APIResourceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIResourceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIResourceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIResourceList.Merge(m, src) +} +func (m *APIResourceList) XXX_Size() int { + return m.Size() +} +func (m *APIResourceList) XXX_DiscardUnknown() { + xxx_messageInfo_APIResourceList.DiscardUnknown(m) +} + +var xxx_messageInfo_APIResourceList proto.InternalMessageInfo + +func (m *APIVersions) Reset() { *m = APIVersions{} } +func 
(*APIVersions) ProtoMessage() {} +func (*APIVersions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{4} +} +func (m *APIVersions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIVersions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIVersions) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIVersions.Merge(m, src) +} +func (m *APIVersions) XXX_Size() int { + return m.Size() +} +func (m *APIVersions) XXX_DiscardUnknown() { + xxx_messageInfo_APIVersions.DiscardUnknown(m) +} + +var xxx_messageInfo_APIVersions proto.InternalMessageInfo -func (m *APIGroupList) Reset() { *m = APIGroupList{} } -func (*APIGroupList) ProtoMessage() {} -func (*APIGroupList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *CreateOptions) Reset() { *m = CreateOptions{} } +func (*CreateOptions) ProtoMessage() {} +func (*CreateOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{5} +} +func (m *CreateOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CreateOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CreateOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateOptions.Merge(m, src) +} +func (m *CreateOptions) XXX_Size() int { + return m.Size() +} +func (m *CreateOptions) XXX_DiscardUnknown() { + xxx_messageInfo_CreateOptions.DiscardUnknown(m) +} -func (m *APIResource) Reset() { *m = APIResource{} } -func (*APIResource) ProtoMessage() {} -func (*APIResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_CreateOptions proto.InternalMessageInfo -func (m *APIResourceList) Reset() { *m = APIResourceList{} } -func (*APIResourceList) ProtoMessage() {} -func (*APIResourceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } +func (*DeleteOptions) ProtoMessage() {} +func (*DeleteOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{6} +} +func (m *DeleteOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeleteOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeleteOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteOptions.Merge(m, src) +} +func (m *DeleteOptions) XXX_Size() int { + return m.Size() +} +func (m *DeleteOptions) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteOptions.DiscardUnknown(m) +} -func (m *APIVersions) Reset() { *m = APIVersions{} } -func (*APIVersions) ProtoMessage() {} -func (*APIVersions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_DeleteOptions proto.InternalMessageInfo -func (m *CreateOptions) Reset() { *m = CreateOptions{} } -func (*CreateOptions) ProtoMessage() {} -func (*CreateOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (m *Duration) Reset() { *m = Duration{} } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{7} +} +func (m 
*Duration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(m, src) +} +func (m *Duration) XXX_Size() int { + return m.Size() +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) +} + +var xxx_messageInfo_Duration proto.InternalMessageInfo + +func (m *ExportOptions) Reset() { *m = ExportOptions{} } +func (*ExportOptions) ProtoMessage() {} +func (*ExportOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{8} +} +func (m *ExportOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExportOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExportOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportOptions.Merge(m, src) +} +func (m *ExportOptions) XXX_Size() int { + return m.Size() +} +func (m *ExportOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExportOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportOptions proto.InternalMessageInfo -func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } -func (*DeleteOptions) ProtoMessage() {} -func (*DeleteOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *FieldsV1) Reset() { *m = FieldsV1{} } +func (*FieldsV1) ProtoMessage() {} +func (*FieldsV1) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{9} +} +func (m *FieldsV1) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FieldsV1) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FieldsV1) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldsV1.Merge(m, src) +} +func (m *FieldsV1) XXX_Size() int { + return m.Size() +} +func (m *FieldsV1) XXX_DiscardUnknown() { + xxx_messageInfo_FieldsV1.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldsV1 proto.InternalMessageInfo + +func (m *GetOptions) Reset() { *m = GetOptions{} } +func (*GetOptions) ProtoMessage() {} +func (*GetOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{10} +} +func (m *GetOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GetOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetOptions.Merge(m, src) +} +func (m *GetOptions) XXX_Size() int { + return m.Size() +} +func (m *GetOptions) XXX_DiscardUnknown() { + xxx_messageInfo_GetOptions.DiscardUnknown(m) +} -func (m *Duration) Reset() { *m = Duration{} } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_GetOptions proto.InternalMessageInfo -func (m *ExportOptions) Reset() { *m = ExportOptions{} } -func (*ExportOptions) ProtoMessage() {} -func (*ExportOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *GroupKind) Reset() { 
*m = GroupKind{} } +func (*GroupKind) ProtoMessage() {} +func (*GroupKind) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{11} +} +func (m *GroupKind) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupKind) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupKind.Merge(m, src) +} +func (m *GroupKind) XXX_Size() int { + return m.Size() +} +func (m *GroupKind) XXX_DiscardUnknown() { + xxx_messageInfo_GroupKind.DiscardUnknown(m) +} -func (m *Fields) Reset() { *m = Fields{} } -func (*Fields) ProtoMessage() {} -func (*Fields) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_GroupKind proto.InternalMessageInfo -func (m *GetOptions) Reset() { *m = GetOptions{} } -func (*GetOptions) ProtoMessage() {} -func (*GetOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *GroupResource) Reset() { *m = GroupResource{} } +func (*GroupResource) ProtoMessage() {} +func (*GroupResource) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{12} +} +func (m *GroupResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupResource.Merge(m, src) +} +func (m *GroupResource) XXX_Size() int { + return m.Size() +} +func (m *GroupResource) XXX_DiscardUnknown() { + xxx_messageInfo_GroupResource.DiscardUnknown(m) +} -func (m *GroupKind) Reset() { *m = GroupKind{} } -func (*GroupKind) ProtoMessage() {} -func (*GroupKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_GroupResource proto.InternalMessageInfo -func (m *GroupResource) Reset() { *m = GroupResource{} } -func (*GroupResource) ProtoMessage() {} -func (*GroupResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (m *GroupVersion) Reset() { *m = GroupVersion{} } +func (*GroupVersion) ProtoMessage() {} +func (*GroupVersion) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{13} +} +func (m *GroupVersion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupVersion.Merge(m, src) +} +func (m *GroupVersion) XXX_Size() int { + return m.Size() +} +func (m *GroupVersion) XXX_DiscardUnknown() { + xxx_messageInfo_GroupVersion.DiscardUnknown(m) +} -func (m *GroupVersion) Reset() { *m = GroupVersion{} } -func (*GroupVersion) ProtoMessage() {} -func (*GroupVersion) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +var xxx_messageInfo_GroupVersion proto.InternalMessageInfo func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} } func (*GroupVersionForDiscovery) ProtoMessage() {} func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{14} + return 
fileDescriptor_cf52fa777ced5367, []int{14} +} +func (m *GroupVersionForDiscovery) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupVersionForDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupVersionForDiscovery) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupVersionForDiscovery.Merge(m, src) +} +func (m *GroupVersionForDiscovery) XXX_Size() int { + return m.Size() +} +func (m *GroupVersionForDiscovery) XXX_DiscardUnknown() { + xxx_messageInfo_GroupVersionForDiscovery.DiscardUnknown(m) } -func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } -func (*GroupVersionKind) ProtoMessage() {} -func (*GroupVersionKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +var xxx_messageInfo_GroupVersionForDiscovery proto.InternalMessageInfo -func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } -func (*GroupVersionResource) ProtoMessage() {} -func (*GroupVersionResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } +func (*GroupVersionKind) ProtoMessage() {} +func (*GroupVersionKind) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{15} +} +func (m *GroupVersionKind) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupVersionKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupVersionKind) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupVersionKind.Merge(m, src) +} +func (m *GroupVersionKind) XXX_Size() int { + return m.Size() +} +func (m *GroupVersionKind) XXX_DiscardUnknown() { + xxx_messageInfo_GroupVersionKind.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupVersionKind proto.InternalMessageInfo + +func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } +func (*GroupVersionResource) ProtoMessage() {} +func (*GroupVersionResource) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{16} +} +func (m *GroupVersionResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupVersionResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupVersionResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupVersionResource.Merge(m, src) +} +func (m *GroupVersionResource) XXX_Size() int { + return m.Size() +} +func (m *GroupVersionResource) XXX_DiscardUnknown() { + xxx_messageInfo_GroupVersionResource.DiscardUnknown(m) +} -func (m *Initializer) Reset() { *m = Initializer{} } -func (*Initializer) ProtoMessage() {} -func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +var xxx_messageInfo_GroupVersionResource proto.InternalMessageInfo -func (m *Initializers) Reset() { *m = Initializers{} } -func (*Initializers) ProtoMessage() {} -func (*Initializers) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (m *LabelSelector) Reset() { *m = LabelSelector{} } +func (*LabelSelector) ProtoMessage() {} +func (*LabelSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{17} +} +func 
(m *LabelSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LabelSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelSelector.Merge(m, src) +} +func (m *LabelSelector) XXX_Size() int { + return m.Size() +} +func (m *LabelSelector) XXX_DiscardUnknown() { + xxx_messageInfo_LabelSelector.DiscardUnknown(m) +} -func (m *LabelSelector) Reset() { *m = LabelSelector{} } -func (*LabelSelector) ProtoMessage() {} -func (*LabelSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +var xxx_messageInfo_LabelSelector proto.InternalMessageInfo func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } func (*LabelSelectorRequirement) ProtoMessage() {} func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{20} + return fileDescriptor_cf52fa777ced5367, []int{18} +} +func (m *LabelSelectorRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LabelSelectorRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelSelectorRequirement.Merge(m, src) +} +func (m *LabelSelectorRequirement) XXX_Size() int { + return m.Size() +} +func (m *LabelSelectorRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_LabelSelectorRequirement.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelSelectorRequirement proto.InternalMessageInfo + +func (m *List) Reset() { *m = List{} } +func (*List) ProtoMessage() {} +func (*List) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{19} +} +func (m *List) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *List) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *List) XXX_Merge(src proto.Message) { + xxx_messageInfo_List.Merge(m, src) +} +func (m *List) XXX_Size() int { + return m.Size() +} +func (m *List) XXX_DiscardUnknown() { + xxx_messageInfo_List.DiscardUnknown(m) +} + +var xxx_messageInfo_List proto.InternalMessageInfo + +func (m *ListMeta) Reset() { *m = ListMeta{} } +func (*ListMeta) ProtoMessage() {} +func (*ListMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{20} +} +func (m *ListMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ListMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListMeta.Merge(m, src) +} +func (m *ListMeta) XXX_Size() int { + return m.Size() +} +func (m *ListMeta) XXX_DiscardUnknown() { + xxx_messageInfo_ListMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_ListMeta proto.InternalMessageInfo + +func (m *ListOptions) Reset() { *m = ListOptions{} } +func (*ListOptions) ProtoMessage() {} +func (*ListOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{21} +} +func (m *ListOptions) XXX_Unmarshal(b []byte) error { 
+ return m.Unmarshal(b) +} +func (m *ListOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ListOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListOptions.Merge(m, src) } +func (m *ListOptions) XXX_Size() int { + return m.Size() +} +func (m *ListOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ListOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ListOptions proto.InternalMessageInfo + +func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} } +func (*ManagedFieldsEntry) ProtoMessage() {} +func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{22} +} +func (m *ManagedFieldsEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ManagedFieldsEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ManagedFieldsEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_ManagedFieldsEntry.Merge(m, src) +} +func (m *ManagedFieldsEntry) XXX_Size() int { + return m.Size() +} +func (m *ManagedFieldsEntry) XXX_DiscardUnknown() { + xxx_messageInfo_ManagedFieldsEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_ManagedFieldsEntry proto.InternalMessageInfo -func (m *List) Reset() { *m = List{} } -func (*List) ProtoMessage() {} -func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (m *MicroTime) Reset() { *m = MicroTime{} } +func (*MicroTime) ProtoMessage() {} +func (*MicroTime) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{23} +} +func (m *MicroTime) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MicroTime.Unmarshal(m, b) +} +func (m *MicroTime) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MicroTime.Marshal(b, m, deterministic) +} +func (m *MicroTime) XXX_Merge(src proto.Message) { + xxx_messageInfo_MicroTime.Merge(m, src) +} +func (m *MicroTime) XXX_Size() int { + return xxx_messageInfo_MicroTime.Size(m) +} +func (m *MicroTime) XXX_DiscardUnknown() { + xxx_messageInfo_MicroTime.DiscardUnknown(m) +} + +var xxx_messageInfo_MicroTime proto.InternalMessageInfo -func (m *ListMeta) Reset() { *m = ListMeta{} } -func (*ListMeta) ProtoMessage() {} -func (*ListMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } +func (*ObjectMeta) ProtoMessage() {} +func (*ObjectMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{24} +} +func (m *ObjectMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMeta.Merge(m, src) +} +func (m *ObjectMeta) XXX_Size() int { + return m.Size() +} +func (m *ObjectMeta) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMeta proto.InternalMessageInfo + +func (m *OwnerReference) Reset() { *m = OwnerReference{} } +func (*OwnerReference) ProtoMessage() {} +func (*OwnerReference) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, 
[]int{25} +} +func (m *OwnerReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OwnerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OwnerReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_OwnerReference.Merge(m, src) +} +func (m *OwnerReference) XXX_Size() int { + return m.Size() +} +func (m *OwnerReference) XXX_DiscardUnknown() { + xxx_messageInfo_OwnerReference.DiscardUnknown(m) +} + +var xxx_messageInfo_OwnerReference proto.InternalMessageInfo + +func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} } +func (*PartialObjectMetadata) ProtoMessage() {} +func (*PartialObjectMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{26} +} +func (m *PartialObjectMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PartialObjectMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PartialObjectMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartialObjectMetadata.Merge(m, src) +} +func (m *PartialObjectMetadata) XXX_Size() int { + return m.Size() +} +func (m *PartialObjectMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_PartialObjectMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_PartialObjectMetadata proto.InternalMessageInfo + +func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } +func (*PartialObjectMetadataList) ProtoMessage() {} +func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{27} +} +func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PartialObjectMetadataList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PartialObjectMetadataList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartialObjectMetadataList.Merge(m, src) +} +func (m *PartialObjectMetadataList) XXX_Size() int { + return m.Size() +} +func (m *PartialObjectMetadataList) XXX_DiscardUnknown() { + xxx_messageInfo_PartialObjectMetadataList.DiscardUnknown(m) +} -func (m *ListOptions) Reset() { *m = ListOptions{} } -func (*ListOptions) ProtoMessage() {} -func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo -func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} } -func (*ManagedFieldsEntry) ProtoMessage() {} -func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (m *Patch) Reset() { *m = Patch{} } +func (*Patch) ProtoMessage() {} +func (*Patch) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{28} +} +func (m *Patch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Patch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Patch) XXX_Merge(src proto.Message) { + xxx_messageInfo_Patch.Merge(m, src) +} +func (m *Patch) XXX_Size() int { + return m.Size() +} +func (m *Patch) 
XXX_DiscardUnknown() { + xxx_messageInfo_Patch.DiscardUnknown(m) +} -func (m *MicroTime) Reset() { *m = MicroTime{} } -func (*MicroTime) ProtoMessage() {} -func (*MicroTime) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +var xxx_messageInfo_Patch proto.InternalMessageInfo -func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } -func (*ObjectMeta) ProtoMessage() {} -func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (m *PatchOptions) Reset() { *m = PatchOptions{} } +func (*PatchOptions) ProtoMessage() {} +func (*PatchOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{29} +} +func (m *PatchOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PatchOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PatchOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PatchOptions.Merge(m, src) +} +func (m *PatchOptions) XXX_Size() int { + return m.Size() +} +func (m *PatchOptions) XXX_DiscardUnknown() { + xxx_messageInfo_PatchOptions.DiscardUnknown(m) +} -func (m *OwnerReference) Reset() { *m = OwnerReference{} } -func (*OwnerReference) ProtoMessage() {} -func (*OwnerReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +var xxx_messageInfo_PatchOptions proto.InternalMessageInfo -func (m *Patch) Reset() { *m = Patch{} } -func (*Patch) ProtoMessage() {} -func (*Patch) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (m *Preconditions) Reset() { *m = Preconditions{} } +func (*Preconditions) ProtoMessage() {} +func (*Preconditions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{30} +} +func (m *Preconditions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Preconditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Preconditions) XXX_Merge(src proto.Message) { + xxx_messageInfo_Preconditions.Merge(m, src) +} +func (m *Preconditions) XXX_Size() int { + return m.Size() +} +func (m *Preconditions) XXX_DiscardUnknown() { + xxx_messageInfo_Preconditions.DiscardUnknown(m) +} -func (m *PatchOptions) Reset() { *m = PatchOptions{} } -func (*PatchOptions) ProtoMessage() {} -func (*PatchOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +var xxx_messageInfo_Preconditions proto.InternalMessageInfo -func (m *Preconditions) Reset() { *m = Preconditions{} } -func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +func (m *RootPaths) Reset() { *m = RootPaths{} } +func (*RootPaths) ProtoMessage() {} +func (*RootPaths) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{31} +} +func (m *RootPaths) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RootPaths) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RootPaths) XXX_Merge(src proto.Message) { + xxx_messageInfo_RootPaths.Merge(m, src) +} +func (m *RootPaths) XXX_Size() int { + return m.Size() +} +func (m *RootPaths) XXX_DiscardUnknown() { + 
xxx_messageInfo_RootPaths.DiscardUnknown(m) +} -func (m *RootPaths) Reset() { *m = RootPaths{} } -func (*RootPaths) ProtoMessage() {} -func (*RootPaths) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } +var xxx_messageInfo_RootPaths proto.InternalMessageInfo func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } func (*ServerAddressByClientCIDR) ProtoMessage() {} func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{32} + return fileDescriptor_cf52fa777ced5367, []int{32} +} +func (m *ServerAddressByClientCIDR) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServerAddressByClientCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServerAddressByClientCIDR) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerAddressByClientCIDR.Merge(m, src) +} +func (m *ServerAddressByClientCIDR) XXX_Size() int { + return m.Size() +} +func (m *ServerAddressByClientCIDR) XXX_DiscardUnknown() { + xxx_messageInfo_ServerAddressByClientCIDR.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerAddressByClientCIDR proto.InternalMessageInfo + +func (m *Status) Reset() { *m = Status{} } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{33} +} +func (m *Status) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(m, src) +} +func (m *Status) XXX_Size() int { + return m.Size() +} +func (m *Status) XXX_DiscardUnknown() { + xxx_messageInfo_Status.DiscardUnknown(m) +} + +var xxx_messageInfo_Status proto.InternalMessageInfo + +func (m *StatusCause) Reset() { *m = StatusCause{} } +func (*StatusCause) ProtoMessage() {} +func (*StatusCause) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{34} +} +func (m *StatusCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatusCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusCause.Merge(m, src) +} +func (m *StatusCause) XXX_Size() int { + return m.Size() +} +func (m *StatusCause) XXX_DiscardUnknown() { + xxx_messageInfo_StatusCause.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusCause proto.InternalMessageInfo + +func (m *StatusDetails) Reset() { *m = StatusDetails{} } +func (*StatusDetails) ProtoMessage() {} +func (*StatusDetails) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{35} +} +func (m *StatusDetails) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatusDetails) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusDetails.Merge(m, src) +} +func (m *StatusDetails) XXX_Size() int { + return m.Size() +} +func (m *StatusDetails) 
XXX_DiscardUnknown() { + xxx_messageInfo_StatusDetails.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusDetails proto.InternalMessageInfo + +func (m *TableOptions) Reset() { *m = TableOptions{} } +func (*TableOptions) ProtoMessage() {} +func (*TableOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{36} +} +func (m *TableOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TableOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TableOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_TableOptions.Merge(m, src) +} +func (m *TableOptions) XXX_Size() int { + return m.Size() +} +func (m *TableOptions) XXX_DiscardUnknown() { + xxx_messageInfo_TableOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_TableOptions proto.InternalMessageInfo + +func (m *Time) Reset() { *m = Time{} } +func (*Time) ProtoMessage() {} +func (*Time) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{37} +} +func (m *Time) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Time.Unmarshal(m, b) +} +func (m *Time) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Time.Marshal(b, m, deterministic) +} +func (m *Time) XXX_Merge(src proto.Message) { + xxx_messageInfo_Time.Merge(m, src) +} +func (m *Time) XXX_Size() int { + return xxx_messageInfo_Time.Size(m) +} +func (m *Time) XXX_DiscardUnknown() { + xxx_messageInfo_Time.DiscardUnknown(m) +} + +var xxx_messageInfo_Time proto.InternalMessageInfo + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{38} +} +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(m, src) +} +func (m *Timestamp) XXX_Size() int { + return m.Size() +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) } -func (m *Status) Reset() { *m = Status{} } -func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +var xxx_messageInfo_Timestamp proto.InternalMessageInfo -func (m *StatusCause) Reset() { *m = StatusCause{} } -func (*StatusCause) ProtoMessage() {} -func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } +func (m *TypeMeta) Reset() { *m = TypeMeta{} } +func (*TypeMeta) ProtoMessage() {} +func (*TypeMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{39} +} +func (m *TypeMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TypeMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TypeMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypeMeta.Merge(m, src) +} +func (m *TypeMeta) XXX_Size() int { + return m.Size() +} +func (m *TypeMeta) XXX_DiscardUnknown() { + xxx_messageInfo_TypeMeta.DiscardUnknown(m) +} -func (m *StatusDetails) Reset() { *m = 
StatusDetails{} } -func (*StatusDetails) ProtoMessage() {} -func (*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +var xxx_messageInfo_TypeMeta proto.InternalMessageInfo -func (m *Time) Reset() { *m = Time{} } -func (*Time) ProtoMessage() {} -func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +func (m *UpdateOptions) Reset() { *m = UpdateOptions{} } +func (*UpdateOptions) ProtoMessage() {} +func (*UpdateOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{40} +} +func (m *UpdateOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpdateOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UpdateOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateOptions.Merge(m, src) +} +func (m *UpdateOptions) XXX_Size() int { + return m.Size() +} +func (m *UpdateOptions) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateOptions.DiscardUnknown(m) +} -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } +var xxx_messageInfo_UpdateOptions proto.InternalMessageInfo -func (m *TypeMeta) Reset() { *m = TypeMeta{} } -func (*TypeMeta) ProtoMessage() {} -func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +func (m *Verbs) Reset() { *m = Verbs{} } +func (*Verbs) ProtoMessage() {} +func (*Verbs) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{41} +} +func (m *Verbs) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Verbs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Verbs) XXX_Merge(src proto.Message) { + xxx_messageInfo_Verbs.Merge(m, src) +} +func (m *Verbs) XXX_Size() int { + return m.Size() +} +func (m *Verbs) XXX_DiscardUnknown() { + xxx_messageInfo_Verbs.DiscardUnknown(m) +} -func (m *UpdateOptions) Reset() { *m = UpdateOptions{} } -func (*UpdateOptions) ProtoMessage() {} -func (*UpdateOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } +var xxx_messageInfo_Verbs proto.InternalMessageInfo -func (m *Verbs) Reset() { *m = Verbs{} } -func (*Verbs) ProtoMessage() {} -func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +func (m *WatchEvent) Reset() { *m = WatchEvent{} } +func (*WatchEvent) ProtoMessage() {} +func (*WatchEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{42} +} +func (m *WatchEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WatchEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WatchEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_WatchEvent.Merge(m, src) +} +func (m *WatchEvent) XXX_Size() int { + return m.Size() +} +func (m *WatchEvent) XXX_DiscardUnknown() { + xxx_messageInfo_WatchEvent.DiscardUnknown(m) +} -func (m *WatchEvent) Reset() { *m = WatchEvent{} } -func (*WatchEvent) ProtoMessage() {} -func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{41} } +var xxx_messageInfo_WatchEvent proto.InternalMessageInfo func init() { proto.RegisterType((*APIGroup)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroup") @@ -281,7 +1253,7 @@ func init() { proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions") proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration") proto.RegisterType((*ExportOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ExportOptions") - proto.RegisterType((*Fields)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Fields") + proto.RegisterType((*FieldsV1)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldsV1") proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions") proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind") proto.RegisterType((*GroupResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupResource") @@ -289,9 +1261,8 @@ func init() { proto.RegisterType((*GroupVersionForDiscovery)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery") proto.RegisterType((*GroupVersionKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind") proto.RegisterType((*GroupVersionResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource") - proto.RegisterType((*Initializer)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Initializer") - proto.RegisterType((*Initializers)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Initializers") proto.RegisterType((*LabelSelector)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector.MatchLabelsEntry") proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement") proto.RegisterType((*List)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.List") proto.RegisterType((*ListMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta") @@ -299,7 +1270,11 @@ func init() { proto.RegisterType((*ManagedFieldsEntry)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry") proto.RegisterType((*MicroTime)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime") proto.RegisterType((*ObjectMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta.AnnotationsEntry") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta.LabelsEntry") proto.RegisterType((*OwnerReference)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference") + proto.RegisterType((*PartialObjectMetadata)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata") + proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadataList") proto.RegisterType((*Patch)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Patch") proto.RegisterType((*PatchOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PatchOptions") proto.RegisterType((*Preconditions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Preconditions") @@ -308,6 +1283,7 @@ func init() { proto.RegisterType((*Status)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Status") proto.RegisterType((*StatusCause)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusCause") proto.RegisterType((*StatusDetails)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusDetails") + proto.RegisterType((*TableOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.TableOptions") 
proto.RegisterType((*Time)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Time") proto.RegisterType((*Timestamp)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Timestamp") proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.TypeMeta") @@ -315,10 +1291,189 @@ func init() { proto.RegisterType((*Verbs)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Verbs") proto.RegisterType((*WatchEvent)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.WatchEvent") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto", fileDescriptor_cf52fa777ced5367) +} + +var fileDescriptor_cf52fa777ced5367 = []byte{ + // 2713 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x19, 0xcd, 0x6f, 0x1b, 0x59, + 0x3d, 0x63, 0xc7, 0x8e, 0xfd, 0x73, 0x9c, 0x8f, 0x97, 0x16, 0xdc, 0x00, 0x71, 0x76, 0x16, 0xad, + 0x52, 0xe8, 0x3a, 0x9b, 0x02, 0xab, 0xd2, 0x65, 0x0b, 0x71, 0x9c, 0x74, 0xc3, 0x36, 0x4d, 0xf4, + 0xd2, 0x16, 0x28, 0x15, 0xea, 0x64, 0xe6, 0xc5, 0x19, 0x32, 0x9e, 0xf1, 0xbe, 0x19, 0x27, 0x35, + 0x1c, 0xd8, 0x03, 0x08, 0x90, 0x60, 0xd5, 0x23, 0xe2, 0x80, 0xb6, 0x82, 0xbf, 0x80, 0x13, 0x7f, + 0x00, 0x12, 0xbd, 0x20, 0xad, 0xc4, 0x65, 0x25, 0x90, 0xb5, 0x0d, 0x07, 0x8e, 0x88, 0x6b, 0x4e, + 0xe8, 0x7d, 0xcd, 0x87, 0x1d, 0x37, 0x63, 0xba, 0xac, 0xf6, 0xe6, 0xf9, 0x7d, 0xff, 0xde, 0xfb, + 0xbd, 0xdf, 0x97, 0x61, 0xeb, 0xf0, 0x9a, 0x5f, 0xb3, 0xbd, 0xe5, 0xc3, 0xce, 0x1e, 0xa1, 0x2e, + 0x09, 0x88, 0xbf, 0x7c, 0x44, 0x5c, 0xcb, 0xa3, 0xcb, 0x12, 0x61, 0xb4, 0xed, 0x96, 0x61, 0x1e, + 0xd8, 0x2e, 0xa1, 0xdd, 0xe5, 0xf6, 0x61, 0x93, 0x01, 0xfc, 0xe5, 0x16, 0x09, 0x8c, 0xe5, 0xa3, + 0x95, 0xe5, 0x26, 0x71, 0x09, 0x35, 0x02, 0x62, 0xd5, 0xda, 0xd4, 0x0b, 0x3c, 0xf4, 0x45, 0xc1, + 0x55, 0x8b, 0x73, 0xd5, 0xda, 0x87, 0x4d, 0x06, 0xf0, 0x6b, 0x8c, 0xab, 0x76, 0xb4, 0x32, 0xff, + 0x6a, 0xd3, 0x0e, 0x0e, 0x3a, 0x7b, 0x35, 0xd3, 0x6b, 0x2d, 0x37, 0xbd, 0xa6, 0xb7, 0xcc, 0x99, + 0xf7, 0x3a, 0xfb, 0xfc, 0x8b, 0x7f, 0xf0, 0x5f, 0x42, 0xe8, 0xfc, 0x50, 0x53, 0x68, 0xc7, 0x0d, + 0xec, 0x16, 0xe9, 0xb7, 0x62, 0xfe, 0xf5, 0xf3, 0x18, 0x7c, 0xf3, 0x80, 0xb4, 0x8c, 0x7e, 0x3e, + 0xfd, 0x2f, 0x59, 0x28, 0xac, 0xee, 0x6c, 0xde, 0xa4, 0x5e, 0xa7, 0x8d, 0x16, 0x61, 0xdc, 0x35, + 0x5a, 0xa4, 0xa2, 0x2d, 0x6a, 0x4b, 0xc5, 0xfa, 0xe4, 0xd3, 0x5e, 0x75, 0xec, 0xa4, 0x57, 0x1d, + 0xbf, 0x6d, 0xb4, 0x08, 0xe6, 0x18, 0xe4, 0x40, 0xe1, 0x88, 0x50, 0xdf, 0xf6, 0x5c, 0xbf, 0x92, + 0x59, 0xcc, 0x2e, 0x95, 0xae, 0xde, 0xa8, 0xa5, 0xf1, 0xbf, 0xc6, 0x15, 0xdc, 0x13, 0xac, 0x1b, + 0x1e, 0x6d, 0xd8, 0xbe, 0xe9, 0x1d, 0x11, 0xda, 0xad, 0xcf, 0x48, 0x2d, 0x05, 0x89, 0xf4, 0x71, + 0xa8, 0x01, 0xfd, 0x54, 0x83, 0x99, 0x36, 0x25, 0xfb, 0x84, 0x52, 0x62, 0x49, 0x7c, 0x25, 0xbb, + 0xa8, 0x7d, 0x0c, 0x6a, 0x2b, 0x52, 0xed, 0xcc, 0x4e, 0x9f, 0x7c, 0x3c, 0xa0, 0x11, 0xfd, 0x5e, + 0x83, 0x79, 0x9f, 0xd0, 0x23, 0x42, 0x57, 0x2d, 0x8b, 0x12, 0xdf, 0xaf, 0x77, 0xd7, 0x1c, 0x9b, + 0xb8, 0xc1, 0xda, 0x66, 0x03, 0xfb, 0x95, 0x71, 0x7e, 0x0e, 0xdf, 0x4c, 0x67, 0xd0, 0xee, 0x30, + 0x39, 0x75, 0x5d, 0x5a, 0x34, 0x3f, 0x94, 0xc4, 0xc7, 0xcf, 0x31, 0x43, 0xdf, 0x87, 0x49, 0x75, + 0x91, 0xb7, 0x6c, 0x3f, 0x40, 0xf7, 0x20, 0xdf, 0x64, 0x1f, 0x7e, 0x45, 0xe3, 0x06, 0xd6, 0xd2, + 0x19, 0xa8, 0x64, 0xd4, 0xa7, 0xa4, 0x3d, 0x79, 0xfe, 0xe9, 0x63, 0x29, 0x4d, 0xff, 0xe5, 0x38, + 0x94, 0x56, 0x77, 0x36, 0x31, 0xf1, 0xbd, 0x0e, 0x35, 0x49, 0x8a, 0xa0, 0xb9, 0x06, 0x93, 0xbe, + 0xed, 0x36, 0x3b, 0x8e, 0x41, 0x19, 0xb4, 0x92, 0xe7, 0x94, 0x17, 0x24, 0xe5, 0xe4, 0x6e, 
0x0c, + 0x87, 0x13, 0x94, 0xe8, 0x2a, 0x00, 0x93, 0xe0, 0xb7, 0x0d, 0x93, 0x58, 0x95, 0xcc, 0xa2, 0xb6, + 0x54, 0xa8, 0x23, 0xc9, 0x07, 0xb7, 0x43, 0x0c, 0x8e, 0x51, 0xa1, 0x97, 0x21, 0xc7, 0x2d, 0xad, + 0x14, 0xb8, 0x9a, 0xb2, 0x24, 0xcf, 0x71, 0x37, 0xb0, 0xc0, 0xa1, 0xcb, 0x30, 0x21, 0xa3, 0xac, + 0x52, 0xe4, 0x64, 0xd3, 0x92, 0x6c, 0x42, 0x85, 0x81, 0xc2, 0x33, 0xff, 0x0e, 0x6d, 0xd7, 0xe2, + 0x71, 0x17, 0xf3, 0xef, 0x6d, 0xdb, 0xb5, 0x30, 0xc7, 0xa0, 0x5b, 0x90, 0x3b, 0x22, 0x74, 0x8f, + 0x45, 0x02, 0x0b, 0xcd, 0x2f, 0xa7, 0x3b, 0xe8, 0x7b, 0x8c, 0xa5, 0x5e, 0x64, 0xa6, 0xf1, 0x9f, + 0x58, 0x08, 0x41, 0x35, 0x00, 0xff, 0xc0, 0xa3, 0x01, 0x77, 0xaf, 0x92, 0x5b, 0xcc, 0x2e, 0x15, + 0xeb, 0x53, 0xcc, 0xdf, 0xdd, 0x10, 0x8a, 0x63, 0x14, 0x8c, 0xde, 0x34, 0x02, 0xd2, 0xf4, 0xa8, + 0x4d, 0xfc, 0xca, 0x44, 0x44, 0xbf, 0x16, 0x42, 0x71, 0x8c, 0x02, 0x7d, 0x1b, 0x90, 0x1f, 0x78, + 0xd4, 0x68, 0x12, 0xe9, 0xea, 0x5b, 0x86, 0x7f, 0x50, 0x01, 0xee, 0xdd, 0xbc, 0xf4, 0x0e, 0xed, + 0x0e, 0x50, 0xe0, 0x33, 0xb8, 0xf4, 0x3f, 0x6a, 0x30, 0x1d, 0x8b, 0x05, 0x1e, 0x77, 0xd7, 0x60, + 0xb2, 0x19, 0x7b, 0x75, 0x32, 0x2e, 0xc2, 0xdb, 0x8e, 0xbf, 0x48, 0x9c, 0xa0, 0x44, 0x04, 0x8a, + 0x54, 0x4a, 0x52, 0xd9, 0x65, 0x25, 0x75, 0xd0, 0x2a, 0x1b, 0x22, 0x4d, 0x31, 0xa0, 0x8f, 0x23, + 0xc9, 0xfa, 0xbf, 0x34, 0x1e, 0xc0, 0x2a, 0xdf, 0xa0, 0xa5, 0x58, 0x4e, 0xd3, 0xf8, 0xf1, 0x4d, + 0x0e, 0xc9, 0x47, 0xe7, 0x24, 0x82, 0xcc, 0xa7, 0x22, 0x11, 0x5c, 0x2f, 0xfc, 0xe6, 0xfd, 0xea, + 0xd8, 0xbb, 0xff, 0x58, 0x1c, 0xd3, 0x5b, 0x50, 0x5e, 0xa3, 0xc4, 0x08, 0xc8, 0x76, 0x3b, 0xe0, + 0x0e, 0xe8, 0x90, 0xb7, 0x68, 0x17, 0x77, 0x5c, 0xe9, 0x28, 0xb0, 0xf7, 0xdd, 0xe0, 0x10, 0x2c, + 0x31, 0xec, 0xfe, 0xf6, 0x6d, 0xe2, 0x58, 0x5b, 0x86, 0x6b, 0x34, 0x09, 0x95, 0x71, 0x1f, 0x9e, + 0xea, 0x46, 0x0c, 0x87, 0x13, 0x94, 0xfa, 0xcf, 0xb3, 0x50, 0x6e, 0x10, 0x87, 0x44, 0xfa, 0x36, + 0x00, 0x35, 0xa9, 0x61, 0x92, 0x1d, 0x42, 0x6d, 0xcf, 0xda, 0x25, 0xa6, 0xe7, 0x5a, 0x3e, 0x8f, + 0x88, 0x6c, 0xfd, 0x33, 0x2c, 0xce, 0x6e, 0x0e, 0x60, 0xf1, 0x19, 0x1c, 0xc8, 0x81, 0x72, 0x9b, + 0xf2, 0xdf, 0x76, 0x20, 0x6b, 0x0f, 0x7b, 0x69, 0x5f, 0x49, 0x77, 0xd4, 0x3b, 0x71, 0xd6, 0xfa, + 0xec, 0x49, 0xaf, 0x5a, 0x4e, 0x80, 0x70, 0x52, 0x38, 0xfa, 0x16, 0xcc, 0x78, 0xb4, 0x7d, 0x60, + 0xb8, 0x0d, 0xd2, 0x26, 0xae, 0x45, 0xdc, 0xc0, 0xe7, 0xa7, 0x50, 0xa8, 0x5f, 0x60, 0x15, 0x63, + 0xbb, 0x0f, 0x87, 0x07, 0xa8, 0xd1, 0x7d, 0x98, 0x6d, 0x53, 0xaf, 0x6d, 0x34, 0x0d, 0x26, 0x71, + 0xc7, 0x73, 0x6c, 0xb3, 0xcb, 0xb3, 0x43, 0xb1, 0x7e, 0xe5, 0xa4, 0x57, 0x9d, 0xdd, 0xe9, 0x47, + 0x9e, 0xf6, 0xaa, 0x73, 0xfc, 0xe8, 0x18, 0x24, 0x42, 0xe2, 0x41, 0x31, 0xb1, 0x3b, 0xcc, 0x0d, + 0xbb, 0x43, 0x7d, 0x13, 0x0a, 0x8d, 0x0e, 0xe5, 0x5c, 0xe8, 0x4d, 0x28, 0x58, 0xf2, 0xb7, 0x3c, + 0xf9, 0x97, 0x54, 0xc9, 0x55, 0x34, 0xa7, 0xbd, 0x6a, 0x99, 0x35, 0x09, 0x35, 0x05, 0xc0, 0x21, + 0x8b, 0xfe, 0x00, 0xca, 0xeb, 0x8f, 0xda, 0x1e, 0x0d, 0xd4, 0x9d, 0xbe, 0x02, 0x79, 0xc2, 0x01, + 0x5c, 0x5a, 0x21, 0xaa, 0x13, 0x82, 0x0c, 0x4b, 0x2c, 0xcb, 0xc3, 0xe4, 0x91, 0x61, 0x06, 0x32, + 0x6d, 0x87, 0x79, 0x78, 0x9d, 0x01, 0xb1, 0xc0, 0xe9, 0x9f, 0x87, 0x02, 0x0f, 0x28, 0xff, 0xde, + 0x0a, 0x9a, 0x81, 0x2c, 0x36, 0x8e, 0xb9, 0xd4, 0x49, 0x9c, 0xa5, 0xc6, 0xb1, 0xbe, 0x0d, 0x70, + 0x93, 0x84, 0x8a, 0x57, 0x61, 0x5a, 0x3d, 0xe2, 0x64, 0x6e, 0xf9, 0xac, 0x14, 0x3d, 0x8d, 0x93, + 0x68, 0xdc, 0x4f, 0xaf, 0x3f, 0x80, 0x22, 0xcf, 0x3f, 0x2c, 0x79, 0x47, 0x85, 0x42, 0x7b, 0x4e, + 0xa1, 0x50, 0xd9, 0x3f, 0x33, 0x2c, 0xfb, 0xc7, 0x9e, 0x9b, 0x03, 0x65, 0xc1, 0xab, 0x4a, 0x63, + 0x2a, 0x0d, 0x57, 
0xa0, 0xa0, 0xcc, 0x94, 0x5a, 0xc2, 0x96, 0x48, 0x09, 0xc2, 0x21, 0x45, 0x4c, + 0xdb, 0x01, 0x24, 0x72, 0x69, 0x3a, 0x65, 0xb1, 0xba, 0x97, 0x79, 0x7e, 0xdd, 0x8b, 0x69, 0xfa, + 0x09, 0x54, 0x86, 0xf5, 0x51, 0x2f, 0x90, 0xed, 0xd3, 0x9b, 0xa2, 0xbf, 0xa7, 0xc1, 0x4c, 0x5c, + 0x52, 0xfa, 0xeb, 0x4b, 0xaf, 0xe4, 0xfc, 0x3a, 0x1f, 0x3b, 0x91, 0xdf, 0x69, 0x70, 0x21, 0xe1, + 0xda, 0x48, 0x37, 0x3e, 0x82, 0x51, 0xf1, 0xe0, 0xc8, 0x8e, 0x10, 0x1c, 0x7f, 0xcb, 0x40, 0xf9, + 0x96, 0xb1, 0x47, 0x9c, 0x5d, 0xe2, 0x10, 0x33, 0xf0, 0x28, 0xfa, 0x31, 0x94, 0x5a, 0x46, 0x60, + 0x1e, 0x70, 0xa8, 0xea, 0x09, 0x1b, 0xe9, 0x12, 0x68, 0x42, 0x52, 0x6d, 0x2b, 0x12, 0xb3, 0xee, + 0x06, 0xb4, 0x5b, 0x9f, 0x93, 0x26, 0x95, 0x62, 0x18, 0x1c, 0xd7, 0xc6, 0x1b, 0x79, 0xfe, 0xbd, + 0xfe, 0xa8, 0xcd, 0x0a, 0xd6, 0xe8, 0xf3, 0x43, 0xc2, 0x04, 0x4c, 0xde, 0xe9, 0xd8, 0x94, 0xb4, + 0x88, 0x1b, 0x44, 0x8d, 0xfc, 0x56, 0x9f, 0x7c, 0x3c, 0xa0, 0x71, 0xfe, 0x06, 0xcc, 0xf4, 0x1b, + 0xcf, 0xb2, 0xce, 0x21, 0xe9, 0x8a, 0xfb, 0xc2, 0xec, 0x27, 0xba, 0x00, 0xb9, 0x23, 0xc3, 0xe9, + 0xc8, 0xd7, 0x88, 0xc5, 0xc7, 0xf5, 0xcc, 0x35, 0x4d, 0xff, 0x83, 0x06, 0x95, 0x61, 0x86, 0xa0, + 0x2f, 0xc4, 0x04, 0xd5, 0x4b, 0xd2, 0xaa, 0xec, 0xdb, 0xa4, 0x2b, 0xa4, 0xae, 0x43, 0xc1, 0x6b, + 0xb3, 0xd1, 0xcb, 0xa3, 0xf2, 0xd6, 0x2f, 0xab, 0x9b, 0xdc, 0x96, 0xf0, 0xd3, 0x5e, 0xf5, 0x62, + 0x42, 0xbc, 0x42, 0xe0, 0x90, 0x95, 0x65, 0x7f, 0x6e, 0x0f, 0xab, 0x48, 0x61, 0xf6, 0xbf, 0xc7, + 0x21, 0x58, 0x62, 0xf4, 0x3f, 0x69, 0x30, 0xce, 0x5b, 0xb1, 0x07, 0x50, 0x60, 0xe7, 0x67, 0x19, + 0x81, 0xc1, 0xed, 0x4a, 0x3d, 0x04, 0x30, 0xee, 0x2d, 0x12, 0x18, 0x51, 0xb4, 0x29, 0x08, 0x0e, + 0x25, 0x22, 0x0c, 0x39, 0x3b, 0x20, 0x2d, 0x75, 0x91, 0xaf, 0x0e, 0x15, 0x2d, 0x47, 0xd0, 0x1a, + 0x36, 0x8e, 0xd7, 0x1f, 0x05, 0xc4, 0x65, 0x97, 0x11, 0x3d, 0x8d, 0x4d, 0x26, 0x03, 0x0b, 0x51, + 0xfa, 0x7f, 0x34, 0x08, 0x55, 0xb1, 0xe0, 0xf7, 0x89, 0xb3, 0x7f, 0xcb, 0x76, 0x0f, 0xe5, 0xb1, + 0x86, 0xe6, 0xec, 0x4a, 0x38, 0x0e, 0x29, 0xce, 0x2a, 0x0f, 0x99, 0xd1, 0xca, 0x03, 0x53, 0x68, + 0x7a, 0x6e, 0x60, 0xbb, 0x9d, 0x81, 0xd7, 0xb6, 0x26, 0xe1, 0x38, 0xa4, 0x60, 0xcd, 0x0d, 0x25, + 0x2d, 0xc3, 0x76, 0x6d, 0xb7, 0xc9, 0x9c, 0x58, 0xf3, 0x3a, 0x6e, 0xc0, 0xab, 0xbc, 0x6c, 0x6e, + 0xf0, 0x00, 0x16, 0x9f, 0xc1, 0xa1, 0xff, 0x35, 0x0b, 0x25, 0xe6, 0xb3, 0xaa, 0x73, 0x6f, 0x40, + 0xd9, 0x89, 0x47, 0x81, 0xf4, 0xfd, 0xa2, 0x34, 0x25, 0xf9, 0xae, 0x71, 0x92, 0x96, 0x31, 0xf3, + 0x9e, 0x2c, 0x64, 0xce, 0x24, 0x99, 0x37, 0xe2, 0x48, 0x9c, 0xa4, 0x65, 0xd9, 0xeb, 0x98, 0xbd, + 0x0f, 0xd9, 0xed, 0x84, 0x57, 0xf4, 0x1d, 0x06, 0xc4, 0x02, 0x87, 0xb6, 0x60, 0xce, 0x70, 0x1c, + 0xef, 0x98, 0x03, 0xeb, 0x9e, 0x77, 0xd8, 0x32, 0xe8, 0xa1, 0xcf, 0xc7, 0xa8, 0x42, 0xfd, 0x73, + 0x92, 0x65, 0x6e, 0x75, 0x90, 0x04, 0x9f, 0xc5, 0x77, 0xd6, 0xb5, 0x8d, 0x8f, 0x78, 0x6d, 0xd7, + 0x61, 0x8a, 0xc5, 0x97, 0xd7, 0x09, 0x54, 0x87, 0x99, 0xe3, 0x97, 0x80, 0x4e, 0x7a, 0xd5, 0xa9, + 0x3b, 0x09, 0x0c, 0xee, 0xa3, 0x64, 0x2e, 0x3b, 0x76, 0xcb, 0x0e, 0x2a, 0x13, 0x9c, 0x25, 0x74, + 0xf9, 0x16, 0x03, 0x62, 0x81, 0x4b, 0xc4, 0x45, 0xe1, 0xbc, 0xb8, 0xd0, 0x7f, 0x9b, 0x05, 0x24, + 0x5a, 0x62, 0x4b, 0xf4, 0x36, 0x22, 0xd1, 0x5c, 0x86, 0x89, 0x96, 0x6c, 0xa9, 0xb5, 0x64, 0xd6, + 0x57, 0xdd, 0xb4, 0xc2, 0xa3, 0x2d, 0x28, 0x8a, 0x07, 0x1f, 0x05, 0xf1, 0xb2, 0x24, 0x2e, 0x6e, + 0x2b, 0xc4, 0x69, 0xaf, 0x3a, 0x9f, 0x50, 0x13, 0x62, 0xee, 0x74, 0xdb, 0x04, 0x47, 0x12, 0xd8, + 0x14, 0x6d, 0xb4, 0xed, 0xf8, 0xfe, 0xa4, 0x18, 0x4d, 0xd1, 0xd1, 0x24, 0x84, 0x63, 0x54, 0xe8, + 0x2d, 0x18, 0x67, 0x27, 0x25, 0x47, 0xda, 
0x2f, 0xa5, 0x4b, 0x1b, 0xec, 0xac, 0xeb, 0x05, 0x56, + 0x35, 0xd9, 0x2f, 0xcc, 0x25, 0x30, 0xed, 0x3c, 0xca, 0x7c, 0x66, 0x96, 0x9c, 0xfd, 0x43, 0xed, + 0x1b, 0x21, 0x06, 0xc7, 0xa8, 0xd0, 0x77, 0xa1, 0xb0, 0x2f, 0xdb, 0x42, 0x7e, 0x31, 0xa9, 0x13, + 0x97, 0x6a, 0x26, 0xc5, 0x08, 0xa7, 0xbe, 0x70, 0x28, 0x4d, 0x7f, 0x07, 0x8a, 0x5b, 0xb6, 0x49, + 0x3d, 0x66, 0x20, 0xbb, 0x12, 0x3f, 0x31, 0x93, 0x84, 0x57, 0xa2, 0xc2, 0x45, 0xe1, 0x59, 0x9c, + 0xb8, 0x86, 0xeb, 0x89, 0xc9, 0x23, 0x17, 0xc5, 0xc9, 0x6d, 0x06, 0xc4, 0x02, 0x77, 0xfd, 0x02, + 0xab, 0xbf, 0xbf, 0x78, 0x52, 0x1d, 0x7b, 0xfc, 0xa4, 0x3a, 0xf6, 0xfe, 0x13, 0x59, 0x8b, 0x4f, + 0x01, 0x60, 0x7b, 0xef, 0x87, 0xc4, 0x14, 0x59, 0x2d, 0xd5, 0xbe, 0x44, 0xad, 0xe9, 0xf8, 0xbe, + 0x24, 0xd3, 0xd7, 0x53, 0xc5, 0x70, 0x38, 0x41, 0x89, 0x96, 0xa1, 0x18, 0x6e, 0x42, 0xe4, 0x45, + 0xcf, 0xaa, 0xc0, 0x09, 0xd7, 0x25, 0x38, 0xa2, 0x49, 0xa4, 0xd8, 0xf1, 0x73, 0x53, 0x6c, 0x1d, + 0xb2, 0x1d, 0xdb, 0xe2, 0xaf, 0xab, 0x58, 0x7f, 0x4d, 0x95, 0xb8, 0xbb, 0x9b, 0x8d, 0xd3, 0x5e, + 0xf5, 0xa5, 0x61, 0x0b, 0xc8, 0xa0, 0xdb, 0x26, 0x7e, 0xed, 0xee, 0x66, 0x03, 0x33, 0xe6, 0xb3, + 0xde, 0x7b, 0x7e, 0xc4, 0xf7, 0x7e, 0x15, 0x40, 0x7a, 0xcd, 0xb8, 0xc5, 0xc3, 0x0d, 0x23, 0xea, + 0x66, 0x88, 0xc1, 0x31, 0x2a, 0xe4, 0xc3, 0xac, 0xc9, 0x46, 0x61, 0xf6, 0x3c, 0xec, 0x16, 0xf1, + 0x03, 0xa3, 0x25, 0x36, 0x44, 0xa3, 0x05, 0xf7, 0x25, 0xa9, 0x66, 0x76, 0xad, 0x5f, 0x18, 0x1e, + 0x94, 0x8f, 0x3c, 0x98, 0xb5, 0xe4, 0x50, 0x17, 0x29, 0x2d, 0x8e, 0xac, 0xf4, 0x22, 0x53, 0xd8, + 0xe8, 0x17, 0x84, 0x07, 0x65, 0xa3, 0x1f, 0xc0, 0xbc, 0x02, 0x0e, 0x4e, 0xd6, 0x7c, 0xc7, 0x93, + 0xad, 0x2f, 0x9c, 0xf4, 0xaa, 0xf3, 0x8d, 0xa1, 0x54, 0xf8, 0x39, 0x12, 0x90, 0x05, 0x79, 0x47, + 0xf4, 0x8f, 0x25, 0x5e, 0xf3, 0xbf, 0x91, 0xce, 0x8b, 0x28, 0xfa, 0x6b, 0xf1, 0xbe, 0x31, 0x9c, + 0x1c, 0x65, 0xcb, 0x28, 0x65, 0xa3, 0x47, 0x50, 0x32, 0x5c, 0xd7, 0x0b, 0x0c, 0x31, 0xeb, 0x4f, + 0x72, 0x55, 0xab, 0x23, 0xab, 0x5a, 0x8d, 0x64, 0xf4, 0xf5, 0xa9, 0x31, 0x0c, 0x8e, 0xab, 0x42, + 0xc7, 0x30, 0xed, 0x1d, 0xbb, 0x84, 0x62, 0xb2, 0x4f, 0x28, 0x71, 0x4d, 0xe2, 0x57, 0xca, 0x5c, + 0xfb, 0x57, 0x53, 0x6a, 0x4f, 0x30, 0x47, 0x21, 0x9d, 0x84, 0xfb, 0xb8, 0x5f, 0x0b, 0xaa, 0xb1, + 0x24, 0xe9, 0x1a, 0x8e, 0xfd, 0x23, 0x42, 0xfd, 0xca, 0x54, 0xb4, 0xc4, 0xdb, 0x08, 0xa1, 0x38, + 0x46, 0x81, 0xbe, 0x06, 0x25, 0xd3, 0xe9, 0xf8, 0x01, 0x11, 0x1b, 0xd5, 0x69, 0xfe, 0x82, 0x42, + 0xff, 0xd6, 0x22, 0x14, 0x8e, 0xd3, 0xa1, 0x0e, 0x94, 0x5b, 0xf1, 0x92, 0x51, 0x99, 0xe5, 0xde, + 0x5d, 0x4b, 0xe7, 0xdd, 0x60, 0x51, 0x8b, 0xfa, 0x8a, 0x04, 0x0e, 0x27, 0xb5, 0xcc, 0x7f, 0x1d, + 0x4a, 0xff, 0x63, 0xcb, 0xcd, 0x5a, 0xf6, 0xfe, 0x7b, 0x1c, 0xa9, 0x65, 0xff, 0x73, 0x06, 0xa6, + 0x92, 0xa7, 0xdf, 0x57, 0x0e, 0x73, 0xa9, 0xca, 0xa1, 0x1a, 0x0e, 0xb5, 0xa1, 0x4b, 0x60, 0x95, + 0xd6, 0xb3, 0x43, 0xd3, 0xba, 0xcc, 0x9e, 0xe3, 0x2f, 0x92, 0x3d, 0x6b, 0x00, 0xac, 0xcf, 0xa0, + 0x9e, 0xe3, 0x10, 0xca, 0x13, 0x67, 0x41, 0x2e, 0x7b, 0x43, 0x28, 0x8e, 0x51, 0xb0, 0x1e, 0x75, + 0xcf, 0xf1, 0xcc, 0x43, 0x7e, 0x04, 0xea, 0xd1, 0xf3, 0x94, 0x59, 0x10, 0x3d, 0x6a, 0x7d, 0x00, + 0x8b, 0xcf, 0xe0, 0xd0, 0xbb, 0x70, 0x71, 0xc7, 0xa0, 0x81, 0x6d, 0x38, 0xd1, 0x03, 0xe3, 0x43, + 0xc0, 0xc3, 0x81, 0x11, 0xe3, 0xb5, 0x51, 0x1f, 0x6a, 0x74, 0xf8, 0x11, 0x2c, 0x1a, 0x33, 0xf4, + 0xbf, 0x6b, 0x70, 0xe9, 0x4c, 0xdd, 0x9f, 0xc0, 0x88, 0xf3, 0x30, 0x39, 0xe2, 0xbc, 0x91, 0x72, + 0xdf, 0x78, 0x96, 0xb5, 0x43, 0x06, 0x9e, 0x09, 0xc8, 0xed, 0xb0, 0x86, 0x58, 0xff, 0xb5, 0x06, + 0x93, 0xfc, 0xd7, 0x28, 0xbb, 0xda, 0x2a, 0xe4, 0xf6, 0x3d, 0xb5, 
0x38, 0x2a, 0x88, 0x3f, 0x13, + 0x36, 0x18, 0x00, 0x0b, 0xf8, 0x0b, 0x2c, 0x73, 0xdf, 0xd3, 0x20, 0xb9, 0x25, 0x45, 0x37, 0x44, + 0xfc, 0x6a, 0xe1, 0x1a, 0x73, 0xc4, 0xd8, 0x7d, 0x73, 0xd8, 0x80, 0x36, 0x97, 0x6a, 0x77, 0x77, + 0x05, 0x8a, 0xd8, 0xf3, 0x82, 0x1d, 0x23, 0x38, 0xf0, 0x99, 0xe3, 0x6d, 0xf6, 0x43, 0x9e, 0x0d, + 0x77, 0x9c, 0x63, 0xb0, 0x80, 0xeb, 0xbf, 0xd2, 0xe0, 0xd2, 0xd0, 0xfd, 0x39, 0x4b, 0x01, 0x66, + 0xf8, 0x25, 0x3d, 0x0a, 0xa3, 0x30, 0xa2, 0xc3, 0x31, 0x2a, 0x36, 0x59, 0x25, 0x96, 0xee, 0xfd, + 0x93, 0x55, 0x42, 0x1b, 0x4e, 0xd2, 0xea, 0xff, 0xce, 0x40, 0x7e, 0x37, 0x30, 0x82, 0x8e, 0xff, + 0x7f, 0x8e, 0xd8, 0x57, 0x20, 0xef, 0x73, 0x3d, 0xd2, 0xbc, 0xb0, 0xc6, 0x0a, 0xed, 0x58, 0x62, + 0xf9, 0x34, 0x42, 0x7c, 0xdf, 0x68, 0xaa, 0x8c, 0x15, 0x4d, 0x23, 0x02, 0x8c, 0x15, 0x1e, 0xbd, + 0x0e, 0x79, 0x4a, 0x0c, 0x3f, 0x1c, 0xcc, 0x16, 0x94, 0x48, 0xcc, 0xa1, 0xa7, 0xbd, 0xea, 0xa4, + 0x14, 0xce, 0xbf, 0xb1, 0xa4, 0x46, 0xf7, 0x61, 0xc2, 0x22, 0x81, 0x61, 0x3b, 0x62, 0x1e, 0x4b, + 0xbd, 0xae, 0x17, 0xc2, 0x1a, 0x82, 0xb5, 0x5e, 0x62, 0x36, 0xc9, 0x0f, 0xac, 0x04, 0xb2, 0x6c, + 0x6b, 0x7a, 0x96, 0x18, 0x27, 0x72, 0x51, 0xb6, 0x5d, 0xf3, 0x2c, 0x82, 0x39, 0x46, 0x7f, 0xac, + 0x41, 0x49, 0x48, 0x5a, 0x33, 0x3a, 0x3e, 0x41, 0x2b, 0xa1, 0x17, 0xe2, 0xba, 0x55, 0x27, 0x37, + 0xce, 0x06, 0x8e, 0xd3, 0x5e, 0xb5, 0xc8, 0xc9, 0xf8, 0x24, 0xa2, 0x1c, 0x88, 0x9d, 0x51, 0xe6, + 0x9c, 0x33, 0x7a, 0x19, 0x72, 0xfc, 0xf5, 0xc8, 0xc3, 0x0c, 0xdf, 0x3a, 0x7f, 0x60, 0x58, 0xe0, + 0xf4, 0x8f, 0x32, 0x50, 0x4e, 0x38, 0x97, 0x62, 0x16, 0x08, 0x17, 0x8a, 0x99, 0x14, 0x4b, 0xea, + 0xe1, 0x7f, 0x51, 0xca, 0xda, 0x93, 0x7f, 0x91, 0xda, 0xf3, 0x3d, 0xc8, 0x9b, 0xec, 0x8c, 0xd4, + 0x3f, 0xde, 0x2b, 0xa3, 0x5c, 0x27, 0x3f, 0xdd, 0x28, 0x1a, 0xf9, 0xa7, 0x8f, 0xa5, 0x40, 0x74, + 0x13, 0x66, 0x29, 0x09, 0x68, 0x77, 0x75, 0x3f, 0x20, 0x34, 0x3e, 0xc4, 0xe7, 0xa2, 0x8e, 0x1b, + 0xf7, 0x13, 0xe0, 0x41, 0x1e, 0x7d, 0x0f, 0x26, 0xef, 0x18, 0x7b, 0x4e, 0xf8, 0x07, 0x14, 0x86, + 0xb2, 0xed, 0x9a, 0x4e, 0xc7, 0x22, 0x22, 0x1b, 0xab, 0xec, 0xa5, 0x1e, 0xed, 0x66, 0x1c, 0x79, + 0xda, 0xab, 0xce, 0x25, 0x00, 0xe2, 0x1f, 0x17, 0x9c, 0x14, 0xa1, 0x3b, 0x30, 0xfe, 0x09, 0x4e, + 0x8f, 0xdf, 0x87, 0x62, 0xd4, 0xdf, 0x7f, 0xcc, 0x2a, 0xf5, 0x87, 0x50, 0x60, 0x11, 0xaf, 0xe6, + 0xd2, 0x73, 0x5a, 0x9c, 0x64, 0xe3, 0x94, 0x49, 0xd3, 0x38, 0xe9, 0x2d, 0x28, 0xdf, 0x6d, 0x5b, + 0x2f, 0xf8, 0x17, 0x64, 0x26, 0x75, 0xd5, 0xba, 0x0a, 0xe2, 0xcf, 0x74, 0x56, 0x20, 0x44, 0xe5, + 0x8e, 0x15, 0x88, 0x78, 0xe1, 0x8d, 0xed, 0xca, 0x7f, 0xa6, 0x01, 0xf0, 0xa5, 0xd4, 0xfa, 0x11, + 0x71, 0x03, 0x76, 0x0e, 0x2c, 0xf0, 0xfb, 0xcf, 0x81, 0x67, 0x06, 0x8e, 0x41, 0x77, 0x21, 0xef, + 0x89, 0x68, 0x12, 0x7f, 0x43, 0x8e, 0xb8, 0xf9, 0x0c, 0x1f, 0x81, 0x88, 0x27, 0x2c, 0x85, 0xd5, + 0x97, 0x9e, 0x3e, 0x5b, 0x18, 0xfb, 0xe0, 0xd9, 0xc2, 0xd8, 0x87, 0xcf, 0x16, 0xc6, 0xde, 0x3d, + 0x59, 0xd0, 0x9e, 0x9e, 0x2c, 0x68, 0x1f, 0x9c, 0x2c, 0x68, 0x1f, 0x9e, 0x2c, 0x68, 0x1f, 0x9d, + 0x2c, 0x68, 0x8f, 0xff, 0xb9, 0x30, 0x76, 0x3f, 0x73, 0xb4, 0xf2, 0xdf, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x61, 0xb7, 0xc5, 0x7c, 0xc2, 0x24, 0x00, 0x00, +} + func (m *APIGroup) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -326,53 +1481,65 @@ func (m *APIGroup) Marshal() (dAtA []byte, err error) { } func (m *APIGroup) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if len(m.Versions) > 0 { - for _, msg := range m.Versions { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ServerAddressByClientCIDRs) > 0 { + for iNdEx := len(m.ServerAddressByClientCIDRs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ServerAddressByClientCIDRs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x22 } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PreferredVersion.Size())) - n1, err := m.PreferredVersion.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.PreferredVersion.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 - if len(m.ServerAddressByClientCIDRs) > 0 { - for _, msg := range m.ServerAddressByClientCIDRs { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x1a + if len(m.Versions) > 0 { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *APIGroupList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -380,29 +1547,36 @@ func (m *APIGroupList) Marshal() (dAtA []byte, err error) { } func (m *APIGroupList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIGroupList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Groups) > 0 { - for _, msg := range m.Groups { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Groups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *APIResource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -410,89 +1584,90 @@ func (m *APIResource) Marshal() (dAtA []byte, err error) { } func (m *APIResource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - 
i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x10 - i++ - if m.Namespaced { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - if m.Verbs != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Verbs.Size())) - n2, err := m.Verbs.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.StorageVersionHash) + copy(dAtA[i:], m.StorageVersionHash) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageVersionHash))) + i-- + dAtA[i] = 0x52 + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x4a + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x42 + if len(m.Categories) > 0 { + for iNdEx := len(m.Categories) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Categories[iNdEx]) + copy(dAtA[i:], m.Categories[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Categories[iNdEx]))) + i-- + dAtA[i] = 0x3a } - i += n2 } + i -= len(m.SingularName) + copy(dAtA[i:], m.SingularName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SingularName))) + i-- + dAtA[i] = 0x32 if len(m.ShortNames) > 0 { - for _, s := range m.ShortNames { + for iNdEx := len(m.ShortNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ShortNames[iNdEx]) + copy(dAtA[i:], m.ShortNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShortNames[iNdEx]))) + i-- dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SingularName))) - i += copy(dAtA[i:], m.SingularName) - if len(m.Categories) > 0 { - for _, s := range m.Categories { - dAtA[i] = 0x3a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.Verbs != nil { + { + size, err := m.Verbs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 } - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageVersionHash))) - i += copy(dAtA[i:], m.StorageVersionHash) - return i, nil + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x1a + i-- + if m.Namespaced { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *APIResourceList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -500,33 +1675,41 @@ func (m *APIResourceList) Marshal() (dAtA []byte, err error) { } func (m *APIResourceList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIResourceList) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) - i += copy(dAtA[i:], m.GroupVersion) if len(m.APIResources) > 0 { - for _, msg := range m.APIResources { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.APIResources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.APIResources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.GroupVersion) + copy(dAtA[i:], m.GroupVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *APIVersions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -534,44 +1717,45 @@ func (m *APIVersions) Marshal() (dAtA []byte, err error) { } func (m *APIVersions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIVersions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Versions) > 0 { - for _, s := range m.Versions { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if len(m.ServerAddressByClientCIDRs) > 0 { + for iNdEx := len(m.ServerAddressByClientCIDRs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ServerAddressByClientCIDRs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x12 } } - if len(m.ServerAddressByClientCIDRs) > 0 { - for _, msg := range m.ServerAddressByClientCIDRs { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.Versions) > 0 { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Versions[iNdEx]) + copy(dAtA[i:], m.Versions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Versions[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *CreateOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -579,36 +1763,36 @@ func (m *CreateOptions) Marshal() (dAtA []byte, err error) { } func (m *CreateOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CreateOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.FieldManager) + copy(dAtA[i:], m.FieldManager) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) + i-- + dAtA[i] = 0x1a if len(m.DryRun) > 0 { - for _, s := range m.DryRun { + for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRun[iNdEx]) + copy(dAtA[i:], m.DryRun[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l 
>= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) - i += copy(dAtA[i:], m.FieldManager) - return i, nil + return len(dAtA) - i, nil } func (m *DeleteOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -616,63 +1800,65 @@ func (m *DeleteOptions) Marshal() (dAtA []byte, err error) { } func (m *DeleteOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeleteOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.GracePeriodSeconds != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.GracePeriodSeconds)) - } - if m.Preconditions != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Preconditions.Size())) - n3, err := m.Preconditions.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.DryRun) > 0 { + for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRun[iNdEx]) + copy(dAtA[i:], m.DryRun[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx]))) + i-- + dAtA[i] = 0x2a } - i += n3 + } + if m.PropagationPolicy != nil { + i -= len(*m.PropagationPolicy) + copy(dAtA[i:], *m.PropagationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PropagationPolicy))) + i-- + dAtA[i] = 0x22 } if m.OrphanDependents != nil { - dAtA[i] = 0x18 - i++ + i-- if *m.OrphanDependents { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - } - if m.PropagationPolicy != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PropagationPolicy))) - i += copy(dAtA[i:], *m.PropagationPolicy) + i-- + dAtA[i] = 0x18 } - if len(m.DryRun) > 0 { - for _, s := range m.DryRun { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.Preconditions != nil { + { + size, err := m.Preconditions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 + } + if m.GracePeriodSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.GracePeriodSeconds)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *Duration) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -680,20 +1866,25 @@ func (m *Duration) Marshal() (dAtA []byte, err error) { } func (m *Duration) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Duration)) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *ExportOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -701,82 +1892,68 @@ func (m *ExportOptions) Marshal() (dAtA 
[]byte, err error) { } func (m *ExportOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExportOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Export { + i-- + if m.Exact { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- dAtA[i] = 0x10 - i++ - if m.Exact { + i-- + if m.Export { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *Fields) Marshal() (dAtA []byte, err error) { +func (m *FieldsV1) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Fields) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *FieldsV1) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FieldsV1) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Map) > 0 { - keysForMap := make([]string, 0, len(m.Map)) - for k := range m.Map { - keysForMap = append(keysForMap, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMap) - for _, k := range keysForMap { - dAtA[i] = 0xa - i++ - v := m.Map[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n4, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } + if m.Raw != nil { + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *GetOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -784,21 +1961,27 @@ func (m *GetOptions) Marshal() (dAtA []byte, err error) { } func (m *GetOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i += copy(dAtA[i:], m.ResourceVersion) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *GroupKind) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -806,25 +1989,32 @@ func (m *GroupKind) Marshal() (dAtA []byte, err error) { } func (m *GroupKind) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupKind) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += 
copy(dAtA[i:], m.Group) - dAtA[i] = 0x12 - i++ + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *GroupResource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -832,199 +2022,174 @@ func (m *GroupResource) Marshal() (dAtA []byte, err error) { } func (m *GroupResource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i += copy(dAtA[i:], m.Resource) - return i, nil -} - -func (m *GroupVersion) Marshal() (dAtA []byte, err error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GroupVersion) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GroupResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - return i, nil + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *GroupVersionForDiscovery) Marshal() (dAtA []byte, err error) { +func (m *GroupVersion) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *GroupVersionForDiscovery) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) - i += copy(dAtA[i:], m.GroupVersion) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - return i, nil -} - -func (m *GroupVersionKind) Marshal() (dAtA []byte, err error) { +func (m *GroupVersion) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GroupVersionKind) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GroupVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x12 - i++ + i -= len(m.Version) + copy(dAtA[i:], m.Version) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += 
copy(dAtA[i:], m.Kind) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *GroupVersionResource) Marshal() (dAtA []byte, err error) { +func (m *GroupVersionForDiscovery) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *GroupVersionResource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GroupVersionForDiscovery) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupVersionForDiscovery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x12 - i++ + i -= len(m.Version) + copy(dAtA[i:], m.Version) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i += copy(dAtA[i:], m.Resource) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.GroupVersion) + copy(dAtA[i:], m.GroupVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Initializer) Marshal() (dAtA []byte, err error) { +func (m *GroupVersionKind) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Initializer) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GroupVersionKind) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupVersionKind) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x1a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + return len(dAtA) - i, nil } -func (m *Initializers) Marshal() (dAtA []byte, err error) { +func (m *GroupVersionResource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Initializers) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GroupVersionResource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupVersionResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Pending) > 0 { - for _, msg := range m.Pending { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.Result != nil { - 
dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Result.Size())) - n5, err := m.Result.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - } - return i, nil + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x1a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *LabelSelector) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1032,51 +2197,60 @@ func (m *LabelSelector) Marshal() (dAtA []byte, err error) { } func (m *LabelSelector) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if len(m.MatchExpressions) > 0 { + for iNdEx := len(m.MatchExpressions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchExpressions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } if len(m.MatchLabels) > 0 { keysForMatchLabels := make([]string, 0, len(m.MatchLabels)) for k := range m.MatchLabels { keysForMatchLabels = append(keysForMatchLabels, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForMatchLabels) - for _, k := range keysForMatchLabels { - dAtA[i] = 0xa - i++ - v := m.MatchLabels[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ + for iNdEx := len(keysForMatchLabels) - 1; iNdEx >= 0; iNdEx-- { + v := m.MatchLabels[string(keysForMatchLabels[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if len(m.MatchExpressions) > 0 { - for _, msg := range m.MatchExpressions { + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i -= len(keysForMatchLabels[iNdEx]) + copy(dAtA[i:], keysForMatchLabels[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMatchLabels[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *LabelSelectorRequirement) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1084,40 +2258,41 @@ func (m *LabelSelectorRequirement) Marshal() (dAtA []byte, err error) { } func (m *LabelSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) - i += copy(dAtA[i:], m.Operator) if len(m.Values) > 0 { - for _, s := range m.Values { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *List) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1125,37 +2300,46 @@ func (m *List) Marshal() (dAtA []byte, err error) { } func (m *List) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *List) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ListMeta) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1163,29 +2347,42 @@ func (m *ListMeta) Marshal() (dAtA []byte, err error) { } func (m *ListMeta) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) - i += copy(dAtA[i:], m.SelfLink) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i += copy(dAtA[i:], m.ResourceVersion) - dAtA[i] = 0x1a - i++ + if m.RemainingItemCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RemainingItemCount)) + i-- + dAtA[i] = 0x20 + } + i -= len(m.Continue) + copy(dAtA[i:], m.Continue) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) - i += copy(dAtA[i:], m.Continue) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.SelfLink) + copy(dAtA[i:], m.SelfLink) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ListOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1193,322 +2390,451 @@ func (m *ListOptions) Marshal() (dAtA []byte, err error) { } func (m *ListOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.LabelSelector))) - i += copy(dAtA[i:], m.LabelSelector) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldSelector))) - i += copy(dAtA[i:], m.FieldSelector) - dAtA[i] = 0x18 - i++ + i-- + if m.AllowWatchBookmarks { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + i -= len(m.Continue) + copy(dAtA[i:], m.Continue) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) + i-- + dAtA[i] = 0x42 + i = encodeVarintGenerated(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x38 + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x28 + } + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x22 + i-- if m.Watch { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i += copy(dAtA[i:], m.ResourceVersion) - if m.TimeoutSeconds != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x18 + i -= len(m.FieldSelector) + copy(dAtA[i:], m.FieldSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldSelector))) + i-- + dAtA[i] = 0x12 + i -= len(m.LabelSelector) + copy(dAtA[i:], m.LabelSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LabelSelector))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ManagedFieldsEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ManagedFieldsEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ManagedFieldsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.FieldsV1 != nil { + { + size, err := m.FieldsV1.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + i -= len(m.FieldsType) + copy(dAtA[i:], m.FieldsType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldsType))) + i-- + dAtA[i] = 0x32 + if m.Time != nil { + { + size, err := m.Time.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x1a + i -= len(m.Operation) + 
copy(dAtA[i:], m.Operation) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) + i-- + dAtA[i] = 0x12 + i -= len(m.Manager) + copy(dAtA[i:], m.Manager) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Manager))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ObjectMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ManagedFields) > 0 { + for iNdEx := len(m.ManagedFields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ManagedFields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + } + i -= len(m.ClusterName) + copy(dAtA[i:], m.ClusterName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterName))) + i-- + dAtA[i] = 0x7a + if len(m.Finalizers) > 0 { + for iNdEx := len(m.Finalizers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Finalizers[iNdEx]) + copy(dAtA[i:], m.Finalizers[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Finalizers[iNdEx]))) + i-- + dAtA[i] = 0x72 + } + } + if len(m.OwnerReferences) > 0 { + for iNdEx := len(m.OwnerReferences) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.OwnerReferences[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + } + if len(m.Annotations) > 0 { + keysForAnnotations := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keysForAnnotations = append(keysForAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.Annotations[string(keysForAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAnnotations[iNdEx]) + copy(dAtA[i:], keysForAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x62 + } + } + if len(m.Labels) > 0 { + keysForLabels := make([]string, 0, len(m.Labels)) + for k := range m.Labels { + keysForLabels = append(keysForLabels, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + for iNdEx := len(keysForLabels) - 1; iNdEx >= 0; iNdEx-- { + v := m.Labels[string(keysForLabels[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForLabels[iNdEx]) + copy(dAtA[i:], keysForLabels[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLabels[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x5a + } + } + if m.DeletionGracePeriodSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.DeletionGracePeriodSeconds)) + i-- + dAtA[i] = 0x50 + } + if m.DeletionTimestamp != nil { + { + size, err := m.DeletionTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size 
+ i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + { + size, err := m.CreationTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Limit)) + i-- dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) - i += copy(dAtA[i:], m.Continue) - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) + i-- + dAtA[i] = 0x38 + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x32 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x2a + i -= len(m.SelfLink) + copy(dAtA[i:], m.SelfLink) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) + i-- + dAtA[i] = 0x22 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x1a + i -= len(m.GenerateName) + copy(dAtA[i:], m.GenerateName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GenerateName))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ManagedFieldsEntry) Marshal() (dAtA []byte, err error) { +func (m *OwnerReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ManagedFieldsEntry) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *OwnerReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OwnerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Manager))) - i += copy(dAtA[i:], m.Manager) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) - i += copy(dAtA[i:], m.Operation) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - if m.Time != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) - n7, err := m.Time.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.BlockOwnerDeletion != nil { + i-- + if *m.BlockOwnerDeletion { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n7 + i-- + dAtA[i] = 0x38 } - if m.Fields != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Fields.Size())) - n8, err := m.Fields.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Controller != nil { + i-- + if *m.Controller { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n8 + i-- + dAtA[i] = 0x30 } - return i, nil + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x2a + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) 
+ i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ObjectMeta) Marshal() (dAtA []byte, err error) { +func (m *PartialObjectMetadata) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PartialObjectMetadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PartialObjectMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GenerateName))) - i += copy(dAtA[i:], m.GenerateName) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) - i += copy(dAtA[i:], m.SelfLink) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i += copy(dAtA[i:], m.ResourceVersion) - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CreationTimestamp.Size())) - n9, err := m.CreationTimestamp.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - if m.DeletionTimestamp != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeletionTimestamp.Size())) - n10, err := m.DeletionTimestamp.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - } - if m.DeletionGracePeriodSeconds != nil { - dAtA[i] = 0x50 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.DeletionGracePeriodSeconds)) - } - if len(m.Labels) > 0 { - keysForLabels := make([]string, 0, len(m.Labels)) - for k := range m.Labels { - keysForLabels = append(keysForLabels, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - for _, k := range keysForLabels { - dAtA[i] = 0x5a - i++ - v := m.Labels[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if len(m.Annotations) > 0 { - keysForAnnotations := make([]string, 0, len(m.Annotations)) - for k := range m.Annotations { - keysForAnnotations = append(keysForAnnotations, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) - for _, k := range keysForAnnotations { - dAtA[i] = 0x62 - i++ - v := m.Annotations[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if len(m.OwnerReferences) > 0 { - for _, msg := range m.OwnerReferences { - dAtA[i] = 0x6a - i++ - i 
= encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Finalizers) > 0 { - for _, s := range m.Finalizers { - dAtA[i] = 0x72 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterName))) - i += copy(dAtA[i:], m.ClusterName) - if m.Initializers != nil { - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Initializers.Size())) - n11, err := m.Initializers.MarshalTo(dAtA[i:]) + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n11 - } - if len(m.ManagedFields) > 0 { - for _, msg := range m.ManagedFields { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *OwnerReference) Marshal() (dAtA []byte, err error) { +func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *OwnerReference) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PartialObjectMetadataList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - if m.Controller != nil { - dAtA[i] = 0x30 - i++ - if *m.Controller { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i++ } - if m.BlockOwnerDeletion != nil { - dAtA[i] = 0x38 - i++ - if *m.BlockOwnerDeletion { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i++ + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Patch) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1516,17 +2842,22 @@ func (m *Patch) Marshal() (dAtA []byte, err error) { } func (m *Patch) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Patch) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { + i := len(dAtA) _ = i var l int _ = l - return i, nil + return len(dAtA) - i, nil } func (m *PatchOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1534,46 +2865,46 @@ func (m *PatchOptions) Marshal() (dAtA []byte, err error) { } func (m *PatchOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PatchOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.DryRun) > 0 { - for _, s := range m.DryRun { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } + i -= len(m.FieldManager) + copy(dAtA[i:], m.FieldManager) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) + i-- + dAtA[i] = 0x1a if m.Force != nil { - dAtA[i] = 0x10 - i++ + i-- if *m.Force { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x10 } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) - i += copy(dAtA[i:], m.FieldManager) - return i, nil + if len(m.DryRun) > 0 { + for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRun[iNdEx]) + copy(dAtA[i:], m.DryRun[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } func (m *Preconditions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1581,29 +2912,36 @@ func (m *Preconditions) Marshal() (dAtA []byte, err error) { } func (m *Preconditions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Preconditions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.UID != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID))) - i += copy(dAtA[i:], *m.UID) - } if m.ResourceVersion != nil { - dAtA[i] = 0x12 - i++ + i -= len(*m.ResourceVersion) + copy(dAtA[i:], *m.ResourceVersion) i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceVersion))) - i += copy(dAtA[i:], *m.ResourceVersion) + i-- + dAtA[i] = 0x12 + } + if m.UID != nil { + i -= len(*m.UID) + copy(dAtA[i:], *m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RootPaths) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1611,32 +2949,31 @@ func (m *RootPaths) Marshal() (dAtA []byte, err error) { } func (m *RootPaths) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RootPaths) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Paths) > 0 { - for _, s := range m.Paths { + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Paths[iNdEx]) + copy(dAtA[i:], m.Paths[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Paths[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - 
dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *ServerAddressByClientCIDR) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1644,25 +2981,32 @@ func (m *ServerAddressByClientCIDR) Marshal() (dAtA []byte, err error) { } func (m *ServerAddressByClientCIDR) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServerAddressByClientCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientCIDR))) - i += copy(dAtA[i:], m.ClientCIDR) - dAtA[i] = 0x12 - i++ + i -= len(m.ServerAddress) + copy(dAtA[i:], m.ServerAddress) i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServerAddress))) - i += copy(dAtA[i:], m.ServerAddress) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.ClientCIDR) + copy(dAtA[i:], m.ClientCIDR) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientCIDR))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Status) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1670,50 +3014,62 @@ func (m *Status) Marshal() (dAtA []byte, err error) { } func (m *Status) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n12, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x30 if m.Details != nil { + { + size, err := m.Details.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Details.Size())) - n13, err := m.Details.MarshalTo(dAtA[i:]) + } + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n13 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Code)) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatusCause) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, 
size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1721,29 +3077,37 @@ func (m *StatusCause) Marshal() (dAtA []byte, err error) { } func (m *StatusCause) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x1a - i++ + i -= len(m.Field) + copy(dAtA[i:], m.Field) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Field))) - i += copy(dAtA[i:], m.Field) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatusDetails) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1751,48 +3115,87 @@ func (m *StatusDetails) Marshal() (dAtA []byte, err error) { } func (m *StatusDetails) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.RetryAfterSeconds)) + i-- + dAtA[i] = 0x28 if len(m.Causes) > 0 { - for _, msg := range m.Causes { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Causes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Causes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x22 } } - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RetryAfterSeconds)) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - return i, nil + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x1a + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TableOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + 
} + return dAtA[:n], nil +} + +func (m *TableOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TableOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.IncludeObject) + copy(dAtA[i:], m.IncludeObject) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IncludeObject))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Timestamp) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1800,23 +3203,28 @@ func (m *Timestamp) Marshal() (dAtA []byte, err error) { } func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Timestamp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Seconds)) - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Nanos)) - return i, nil + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *TypeMeta) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1824,25 +3232,32 @@ func (m *TypeMeta) Marshal() (dAtA []byte, err error) { } func (m *TypeMeta) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TypeMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *UpdateOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1850,36 +3265,36 @@ func (m *UpdateOptions) Marshal() (dAtA []byte, err error) { } func (m *UpdateOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.FieldManager) + copy(dAtA[i:], m.FieldManager) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) + i-- + dAtA[i] = 0x12 if len(m.DryRun) > 0 { - for _, s := range m.DryRun { + for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRun[iNdEx]) + copy(dAtA[i:], m.DryRun[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) - i += copy(dAtA[i:], 
m.FieldManager) - return i, nil + return len(dAtA) - i, nil } func (m Verbs) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1887,32 +3302,31 @@ func (m Verbs) Marshal() (dAtA []byte, err error) { } func (m Verbs) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m Verbs) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m) > 0 { - for _, s := range m { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *WatchEvent) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1920,35 +3334,48 @@ func (m *WatchEvent) Marshal() (dAtA []byte, err error) { } func (m *WatchEvent) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WatchEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n14, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n14 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *APIGroup) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1971,6 +3398,9 @@ func (m *APIGroup) Size() (n int) { } func (m *APIGroupList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Groups) > 0 { @@ -1983,6 +3413,9 @@ func (m *APIGroupList) Size() (n int) { } func (m *APIResource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -2018,6 +3451,9 @@ func (m *APIResource) Size() (n int) { } func (m *APIResourceList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.GroupVersion) @@ -2032,6 +3468,9 @@ func (m *APIResourceList) Size() (n int) { } func (m *APIVersions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Versions) > 0 { @@ -2050,6 +3489,9 @@ func (m *APIVersions) Size() (n int) { } func (m *CreateOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.DryRun) > 0 { @@ -2064,6 +3506,9 @@ func (m *CreateOptions) Size() (n int) { } func (m *DeleteOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if 
m.GracePeriodSeconds != nil { @@ -2090,6 +3535,9 @@ func (m *DeleteOptions) Size() (n int) { } func (m *Duration) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Duration)) @@ -2097,6 +3545,9 @@ func (m *Duration) Size() (n int) { } func (m *ExportOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -2104,22 +3555,23 @@ func (m *ExportOptions) Size() (n int) { return n } -func (m *Fields) Size() (n int) { +func (m *FieldsV1) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if len(m.Map) > 0 { - for k, v := range m.Map { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + if m.Raw != nil { + l = len(m.Raw) + n += 1 + l + sovGenerated(uint64(l)) } return n } func (m *GetOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ResourceVersion) @@ -2128,6 +3580,9 @@ func (m *GetOptions) Size() (n int) { } func (m *GroupKind) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Group) @@ -2138,6 +3593,9 @@ func (m *GroupKind) Size() (n int) { } func (m *GroupResource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Group) @@ -2148,6 +3606,9 @@ func (m *GroupResource) Size() (n int) { } func (m *GroupVersion) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Group) @@ -2158,6 +3619,9 @@ func (m *GroupVersion) Size() (n int) { } func (m *GroupVersionForDiscovery) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.GroupVersion) @@ -2168,6 +3632,9 @@ func (m *GroupVersionForDiscovery) Size() (n int) { } func (m *GroupVersionKind) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Group) @@ -2180,6 +3647,9 @@ func (m *GroupVersionKind) Size() (n int) { } func (m *GroupVersionResource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Group) @@ -2191,31 +3661,10 @@ func (m *GroupVersionResource) Size() (n int) { return n } -func (m *Initializer) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Initializers) Size() (n int) { - var l int - _ = l - if len(m.Pending) > 0 { - for _, e := range m.Pending { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Result != nil { - l = m.Result.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - func (m *LabelSelector) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.MatchLabels) > 0 { @@ -2236,6 +3685,9 @@ func (m *LabelSelector) Size() (n int) { } func (m *LabelSelectorRequirement) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Key) @@ -2252,6 +3704,9 @@ func (m *LabelSelectorRequirement) Size() (n int) { } func (m *List) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -2266,6 +3721,9 @@ func (m *List) Size() (n int) { } func (m *ListMeta) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.SelfLink) @@ -2274,10 +3732,16 @@ func (m *ListMeta) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Continue) n += 1 + l + sovGenerated(uint64(l)) + if m.RemainingItemCount != nil { + n += 1 + sovGenerated(uint64(*m.RemainingItemCount)) + } return n } func (m *ListOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = 
len(m.LabelSelector) @@ -2293,10 +3757,14 @@ func (m *ListOptions) Size() (n int) { n += 1 + sovGenerated(uint64(m.Limit)) l = len(m.Continue) n += 1 + l + sovGenerated(uint64(l)) + n += 2 return n } func (m *ManagedFieldsEntry) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Manager) @@ -2309,14 +3777,19 @@ func (m *ManagedFieldsEntry) Size() (n int) { l = m.Time.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.Fields != nil { - l = m.Fields.Size() + l = len(m.FieldsType) + n += 1 + l + sovGenerated(uint64(l)) + if m.FieldsV1 != nil { + l = m.FieldsV1.Size() n += 1 + l + sovGenerated(uint64(l)) } return n } func (m *ObjectMeta) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -2371,10 +3844,6 @@ func (m *ObjectMeta) Size() (n int) { } l = len(m.ClusterName) n += 1 + l + sovGenerated(uint64(l)) - if m.Initializers != nil { - l = m.Initializers.Size() - n += 2 + l + sovGenerated(uint64(l)) - } if len(m.ManagedFields) > 0 { for _, e := range m.ManagedFields { l = e.Size() @@ -2385,6 +3854,9 @@ func (m *ObjectMeta) Size() (n int) { } func (m *OwnerReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -2404,13 +3876,47 @@ func (m *OwnerReference) Size() (n int) { return n } +func (m *PartialObjectMetadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PartialObjectMetadataList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *Patch) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l return n } func (m *PatchOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.DryRun) > 0 { @@ -2428,6 +3934,9 @@ func (m *PatchOptions) Size() (n int) { } func (m *Preconditions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.UID != nil { @@ -2442,6 +3951,9 @@ func (m *Preconditions) Size() (n int) { } func (m *RootPaths) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Paths) > 0 { @@ -2454,6 +3966,9 @@ func (m *RootPaths) Size() (n int) { } func (m *ServerAddressByClientCIDR) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ClientCIDR) @@ -2464,6 +3979,9 @@ func (m *ServerAddressByClientCIDR) Size() (n int) { } func (m *Status) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -2483,6 +4001,9 @@ func (m *Status) Size() (n int) { } func (m *StatusCause) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -2495,6 +4016,9 @@ func (m *StatusCause) Size() (n int) { } func (m *StatusDetails) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -2515,7 +4039,21 @@ func (m *StatusDetails) Size() (n int) { return n } +func (m *TableOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.IncludeObject) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *Timestamp) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Seconds)) @@ -2524,6 +4062,9 @@ func (m *Timestamp) Size() (n int) { } func (m *TypeMeta) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -2534,6 
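// Note on the Size() hunks above: each method now begins with
// "if m == nil { return 0 }", so callers can size optional (pointer)
// sub-messages without their own nil checks. A tiny sketch of the behavior
// this enables; the inner/outer types are hypothetical, not the vendored ones.
package main

import "fmt"

type inner struct{ s string }

// size mirrors the regenerated style: a nil receiver reports zero bytes.
func (m *inner) size() int {
	if m == nil {
		return 0
	}
	return 1 + len(m.s)
}

type outer struct{ opt *inner }

func (m *outer) size() int { return m.opt.size() } // safe even when opt is nil

func main() {
	fmt.Println((&outer{}).size())                    // 0
	fmt.Println((&outer{opt: &inner{s: "x"}}).size()) // 2
}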
+4075,9 @@ func (m *TypeMeta) Size() (n int) { } func (m *UpdateOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.DryRun) > 0 { @@ -2548,6 +4092,9 @@ func (m *UpdateOptions) Size() (n int) { } func (m Verbs) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m) > 0 { @@ -2560,6 +4107,9 @@ func (m Verbs) Size() (n int) { } func (m *WatchEvent) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -2570,14 +4120,7 @@ func (m *WatchEvent) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -2586,11 +4129,21 @@ func (this *APIGroup) String() string { if this == nil { return "nil" } + repeatedStringForVersions := "[]GroupVersionForDiscovery{" + for _, f := range this.Versions { + repeatedStringForVersions += strings.Replace(strings.Replace(f.String(), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + "," + } + repeatedStringForVersions += "}" + repeatedStringForServerAddressByClientCIDRs := "[]ServerAddressByClientCIDR{" + for _, f := range this.ServerAddressByClientCIDRs { + repeatedStringForServerAddressByClientCIDRs += strings.Replace(strings.Replace(f.String(), "ServerAddressByClientCIDR", "ServerAddressByClientCIDR", 1), `&`, ``, 1) + "," + } + repeatedStringForServerAddressByClientCIDRs += "}" s := strings.Join([]string{`&APIGroup{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Versions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Versions), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + `,`, + `Versions:` + repeatedStringForVersions + `,`, `PreferredVersion:` + strings.Replace(strings.Replace(this.PreferredVersion.String(), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + `,`, - `ServerAddressByClientCIDRs:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServerAddressByClientCIDRs), "ServerAddressByClientCIDR", "ServerAddressByClientCIDR", 1), `&`, ``, 1) + `,`, + `ServerAddressByClientCIDRs:` + repeatedStringForServerAddressByClientCIDRs + `,`, `}`, }, "") return s @@ -2599,8 +4152,13 @@ func (this *APIGroupList) String() string { if this == nil { return "nil" } + repeatedStringForGroups := "[]APIGroup{" + for _, f := range this.Groups { + repeatedStringForGroups += strings.Replace(strings.Replace(f.String(), "APIGroup", "APIGroup", 1), `&`, ``, 1) + "," + } + repeatedStringForGroups += "}" s := strings.Join([]string{`&APIGroupList{`, - `Groups:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Groups), "APIGroup", "APIGroup", 1), `&`, ``, 1) + `,`, + `Groups:` + repeatedStringForGroups + `,`, `}`, }, "") return s @@ -2628,9 +4186,14 @@ func (this *APIResourceList) String() string { if this == nil { return "nil" } + repeatedStringForAPIResources := "[]APIResource{" + for _, f := range this.APIResources { + repeatedStringForAPIResources += strings.Replace(strings.Replace(f.String(), "APIResource", "APIResource", 1), `&`, ``, 1) + "," + } + repeatedStringForAPIResources += "}" s := strings.Join([]string{`&APIResourceList{`, `GroupVersion:` + fmt.Sprintf("%v", this.GroupVersion) + `,`, - `APIResources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.APIResources), "APIResource", "APIResource", 1), `&`, ``, 1) + `,`, + `APIResources:` + repeatedStringForAPIResources + `,`, `}`, 
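// Note on the sovGenerated hunk above: the shift-loop size-of-varint is
// replaced by a closed form over math/bits. A small, self-contained check of
// the equivalence; sovLoop and sovBits are local names for illustration, not
// the vendored identifiers.
package main

import (
	"fmt"
	"math/bits"
)

// sovLoop is the old loop-based varint size calculation.
func sovLoop(x uint64) int {
	n := 0
	for {
		n++
		x >>= 7
		if x == 0 {
			break
		}
	}
	return n
}

// sovBits is the regenerated form: ceil(bitlen(x|1) / 7).
func sovBits(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	for _, x := range []uint64{0, 1, 127, 128, 16383, 16384, 1<<63 - 1} {
		fmt.Println(x, sovLoop(x), sovBits(x), sovLoop(x) == sovBits(x)) // all true
	}
}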
}, "") return s @@ -2652,7 +4215,7 @@ func (this *DeleteOptions) String() string { } s := strings.Join([]string{`&DeleteOptions{`, `GracePeriodSeconds:` + valueToStringGenerated(this.GracePeriodSeconds) + `,`, - `Preconditions:` + strings.Replace(fmt.Sprintf("%v", this.Preconditions), "Preconditions", "Preconditions", 1) + `,`, + `Preconditions:` + strings.Replace(this.Preconditions.String(), "Preconditions", "Preconditions", 1) + `,`, `OrphanDependents:` + valueToStringGenerated(this.OrphanDependents) + `,`, `PropagationPolicy:` + valueToStringGenerated(this.PropagationPolicy) + `,`, `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, @@ -2681,22 +4244,12 @@ func (this *ExportOptions) String() string { }, "") return s } -func (this *Fields) String() string { +func (this *FieldsV1) String() string { if this == nil { return "nil" } - keysForMap := make([]string, 0, len(this.Map)) - for k := range this.Map { - keysForMap = append(keysForMap, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMap) - mapStringForMap := "map[string]Fields{" - for _, k := range keysForMap { - mapStringForMap += fmt.Sprintf("%v: %v,", k, this.Map[k]) - } - mapStringForMap += "}" - s := strings.Join([]string{`&Fields{`, - `Map:` + mapStringForMap + `,`, + s := strings.Join([]string{`&FieldsV1{`, + `Raw:` + valueToStringGenerated(this.Raw) + `,`, `}`, }, "") return s @@ -2722,31 +4275,15 @@ func (this *GroupVersionForDiscovery) String() string { }, "") return s } -func (this *Initializer) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Initializer{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *Initializers) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Initializers{`, - `Pending:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Pending), "Initializer", "Initializer", 1), `&`, ``, 1) + `,`, - `Result:` + strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "Status", 1) + `,`, - `}`, - }, "") - return s -} func (this *LabelSelector) String() string { if this == nil { return "nil" } + repeatedStringForMatchExpressions := "[]LabelSelectorRequirement{" + for _, f := range this.MatchExpressions { + repeatedStringForMatchExpressions += strings.Replace(strings.Replace(f.String(), "LabelSelectorRequirement", "LabelSelectorRequirement", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchExpressions += "}" keysForMatchLabels := make([]string, 0, len(this.MatchLabels)) for k := range this.MatchLabels { keysForMatchLabels = append(keysForMatchLabels, k) @@ -2759,7 +4296,7 @@ func (this *LabelSelector) String() string { mapStringForMatchLabels += "}" s := strings.Join([]string{`&LabelSelector{`, `MatchLabels:` + mapStringForMatchLabels + `,`, - `MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchExpressions), "LabelSelectorRequirement", "LabelSelectorRequirement", 1), `&`, ``, 1) + `,`, + `MatchExpressions:` + repeatedStringForMatchExpressions + `,`, `}`, }, "") return s @@ -2780,9 +4317,14 @@ func (this *List) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]RawExtension{" + for _, f := range this.Items { + repeatedStringForItems += fmt.Sprintf("%v", f) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&List{`, `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), 
"RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2795,6 +4337,7 @@ func (this *ListMeta) String() string { `SelfLink:` + fmt.Sprintf("%v", this.SelfLink) + `,`, `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, `Continue:` + fmt.Sprintf("%v", this.Continue) + `,`, + `RemainingItemCount:` + valueToStringGenerated(this.RemainingItemCount) + `,`, `}`, }, "") return s @@ -2811,6 +4354,7 @@ func (this *ListOptions) String() string { `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, `Continue:` + fmt.Sprintf("%v", this.Continue) + `,`, + `AllowWatchBookmarks:` + fmt.Sprintf("%v", this.AllowWatchBookmarks) + `,`, `}`, }, "") return s @@ -2824,7 +4368,8 @@ func (this *ManagedFieldsEntry) String() string { `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`, `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, `Time:` + strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "Time", 1) + `,`, - `Fields:` + strings.Replace(fmt.Sprintf("%v", this.Fields), "Fields", "Fields", 1) + `,`, + `FieldsType:` + fmt.Sprintf("%v", this.FieldsType) + `,`, + `FieldsV1:` + strings.Replace(this.FieldsV1.String(), "FieldsV1", "FieldsV1", 1) + `,`, `}`, }, "") return s @@ -2833,6 +4378,16 @@ func (this *ObjectMeta) String() string { if this == nil { return "nil" } + repeatedStringForOwnerReferences := "[]OwnerReference{" + for _, f := range this.OwnerReferences { + repeatedStringForOwnerReferences += strings.Replace(strings.Replace(f.String(), "OwnerReference", "OwnerReference", 1), `&`, ``, 1) + "," + } + repeatedStringForOwnerReferences += "}" + repeatedStringForManagedFields := "[]ManagedFieldsEntry{" + for _, f := range this.ManagedFields { + repeatedStringForManagedFields += strings.Replace(strings.Replace(f.String(), "ManagedFieldsEntry", "ManagedFieldsEntry", 1), `&`, ``, 1) + "," + } + repeatedStringForManagedFields += "}" keysForLabels := make([]string, 0, len(this.Labels)) for k := range this.Labels { keysForLabels = append(keysForLabels, k) @@ -2861,16 +4416,15 @@ func (this *ObjectMeta) String() string { `UID:` + fmt.Sprintf("%v", this.UID) + `,`, `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, - `CreationTimestamp:` + strings.Replace(strings.Replace(this.CreationTimestamp.String(), "Time", "Time", 1), `&`, ``, 1) + `,`, + `CreationTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CreationTimestamp), "Time", "Time", 1), `&`, ``, 1) + `,`, `DeletionTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.DeletionTimestamp), "Time", "Time", 1) + `,`, `DeletionGracePeriodSeconds:` + valueToStringGenerated(this.DeletionGracePeriodSeconds) + `,`, `Labels:` + mapStringForLabels + `,`, `Annotations:` + mapStringForAnnotations + `,`, - `OwnerReferences:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OwnerReferences), "OwnerReference", "OwnerReference", 1), `&`, ``, 1) + `,`, + `OwnerReferences:` + repeatedStringForOwnerReferences + `,`, `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, `ClusterName:` + fmt.Sprintf("%v", this.ClusterName) + `,`, - `Initializers:` + strings.Replace(fmt.Sprintf("%v", this.Initializers), "Initializers", "Initializers", 1) + `,`, - `ManagedFields:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ManagedFields), "ManagedFieldsEntry", "ManagedFieldsEntry", 1), `&`, ``, 1) 
+ `,`, + `ManagedFields:` + repeatedStringForManagedFields + `,`, `}`, }, "") return s @@ -2890,6 +4444,32 @@ func (this *OwnerReference) String() string { }, "") return s } +func (this *PartialObjectMetadata) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PartialObjectMetadata{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PartialObjectMetadataList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PartialObjectMetadata{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PartialObjectMetadata", "PartialObjectMetadata", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PartialObjectMetadataList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} func (this *Patch) String() string { if this == nil { return "nil" @@ -2952,7 +4532,7 @@ func (this *Status) String() string { `Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Details:` + strings.Replace(fmt.Sprintf("%v", this.Details), "StatusDetails", "StatusDetails", 1) + `,`, + `Details:` + strings.Replace(this.Details.String(), "StatusDetails", "StatusDetails", 1) + `,`, `Code:` + fmt.Sprintf("%v", this.Code) + `,`, `}`, }, "") @@ -2974,17 +4554,32 @@ func (this *StatusDetails) String() string { if this == nil { return "nil" } + repeatedStringForCauses := "[]StatusCause{" + for _, f := range this.Causes { + repeatedStringForCauses += strings.Replace(strings.Replace(f.String(), "StatusCause", "StatusCause", 1), `&`, ``, 1) + "," + } + repeatedStringForCauses += "}" s := strings.Join([]string{`&StatusDetails{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Group:` + fmt.Sprintf("%v", this.Group) + `,`, `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Causes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Causes), "StatusCause", "StatusCause", 1), `&`, ``, 1) + `,`, + `Causes:` + repeatedStringForCauses + `,`, `RetryAfterSeconds:` + fmt.Sprintf("%v", this.RetryAfterSeconds) + `,`, `UID:` + fmt.Sprintf("%v", this.UID) + `,`, `}`, }, "") return s } +func (this *TableOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TableOptions{`, + `IncludeObject:` + fmt.Sprintf("%v", this.IncludeObject) + `,`, + `}`, + }, "") + return s +} func (this *Timestamp) String() string { if this == nil { return "nil" @@ -3024,7 +4619,7 @@ func (this *WatchEvent) String() string { } s := strings.Join([]string{`&WatchEvent{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(strings.Replace(this.Object.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `Object:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Object), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3052,7 +4647,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3080,7 +4675,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen 
|= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3090,6 +4685,9 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3109,7 +4707,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3118,6 +4716,9 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3140,7 +4741,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3149,6 +4750,9 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3170,7 +4774,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3179,6 +4783,9 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3196,6 +4803,9 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3223,7 +4833,7 @@ func (m *APIGroupList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3251,7 +4861,7 @@ func (m *APIGroupList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3260,6 +4870,9 @@ func (m *APIGroupList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3277,6 +4890,9 @@ func (m *APIGroupList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3304,7 +4920,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3332,7 +4948,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3342,6 +4958,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3361,7 +4980,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) 
<< shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3381,7 +5000,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3391,6 +5010,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3410,7 +5032,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3419,6 +5041,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3443,7 +5068,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3453,6 +5078,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3472,7 +5100,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3482,6 +5110,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3501,7 +5132,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3511,6 +5142,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3530,7 +5164,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3540,6 +5174,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3559,7 +5196,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3569,6 +5206,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3588,7 +5228,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3598,6 +5238,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3612,6 +5255,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3639,7 +5285,7 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3667,7 +5313,7 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3677,6 +5323,9 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3696,7 +5345,7 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3705,6 +5354,9 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3722,6 +5374,9 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3749,7 +5404,7 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3777,7 +5432,7 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3787,6 +5442,9 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3806,7 +5464,7 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3815,6 +5473,9 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3832,6 +5493,9 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3859,7 +5523,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3887,7 +5551,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3897,6 +5561,9 @@ 
func (m *CreateOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3916,7 +5583,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3926,6 +5593,9 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3940,6 +5610,9 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3967,7 +5640,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3995,7 +5668,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -4015,7 +5688,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4024,6 +5697,9 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4048,7 +5724,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4069,7 +5745,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4079,6 +5755,9 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4099,7 +5778,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4109,6 +5788,9 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4123,6 +5805,9 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4150,7 +5835,7 @@ func (m *Duration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4178,7 +5863,7 @@ func (m *Duration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Duration |= (time.Duration(b) & 0x7F) << shift + m.Duration |= time.Duration(b&0x7F) << shift if b < 0x80 { break } @@ 
-4192,6 +5877,9 @@ func (m *Duration) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4219,7 +5907,7 @@ func (m *ExportOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4240,212 +5928,39 @@ func (m *ExportOptions) Unmarshal(dAtA []byte) error { var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Export = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Exact", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Exact = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Fields) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Fields: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Fields: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Map", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Map == nil { - m.Map = make(map[string]Fields) - } - var mapkey string - mapvalue := &Fields{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - 
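// Note on the Unmarshal hunks above and below: after computing
// postIndex := iNdEx + msglen, the regenerated code now adds
// "if postIndex < 0 { return ErrInvalidLengthGenerated }". The guard matters
// because the length comes from an attacker-controlled varint and the signed
// addition can wrap negative, slipping past the existing "postIndex > l"
// check. A minimal illustration of the wrap, assuming a 64-bit platform;
// the values are hypothetical, not the vendored code.
package main

import (
	"fmt"
	"math"
)

func main() {
	iNdEx := 16                  // current read offset into dAtA
	msglen := math.MaxInt64 - 10 // bogus length decoded from a malicious varint
	postIndex := iNdEx + msglen  // wraps negative
	fmt.Println(postIndex < 0)   // true: without the new guard, the negative
	// postIndex passes "postIndex > l" and later slicing with it would panic.
}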
postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &Fields{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break } } - m.Map[mapkey] = *mapvalue - iNdEx = postIndex + m.Export = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Exact", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Exact = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4455,6 +5970,9 @@ func (m *Fields) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4467,7 +5985,7 @@ func (m *Fields) Unmarshal(dAtA []byte) error { } return nil } -func (m *GetOptions) Unmarshal(dAtA []byte) error { +func (m *FieldsV1) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4482,7 +6000,7 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4490,17 +6008,17 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetOptions: wiretype end group for non-group") + return fmt.Errorf("proto: FieldsV1: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FieldsV1: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4510,20 +6028,25 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if 
intStringLen < 0 { + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...) + if m.Raw == nil { + m.Raw = []byte{} + } iNdEx = postIndex default: iNdEx = preIndex @@ -4534,6 +6057,9 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4546,7 +6072,7 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *GroupKind) Unmarshal(dAtA []byte) error { +func (m *GetOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4561,7 +6087,7 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4569,15 +6095,15 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GroupKind: wiretype end group for non-group") + return fmt.Errorf("proto: GetOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GroupKind: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4589,7 +6115,7 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4599,39 +6125,13 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Group = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(dAtA[iNdEx:postIndex]) + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -4642,6 +6142,9 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4654,7 +6157,7 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { } return nil } -func (m *GroupResource) Unmarshal(dAtA []byte) error { +func (m *GroupKind) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ 
-4669,7 +6172,7 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4677,10 +6180,10 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GroupResource: wiretype end group for non-group") + return fmt.Errorf("proto: GroupKind: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GroupResource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GroupKind: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -4697,7 +6200,7 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4707,6 +6210,9 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4714,7 +6220,7 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4726,7 +6232,7 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4736,10 +6242,13 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Resource = string(dAtA[iNdEx:postIndex]) + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -4750,6 +6259,9 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4762,7 +6274,7 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { } return nil } -func (m *GroupVersion) Unmarshal(dAtA []byte) error { +func (m *GroupResource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4777,7 +6289,7 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4785,10 +6297,10 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GroupVersion: wiretype end group for non-group") + return fmt.Errorf("proto: GroupResource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GroupVersion: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GroupResource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -4805,7 +6317,7 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { 
break } @@ -4815,6 +6327,9 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4822,7 +6337,7 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4834,7 +6349,7 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4844,10 +6359,13 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Version = string(dAtA[iNdEx:postIndex]) + m.Resource = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -4858,6 +6376,9 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4870,7 +6391,7 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { } return nil } -func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { +func (m *GroupVersion) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4885,7 +6406,7 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4893,15 +6414,15 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GroupVersionForDiscovery: wiretype end group for non-group") + return fmt.Errorf("proto: GroupVersion: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GroupVersionForDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GroupVersion: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GroupVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4913,7 +6434,7 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4923,10 +6444,13 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.GroupVersion = string(dAtA[iNdEx:postIndex]) + m.Group = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -4942,7 +6466,7 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { 
break } @@ -4952,6 +6476,9 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4966,6 +6493,9 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4978,7 +6508,7 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { } return nil } -func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { +func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4993,7 +6523,7 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5001,15 +6531,15 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GroupVersionKind: wiretype end group for non-group") + return fmt.Errorf("proto: GroupVersionForDiscovery: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GroupVersionKind: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GroupVersionForDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field GroupVersion", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5021,7 +6551,7 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5031,10 +6561,13 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Group = string(dAtA[iNdEx:postIndex]) + m.GroupVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -5050,7 +6583,7 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5060,39 +6593,13 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(dAtA[iNdEx:postIndex]) + m.Version = 
string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -5103,6 +6610,9 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5115,7 +6625,7 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { } return nil } -func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { +func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5130,7 +6640,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5138,10 +6648,10 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GroupVersionResource: wiretype end group for non-group") + return fmt.Errorf("proto: GroupVersionKind: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GroupVersionResource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GroupVersionKind: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -5158,7 +6668,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5168,6 +6678,9 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5187,7 +6700,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5197,6 +6710,9 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5204,7 +6720,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5216,7 +6732,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5226,10 +6742,13 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Resource = string(dAtA[iNdEx:postIndex]) + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -5240,6 +6759,9 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5252,7 +6774,7 @@ func (m 
*GroupVersionResource) Unmarshal(dAtA []byte) error { } return nil } -func (m *Initializer) Unmarshal(dAtA []byte) error { +func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5267,7 +6789,7 @@ func (m *Initializer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5275,15 +6797,15 @@ func (m *Initializer) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Initializer: wiretype end group for non-group") + return fmt.Errorf("proto: GroupVersionResource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Initializer: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GroupVersionResource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5295,7 +6817,7 @@ func (m *Initializer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5305,66 +6827,19 @@ func (m *Initializer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Initializers) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Initializers: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Initializers: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pending", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5374,28 +6849,29 @@ func (m *Initializers) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex 
> l { return io.ErrUnexpectedEOF } - m.Pending = append(m.Pending, Initializer{}) - if err := m.Pending[len(m.Pending)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5405,24 +6881,23 @@ func (m *Initializers) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Result == nil { - m.Result = &Status{} - } - if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Resource = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -5433,6 +6908,9 @@ func (m *Initializers) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5460,7 +6938,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5488,7 +6966,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5497,6 +6975,9 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5517,7 +6998,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5534,7 +7015,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5544,6 +7025,9 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -5560,7 +7044,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5570,6 +7054,9 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -5606,7 +7093,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << 
shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5615,6 +7102,9 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5632,6 +7122,9 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5659,7 +7152,7 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5687,7 +7180,7 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5697,6 +7190,9 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5716,7 +7212,7 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5726,6 +7222,9 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5745,7 +7244,7 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5755,6 +7254,9 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5769,6 +7271,9 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5796,7 +7301,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5824,7 +7329,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5833,6 +7338,9 @@ func (m *List) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5854,7 +7362,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5863,10 +7371,13 @@ func (m *List) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, 
k8s_io_apimachinery_pkg_runtime.RawExtension{}) + m.Items = append(m.Items, runtime.RawExtension{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -5880,6 +7391,9 @@ func (m *List) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5907,7 +7421,7 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5935,7 +7449,7 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5945,6 +7459,9 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5964,7 +7481,7 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5974,6 +7491,9 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5993,7 +7513,7 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6003,11 +7523,34 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.Continue = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RemainingItemCount", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RemainingItemCount = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6017,6 +7560,9 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6044,7 +7590,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6072,7 +7618,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6082,6 +7628,9 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6101,7 +7650,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) 
<< shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6111,6 +7660,9 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6130,7 +7682,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6150,7 +7702,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6160,6 +7712,9 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6179,7 +7734,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6199,7 +7754,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Limit |= (int64(b) & 0x7F) << shift + m.Limit |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6218,7 +7773,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6228,11 +7783,34 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.Continue = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowWatchBookmarks", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowWatchBookmarks = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6242,6 +7820,9 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6269,7 +7850,7 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6297,7 +7878,7 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6307,6 +7888,9 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6326,7 +7910,7 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6336,6 +7920,9 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA 
[]byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6355,7 +7942,7 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6365,6 +7952,9 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6384,7 +7974,7 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6393,6 +7983,9 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6403,9 +7996,41 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 5: + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldsType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldsType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FieldsV1", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6417,7 +8042,7 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6426,13 +8051,16 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Fields == nil { - m.Fields = &Fields{} + if m.FieldsV1 == nil { + m.FieldsV1 = &FieldsV1{} } - if err := m.Fields.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.FieldsV1.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -6445,6 +8073,9 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6472,7 +8103,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6500,7 +8131,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { 
break } @@ -6510,6 +8141,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6529,7 +8163,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6539,6 +8173,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6558,7 +8195,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6568,6 +8205,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6587,7 +8227,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6597,6 +8237,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6616,7 +8259,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6626,6 +8269,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6645,7 +8291,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6655,6 +8301,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6674,7 +8323,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Generation |= (int64(b) & 0x7F) << shift + m.Generation |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6693,7 +8342,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6702,6 +8351,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6723,7 +8375,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6732,6 +8384,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6756,7 +8411,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6776,7 +8431,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6785,6 +8440,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6805,7 +8463,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6822,7 +8480,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6832,6 +8490,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -6848,7 +8509,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6858,6 +8519,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -6894,7 +8558,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6903,6 +8567,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6923,7 +8590,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6940,7 +8607,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6950,6 +8617,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -6966,7 +8636,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6976,6 +8646,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if 
postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -6996,11 +8669,109 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.Annotations[mapkey] = mapvalue + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OwnerReferences", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OwnerReferences = append(m.OwnerReferences, OwnerReference{}) + if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Finalizers = append(m.Finalizers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 13: + case 17: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OwnerReferences", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ManagedFields", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7012,7 +8783,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7021,17 +8792,73 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.OwnerReferences = append(m.OwnerReferences, OwnerReference{}) - if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ManagedFields = append(m.ManagedFields, ManagedFieldsEntry{}) + if err := m.ManagedFields[len(m.ManagedFields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 14: + default: + iNdEx = 
preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OwnerReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OwnerReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OwnerReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7043,7 +8870,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7053,14 +8880,17 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Finalizers = append(m.Finalizers, string(dAtA[iNdEx:postIndex])) + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 15: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7072,7 +8902,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7082,16 +8912,19 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterName = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 16: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Initializers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7101,30 +8934,29 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Initializers == nil { - 
m.Initializers = &Initializers{} - } - if err := m.Initializers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 17: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ManagedFields", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7134,23 +8966,66 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.ManagedFields = append(m.ManagedFields, ManagedFieldsEntry{}) - if err := m.ManagedFields[len(m.ManagedFields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.APIVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Controller = &b + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockOwnerDeletion", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.BlockOwnerDeletion = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -7160,6 +9035,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7172,7 +9050,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } return nil } -func (m *OwnerReference) Unmarshal(dAtA []byte) error { +func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7187,7 +9065,7 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7195,17 +9073,17 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: OwnerReference: wiretype end group for non-group") + return fmt.Errorf("proto: PartialObjectMetadata: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: OwnerReference: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PartialObjectMetadata: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + return fmt.Errorf("proto: 
wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7215,55 +9093,83 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + if skippy < 0 { + return ErrInvalidLengthGenerated } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7273,26 +9179,30 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
APIVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7302,63 +9212,26 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.APIVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Controller = &b - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockOwnerDeletion", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + m.Items = append(m.Items, PartialObjectMetadata{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - b := bool(v != 0) - m.BlockOwnerDeletion = &b + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -7368,6 +9241,9 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7395,7 +9271,7 @@ func (m *Patch) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7418,6 +9294,9 @@ func (m *Patch) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7445,7 +9324,7 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7473,7 +9352,7 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7483,6 +9362,9 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7502,7 +9384,7 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7523,7 +9405,7 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << 
shift if b < 0x80 { break } @@ -7533,6 +9415,9 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7547,6 +9432,9 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7574,7 +9462,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7602,7 +9490,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7612,6 +9500,9 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7632,7 +9523,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7642,6 +9533,9 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7657,6 +9551,9 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7684,7 +9581,7 @@ func (m *RootPaths) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7712,7 +9609,7 @@ func (m *RootPaths) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7722,6 +9619,9 @@ func (m *RootPaths) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7736,6 +9636,9 @@ func (m *RootPaths) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7763,7 +9666,7 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7791,7 +9694,7 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7801,6 +9704,9 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7820,7 +9726,7 @@ func (m *ServerAddressByClientCIDR) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7830,6 +9736,9 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7844,6 +9753,9 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7871,7 +9783,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7899,7 +9811,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7908,6 +9820,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7929,7 +9844,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7939,6 +9854,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7958,7 +9876,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7968,6 +9886,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7987,7 +9908,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7997,6 +9918,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8016,7 +9940,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8025,6 +9949,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8049,7 +9976,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Code |= (int32(b) & 0x7F) << shift + m.Code |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -8063,6 +9990,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8090,7 +10020,7 @@ func (m 
*StatusCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8118,7 +10048,7 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8128,6 +10058,9 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8147,7 +10080,7 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8157,6 +10090,9 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8176,7 +10112,7 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8186,6 +10122,9 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8200,6 +10139,9 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8227,7 +10169,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8255,7 +10197,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8265,6 +10207,9 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8284,7 +10229,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8294,6 +10239,9 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8313,7 +10261,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8323,6 +10271,9 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8342,7 +10293,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << 
shift if b < 0x80 { break } @@ -8351,6 +10302,9 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8373,7 +10327,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.RetryAfterSeconds |= (int32(b) & 0x7F) << shift + m.RetryAfterSeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -8392,7 +10346,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8402,6 +10356,9 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8416,6 +10373,94 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TableOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TableOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TableOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeObject", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IncludeObject = IncludeObjectPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8443,7 +10488,7 @@ func (m *Timestamp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8471,7 +10516,7 @@ func (m *Timestamp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Seconds |= (int64(b) & 0x7F) << shift + m.Seconds |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -8490,7 +10535,7 @@ func (m *Timestamp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Nanos |= (int32(b) & 0x7F) << shift + m.Nanos 
|= int32(b&0x7F) << shift if b < 0x80 { break } @@ -8504,6 +10549,9 @@ func (m *Timestamp) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8531,7 +10579,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8559,7 +10607,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8569,6 +10617,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8588,7 +10639,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8598,6 +10649,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8612,6 +10666,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8639,7 +10696,7 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8667,7 +10724,7 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8677,6 +10734,9 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8696,7 +10756,7 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8706,6 +10766,9 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8720,6 +10783,9 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8747,7 +10813,7 @@ func (m *Verbs) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8775,7 +10841,7 @@ func (m *Verbs) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8785,6 +10851,9 @@ func (m *Verbs) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + 
if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8799,6 +10868,9 @@ func (m *Verbs) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8826,7 +10898,7 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8854,7 +10926,7 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8864,6 +10936,9 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8883,7 +10958,7 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8892,6 +10967,9 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8908,6 +10986,9 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8974,10 +11055,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -9006,6 +11090,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -9024,179 +11111,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 2674 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x19, 0x4d, 0x6c, 0x23, 0x57, - 0x39, 0x63, 0xc7, 0x8e, 0xfd, 0x39, 0xce, 0xcf, 0xeb, 0x16, 0x5c, 0x4b, 0xc4, 0xe9, 0x14, 0x55, - 0x29, 0x6c, 0x6d, 0x92, 0xd2, 0x6a, 0x59, 0x68, 0x21, 0x8e, 0x93, 0x6d, 0xe8, 0xa6, 0x89, 0x5e, - 0xba, 0x8b, 0x58, 0x56, 0x88, 0x89, 0xe7, 0xc5, 0x19, 0x62, 0xcf, 0x4c, 0xdf, 0x1b, 0x67, 0x37, - 0x70, 0xa0, 0x07, 0x10, 0x20, 0x41, 0xb5, 0x47, 0x4e, 0xa8, 0x2b, 0xb8, 0x70, 0xe5, 0xc4, 0x89, - 0x53, 0x25, 0xf6, 0x58, 0x89, 0x4b, 0x0f, 0xc8, 0xea, 0x06, 0x24, 0xb8, 0x71, 0xcf, 0x01, 0xa1, - 0xf7, 0x33, 0x33, 0x6f, 0xec, 0x78, 0x33, 0x66, 0x0b, 0xe2, 0x14, 0xcf, 0xf7, 0xff, 0xbe, 0xef, - 0x7b, 0xdf, 0xf7, 0xbd, 0x2f, 0xb0, 0x73, 0x7c, 0x8d, 0xd5, 0x1d, 0xaf, 0x71, 0xdc, 0x3f, 0x20, - 0xd4, 0x25, 0x01, 0x61, 0x8d, 0x13, 0xe2, 0xda, 0x1e, 0x6d, 0x28, 0x84, 0xe5, 0x3b, 0x3d, 0xab, - 0x7d, 0xe4, 0xb8, 0x84, 0x9e, 0x36, 0xfc, 0xe3, 0x0e, 0x07, 0xb0, 0x46, 0x8f, 0x04, 
0x56, 0xe3, - 0x64, 0xb5, 0xd1, 0x21, 0x2e, 0xa1, 0x56, 0x40, 0xec, 0xba, 0x4f, 0xbd, 0xc0, 0x43, 0x9f, 0x97, - 0x5c, 0x75, 0x9d, 0xab, 0xee, 0x1f, 0x77, 0x38, 0x80, 0xd5, 0x39, 0x57, 0xfd, 0x64, 0xb5, 0xfa, - 0x72, 0xc7, 0x09, 0x8e, 0xfa, 0x07, 0xf5, 0xb6, 0xd7, 0x6b, 0x74, 0xbc, 0x8e, 0xd7, 0x10, 0xcc, - 0x07, 0xfd, 0x43, 0xf1, 0x25, 0x3e, 0xc4, 0x2f, 0x29, 0xb4, 0x3a, 0xd6, 0x14, 0xda, 0x77, 0x03, - 0xa7, 0x47, 0x86, 0xad, 0xa8, 0xbe, 0x76, 0x19, 0x03, 0x6b, 0x1f, 0x91, 0x9e, 0x35, 0xcc, 0x67, - 0xfe, 0x29, 0x0b, 0x85, 0xf5, 0xbd, 0xed, 0x1b, 0xd4, 0xeb, 0xfb, 0x68, 0x19, 0xa6, 0x5d, 0xab, - 0x47, 0x2a, 0xc6, 0xb2, 0xb1, 0x52, 0x6c, 0xce, 0x3e, 0x1a, 0xd4, 0xa6, 0xce, 0x06, 0xb5, 0xe9, - 0xb7, 0xad, 0x1e, 0xc1, 0x02, 0x83, 0xba, 0x50, 0x38, 0x21, 0x94, 0x39, 0x9e, 0xcb, 0x2a, 0x99, - 0xe5, 0xec, 0x4a, 0x69, 0xed, 0x8d, 0x7a, 0x9a, 0xf3, 0xd7, 0x85, 0x82, 0xdb, 0x92, 0x75, 0xcb, - 0xa3, 0x2d, 0x87, 0xb5, 0xbd, 0x13, 0x42, 0x4f, 0x9b, 0x0b, 0x4a, 0x4b, 0x41, 0x21, 0x19, 0x8e, - 0x34, 0xa0, 0x1f, 0x1b, 0xb0, 0xe0, 0x53, 0x72, 0x48, 0x28, 0x25, 0xb6, 0xc2, 0x57, 0xb2, 0xcb, - 0xc6, 0xa7, 0xa0, 0xb6, 0xa2, 0xd4, 0x2e, 0xec, 0x0d, 0xc9, 0xc7, 0x23, 0x1a, 0xd1, 0x6f, 0x0c, - 0xa8, 0x32, 0x42, 0x4f, 0x08, 0x5d, 0xb7, 0x6d, 0x4a, 0x18, 0x6b, 0x9e, 0x6e, 0x74, 0x1d, 0xe2, - 0x06, 0x1b, 0xdb, 0x2d, 0xcc, 0x2a, 0xd3, 0xc2, 0x0f, 0x5f, 0x4f, 0x67, 0xd0, 0xfe, 0x38, 0x39, - 0x4d, 0x53, 0x59, 0x54, 0x1d, 0x4b, 0xc2, 0xf0, 0x13, 0xcc, 0x30, 0x0f, 0x61, 0x36, 0x0c, 0xe4, - 0x4d, 0x87, 0x05, 0xe8, 0x36, 0xe4, 0x3b, 0xfc, 0x83, 0x55, 0x0c, 0x61, 0x60, 0x3d, 0x9d, 0x81, - 0xa1, 0x8c, 0xe6, 0x9c, 0xb2, 0x27, 0x2f, 0x3e, 0x19, 0x56, 0xd2, 0xcc, 0x9f, 0x4f, 0x43, 0x69, - 0x7d, 0x6f, 0x1b, 0x13, 0xe6, 0xf5, 0x69, 0x9b, 0xa4, 0x48, 0x9a, 0x35, 0x00, 0xfe, 0x97, 0xf9, - 0x56, 0x9b, 0xd8, 0x95, 0xcc, 0xb2, 0xb1, 0x52, 0x68, 0x22, 0x45, 0x07, 0x6f, 0x47, 0x18, 0xac, - 0x51, 0x71, 0xa9, 0xc7, 0x8e, 0x6b, 0x8b, 0x68, 0x6b, 0x52, 0xdf, 0x72, 0x5c, 0x1b, 0x0b, 0x0c, - 0xba, 0x09, 0xb9, 0x13, 0x42, 0x0f, 0xb8, 0xff, 0x79, 0x42, 0x7c, 0x31, 0xdd, 0xf1, 0x6e, 0x73, - 0x96, 0x66, 0xf1, 0x6c, 0x50, 0xcb, 0x89, 0x9f, 0x58, 0x0a, 0x41, 0x75, 0x00, 0x76, 0xe4, 0xd1, - 0x40, 0x98, 0x53, 0xc9, 0x2d, 0x67, 0x57, 0x8a, 0xcd, 0x39, 0x6e, 0xdf, 0x7e, 0x04, 0xc5, 0x1a, - 0x05, 0xba, 0x06, 0xb3, 0xcc, 0x71, 0x3b, 0xfd, 0xae, 0x45, 0x39, 0xa0, 0x92, 0x17, 0x76, 0x5e, - 0x51, 0x76, 0xce, 0xee, 0x6b, 0x38, 0x9c, 0xa0, 0xe4, 0x9a, 0xda, 0x56, 0x40, 0x3a, 0x1e, 0x75, - 0x08, 0xab, 0xcc, 0xc4, 0x9a, 0x36, 0x22, 0x28, 0xd6, 0x28, 0xd0, 0x0b, 0x90, 0x13, 0x9e, 0xaf, - 0x14, 0x84, 0x8a, 0xb2, 0x52, 0x91, 0x13, 0x61, 0xc1, 0x12, 0x87, 0x5e, 0x82, 0x19, 0x75, 0x6b, - 0x2a, 0x45, 0x41, 0x36, 0xaf, 0xc8, 0x66, 0xc2, 0xb4, 0x0e, 0xf1, 0xe8, 0x9b, 0x80, 0x58, 0xe0, - 0x51, 0xab, 0x43, 0x14, 0xea, 0x4d, 0x8b, 0x1d, 0x55, 0x40, 0x70, 0x55, 0x15, 0x17, 0xda, 0x1f, - 0xa1, 0xc0, 0x17, 0x70, 0x99, 0xbf, 0x37, 0x60, 0x5e, 0xcb, 0x05, 0x91, 0x77, 0xd7, 0x60, 0xb6, - 0xa3, 0xdd, 0x3a, 0x95, 0x17, 0x91, 0x67, 0xf4, 0x1b, 0x89, 0x13, 0x94, 0x88, 0x40, 0x91, 0x2a, - 0x49, 0x61, 0x75, 0x59, 0x4d, 0x9d, 0xb4, 0xa1, 0x0d, 0xb1, 0x26, 0x0d, 0xc8, 0x70, 0x2c, 0xd9, - 0xfc, 0xbb, 0x21, 0x12, 0x38, 0xac, 0x37, 0x68, 0x45, 0xab, 0x69, 0x86, 0x08, 0xc7, 0xec, 0x98, - 0x7a, 0x74, 0x49, 0x21, 0xc8, 0xfc, 0x5f, 0x14, 0x82, 0xeb, 0x85, 0x5f, 0x7d, 0x50, 0x9b, 0x7a, - 0xef, 0x2f, 0xcb, 0x53, 0x66, 0x0f, 0xca, 0x1b, 0x94, 0x58, 0x01, 0xd9, 0xf5, 0x03, 0x71, 0x00, - 0x13, 0xf2, 0x36, 0x3d, 0xc5, 0x7d, 0x57, 0x1d, 0x14, 0xf8, 0xfd, 0x6e, 0x09, 0x08, 0x56, 0x18, - 0x1e, 0xbf, 
0x43, 0x87, 0x74, 0xed, 0x1d, 0xcb, 0xb5, 0x3a, 0x84, 0xaa, 0x1b, 0x18, 0x79, 0x75, - 0x4b, 0xc3, 0xe1, 0x04, 0xa5, 0xf9, 0xd3, 0x2c, 0x94, 0x5b, 0xa4, 0x4b, 0x62, 0x7d, 0x5b, 0x80, - 0x3a, 0xd4, 0x6a, 0x93, 0x3d, 0x42, 0x1d, 0xcf, 0xde, 0x27, 0x6d, 0xcf, 0xb5, 0x99, 0xc8, 0x88, - 0x6c, 0xf3, 0x33, 0x3c, 0xcf, 0x6e, 0x8c, 0x60, 0xf1, 0x05, 0x1c, 0xa8, 0x0b, 0x65, 0x9f, 0x8a, - 0xdf, 0x4e, 0xa0, 0x7a, 0x0f, 0xbf, 0xf3, 0xaf, 0xa4, 0x73, 0xf5, 0x9e, 0xce, 0xda, 0x5c, 0x3c, - 0x1b, 0xd4, 0xca, 0x09, 0x10, 0x4e, 0x0a, 0x47, 0xdf, 0x80, 0x05, 0x8f, 0xfa, 0x47, 0x96, 0xdb, - 0x22, 0x3e, 0x71, 0x6d, 0xe2, 0x06, 0x4c, 0x78, 0xa1, 0xd0, 0xbc, 0xc2, 0x3b, 0xc6, 0xee, 0x10, - 0x0e, 0x8f, 0x50, 0xa3, 0x3b, 0xb0, 0xe8, 0x53, 0xcf, 0xb7, 0x3a, 0x16, 0x97, 0xb8, 0xe7, 0x75, - 0x9d, 0xf6, 0xa9, 0xa8, 0x53, 0xc5, 0xe6, 0xd5, 0xb3, 0x41, 0x6d, 0x71, 0x6f, 0x18, 0x79, 0x3e, - 0xa8, 0x3d, 0x23, 0x5c, 0xc7, 0x21, 0x31, 0x12, 0x8f, 0x8a, 0xd1, 0x62, 0x98, 0x1b, 0x17, 0x43, - 0x73, 0x1b, 0x0a, 0xad, 0x3e, 0x15, 0x5c, 0xe8, 0x75, 0x28, 0xd8, 0xea, 0xb7, 0xf2, 0xfc, 0xf3, - 0x61, 0xcb, 0x0d, 0x69, 0xce, 0x07, 0xb5, 0x32, 0x1f, 0x12, 0xea, 0x21, 0x00, 0x47, 0x2c, 0xe6, - 0x5d, 0x28, 0x6f, 0xde, 0xf7, 0x3d, 0x1a, 0x84, 0x31, 0x7d, 0x11, 0xf2, 0x44, 0x00, 0x84, 0xb4, - 0x42, 0xdc, 0x27, 0x24, 0x19, 0x56, 0x58, 0x5e, 0xb7, 0xc8, 0x7d, 0xab, 0x1d, 0xa8, 0x82, 0x1f, - 0xd5, 0xad, 0x4d, 0x0e, 0xc4, 0x12, 0x67, 0x7e, 0x68, 0x40, 0x5e, 0x64, 0x14, 0x43, 0xef, 0x40, - 0xb6, 0x67, 0xf9, 0xaa, 0x59, 0xbd, 0x9a, 0x2e, 0xb2, 0x92, 0xb5, 0xbe, 0x63, 0xf9, 0x9b, 0x6e, - 0x40, 0x4f, 0x9b, 0x25, 0xa5, 0x24, 0xbb, 0x63, 0xf9, 0x98, 0x8b, 0xab, 0xda, 0x50, 0x08, 0xb1, - 0x68, 0x01, 0xb2, 0xc7, 0xe4, 0x54, 0x16, 0x24, 0xcc, 0x7f, 0xa2, 0x26, 0xe4, 0x4e, 0xac, 0x6e, - 0x9f, 0xa8, 0x7c, 0xba, 0x3a, 0x89, 0x56, 0x2c, 0x59, 0xaf, 0x67, 0xae, 0x19, 0xe6, 0x2e, 0xc0, - 0x0d, 0x12, 0x79, 0x68, 0x1d, 0xe6, 0xc3, 0x6a, 0x93, 0x2c, 0x82, 0x9f, 0x55, 0xe6, 0xcd, 0xe3, - 0x24, 0x1a, 0x0f, 0xd3, 0x9b, 0x77, 0xa1, 0x28, 0x0a, 0x25, 0xef, 0x77, 0x71, 0x07, 0x30, 0x9e, - 0xd0, 0x01, 0xc2, 0x86, 0x99, 0x19, 0xd7, 0x30, 0xb5, 0xba, 0xd0, 0x85, 0xb2, 0xe4, 0x0d, 0x7b, - 0x78, 0x2a, 0x0d, 0x57, 0xa1, 0x10, 0x9a, 0xa9, 0xb4, 0x44, 0xb3, 0x5b, 0x28, 0x08, 0x47, 0x14, - 0x9a, 0xb6, 0x23, 0x48, 0x14, 0xfd, 0x74, 0xca, 0xb4, 0x86, 0x96, 0x79, 0x72, 0x43, 0xd3, 0x34, - 0xfd, 0x08, 0x2a, 0xe3, 0x06, 0xbe, 0xa7, 0x68, 0x4b, 0xe9, 0x4d, 0x31, 0xdf, 0x37, 0x60, 0x41, - 0x97, 0x94, 0x3e, 0x7c, 0xe9, 0x95, 0x5c, 0x3e, 0x1a, 0x69, 0x1e, 0xf9, 0xb5, 0x01, 0x57, 0x12, - 0x47, 0x9b, 0x28, 0xe2, 0x13, 0x18, 0xa5, 0x27, 0x47, 0x76, 0x82, 0xe4, 0x68, 0x40, 0x69, 0xdb, - 0x75, 0x02, 0xc7, 0xea, 0x3a, 0x3f, 0x20, 0xf4, 0xf2, 0x61, 0xd2, 0xfc, 0xa3, 0x01, 0xb3, 0x1a, - 0x07, 0x43, 0x77, 0x61, 0x86, 0xd7, 0x5d, 0xc7, 0xed, 0xa8, 0xda, 0x91, 0x72, 0x66, 0xd0, 0x84, - 0xc4, 0xe7, 0xda, 0x93, 0x92, 0x70, 0x28, 0x12, 0xed, 0x41, 0x9e, 0x12, 0xd6, 0xef, 0x06, 0x93, - 0x95, 0x88, 0xfd, 0xc0, 0x0a, 0xfa, 0x4c, 0xd6, 0x66, 0x2c, 0xf8, 0xb1, 0x92, 0x63, 0xfe, 0x39, - 0x03, 0xe5, 0x9b, 0xd6, 0x01, 0xe9, 0xee, 0x93, 0x2e, 0x69, 0x07, 0x1e, 0x45, 0x3f, 0x84, 0x52, - 0xcf, 0x0a, 0xda, 0x47, 0x02, 0x1a, 0x8e, 0xeb, 0xad, 0x74, 0x8a, 0x12, 0x92, 0xea, 0x3b, 0xb1, - 0x18, 0x59, 0x10, 0x9f, 0x51, 0x07, 0x2b, 0x69, 0x18, 0xac, 0x6b, 0x13, 0x6f, 0x2c, 0xf1, 0xbd, - 0x79, 0xdf, 0xe7, 0xb3, 0xc4, 0xe4, 0x4f, 0xbb, 0x84, 0x09, 0x98, 0xbc, 0xdb, 0x77, 0x28, 0xe9, - 0x11, 0x37, 0x88, 0xdf, 0x58, 0x3b, 0x43, 0xf2, 0xf1, 0x88, 0xc6, 0xea, 0x1b, 0xb0, 0x30, 0x6c, - 0xfc, 0x05, 0xf5, 0xfa, 0x8a, 0x5e, 
0xaf, 0x8b, 0x7a, 0x05, 0xfe, 0xad, 0x01, 0x95, 0x71, 0x86, - 0xa0, 0xcf, 0x69, 0x82, 0xe2, 0x1e, 0xf1, 0x16, 0x39, 0x95, 0x52, 0x37, 0xa1, 0xe0, 0xf9, 0xfc, - 0x55, 0xec, 0x51, 0x95, 0xe7, 0x2f, 0x85, 0xb9, 0xbb, 0xab, 0xe0, 0xe7, 0x83, 0xda, 0xb3, 0x09, - 0xf1, 0x21, 0x02, 0x47, 0xac, 0xbc, 0x31, 0x0b, 0x7b, 0xf8, 0xb0, 0x10, 0x35, 0xe6, 0xdb, 0x02, - 0x82, 0x15, 0xc6, 0xfc, 0x83, 0x01, 0xd3, 0x62, 0x4a, 0xbe, 0x0b, 0x05, 0xee, 0x3f, 0xdb, 0x0a, - 0x2c, 0x61, 0x57, 0xea, 0xf7, 0x19, 0xe7, 0xde, 0x21, 0x81, 0x15, 0xdf, 0xaf, 0x10, 0x82, 0x23, - 0x89, 0x08, 0x43, 0xce, 0x09, 0x48, 0x2f, 0x0c, 0xe4, 0xcb, 0x63, 0x45, 0xab, 0xed, 0x40, 0x1d, - 0x5b, 0xf7, 0x36, 0xef, 0x07, 0xc4, 0xe5, 0xc1, 0x88, 0x8b, 0xc1, 0x36, 0x97, 0x81, 0xa5, 0x28, - 0xf3, 0x77, 0x06, 0x44, 0xaa, 0xf8, 0x75, 0x67, 0xa4, 0x7b, 0x78, 0xd3, 0x71, 0x8f, 0x95, 0x5b, - 0x23, 0x73, 0xf6, 0x15, 0x1c, 0x47, 0x14, 0x17, 0x35, 0xc4, 0xcc, 0x64, 0x0d, 0x91, 0x2b, 0x6c, - 0x7b, 0x6e, 0xe0, 0xb8, 0xfd, 0x91, 0xfa, 0xb2, 0xa1, 0xe0, 0x38, 0xa2, 0x30, 0xff, 0x95, 0x81, - 0x12, 0xb7, 0x35, 0xec, 0xc8, 0x5f, 0x85, 0x72, 0x57, 0x8f, 0x9e, 0xb2, 0xf9, 0x59, 0x25, 0x22, - 0x79, 0x1f, 0x71, 0x92, 0x96, 0x33, 0x8b, 0x31, 0x37, 0x62, 0xce, 0x24, 0x99, 0xb7, 0x74, 0x24, - 0x4e, 0xd2, 0xf2, 0x3a, 0x7b, 0x8f, 0xe7, 0xb5, 0x1a, 0x20, 0x23, 0xd7, 0x7e, 0x8b, 0x03, 0xb1, - 0xc4, 0x5d, 0xe4, 0x9f, 0xe9, 0x09, 0xfd, 0x73, 0x1d, 0xe6, 0x78, 0x20, 0xbd, 0x7e, 0x10, 0x4e, - 0xd9, 0x39, 0x31, 0xeb, 0xa1, 0xb3, 0x41, 0x6d, 0xee, 0x9d, 0x04, 0x06, 0x0f, 0x51, 0x72, 0x1b, - 0xbb, 0x4e, 0xcf, 0x09, 0x2a, 0x33, 0x82, 0x25, 0xb2, 0xf1, 0x26, 0x07, 0x62, 0x89, 0x4b, 0x04, - 0xa0, 0x70, 0x69, 0x00, 0xfe, 0x91, 0x01, 0x24, 0x9f, 0x05, 0xb6, 0x9c, 0x96, 0xe4, 0x8d, 0x7e, - 0x09, 0x66, 0x7a, 0xea, 0x59, 0x61, 0x24, 0x1b, 0x4a, 0xf8, 0xa2, 0x08, 0xf1, 0x68, 0x07, 0x8a, - 0xf2, 0x66, 0xc5, 0xd9, 0xd2, 0x50, 0xc4, 0xc5, 0xdd, 0x10, 0x71, 0x3e, 0xa8, 0x55, 0x13, 0x6a, - 0x22, 0xcc, 0x3b, 0xa7, 0x3e, 0xc1, 0xb1, 0x04, 0xb4, 0x06, 0x60, 0xf9, 0x8e, 0xbe, 0x43, 0x2a, - 0xc6, 0x3b, 0x88, 0xf8, 0x35, 0x88, 0x35, 0x2a, 0xf4, 0x26, 0x4c, 0x73, 0x4f, 0xa9, 0x05, 0xc3, - 0x17, 0xd2, 0xdd, 0x4f, 0xee, 0xeb, 0x66, 0x81, 0x37, 0x2d, 0xfe, 0x0b, 0x0b, 0x09, 0xe8, 0x0e, - 0xe4, 0x45, 0x5a, 0xc8, 0xa8, 0x4c, 0x38, 0x68, 0x8a, 0x57, 0x87, 0x9a, 0x92, 0xcf, 0xa3, 0x5f, - 0x58, 0x49, 0x34, 0xdf, 0x85, 0xe2, 0x8e, 0xd3, 0xa6, 0x1e, 0x57, 0xc7, 0x1d, 0xcc, 0x12, 0xaf, - 0xac, 0xc8, 0xc1, 0x61, 0xf0, 0x43, 0x3c, 0x8f, 0xba, 0x6b, 0xb9, 0x9e, 0x7c, 0x4b, 0xe5, 0xe2, - 0xa8, 0xbf, 0xcd, 0x81, 0x58, 0xe2, 0xae, 0x5f, 0xe1, 0x8d, 0xfa, 0x67, 0x0f, 0x6b, 0x53, 0x0f, - 0x1e, 0xd6, 0xa6, 0x3e, 0x78, 0xa8, 0x9a, 0xf6, 0xdf, 0x4a, 0x00, 0xbb, 0x07, 0xdf, 0x27, 0x6d, - 0x59, 0x0c, 0x2e, 0xdf, 0x00, 0xf1, 0xe1, 0x4b, 0x2d, 0x1e, 0xc5, 0xb6, 0x24, 0x33, 0x34, 0x7c, - 0x69, 0x38, 0x9c, 0xa0, 0x44, 0x0d, 0x28, 0x46, 0x5b, 0x21, 0x15, 0xb6, 0xc5, 0x30, 0x0d, 0xa2, - 0xd5, 0x11, 0x8e, 0x69, 0x12, 0x95, 0x69, 0xfa, 0xd2, 0xca, 0xd4, 0x84, 0x6c, 0xdf, 0xb1, 0x45, - 0x54, 0x8a, 0xcd, 0x2f, 0x85, 0x9d, 0xe1, 0xd6, 0x76, 0xeb, 0x7c, 0x50, 0x7b, 0x7e, 0xdc, 0x4a, - 0x35, 0x38, 0xf5, 0x09, 0xab, 0xdf, 0xda, 0x6e, 0x61, 0xce, 0x7c, 0xd1, 0xed, 0xcd, 0x4f, 0x78, - 0x7b, 0xd7, 0x00, 0xd4, 0xa9, 0x39, 0xb7, 0xbc, 0x86, 0x51, 0x76, 0xde, 0x88, 0x30, 0x58, 0xa3, - 0x42, 0x0c, 0x16, 0xdb, 0xfc, 0x71, 0xcf, 0x93, 0xdd, 0xe9, 0x11, 0x16, 0x58, 0x3d, 0xb9, 0x23, - 0x9a, 0x2c, 0x55, 0x9f, 0x53, 0x6a, 0x16, 0x37, 0x86, 0x85, 0xe1, 0x51, 0xf9, 0xc8, 0x83, 0x45, - 0x5b, 0x3d, 0x53, 0x63, 0xa5, 0xc5, 0x89, 0x95, 0x3e, 0xcb, 
0x15, 0xb6, 0x86, 0x05, 0xe1, 0x51, - 0xd9, 0xe8, 0xbb, 0x50, 0x0d, 0x81, 0xa3, 0xbb, 0x02, 0xb1, 0xb5, 0xca, 0x36, 0x97, 0xce, 0x06, - 0xb5, 0x6a, 0x6b, 0x2c, 0x15, 0x7e, 0x82, 0x04, 0x64, 0x43, 0xbe, 0x2b, 0xc7, 0xae, 0x92, 0x68, - 0x95, 0x5f, 0x4b, 0x77, 0x8a, 0x38, 0xfb, 0xeb, 0xfa, 0xb8, 0x15, 0xbd, 0x85, 0xd5, 0xa4, 0xa5, - 0x64, 0xa3, 0xfb, 0x50, 0xb2, 0x5c, 0xd7, 0x0b, 0x2c, 0xb9, 0xbd, 0x98, 0x15, 0xaa, 0xd6, 0x27, - 0x56, 0xb5, 0x1e, 0xcb, 0x18, 0x1a, 0xef, 0x34, 0x0c, 0xd6, 0x55, 0xa1, 0x7b, 0x30, 0xef, 0xdd, - 0x73, 0x09, 0xc5, 0xe4, 0x90, 0x50, 0xe2, 0xb6, 0x09, 0xab, 0x94, 0x85, 0xf6, 0x2f, 0xa7, 0xd4, - 0x9e, 0x60, 0x8e, 0x53, 0x3a, 0x09, 0x67, 0x78, 0x58, 0x0b, 0xaa, 0x03, 0x1c, 0x3a, 0xae, 0x1a, - 0xd2, 0x2b, 0x73, 0xf1, 0x9a, 0x73, 0x2b, 0x82, 0x62, 0x8d, 0x02, 0xbd, 0x0a, 0xa5, 0x76, 0xb7, - 0xcf, 0x02, 0x22, 0xf7, 0xa9, 0xf3, 0xe2, 0x06, 0x45, 0xe7, 0xdb, 0x88, 0x51, 0x58, 0xa7, 0x43, - 0x47, 0x30, 0xeb, 0x68, 0xaf, 0x81, 0xca, 0x82, 0xc8, 0xc5, 0xb5, 0x89, 0x9f, 0x00, 0xac, 0xb9, - 0xc0, 0x2b, 0x91, 0x0e, 0xc1, 0x09, 0xc9, 0xa8, 0x0f, 0xe5, 0x9e, 0xde, 0x6a, 0x2a, 0x8b, 0xc2, - 0x8f, 0xd7, 0xd2, 0xa9, 0x1a, 0x6d, 0x86, 0xf1, 0x00, 0x91, 0xc0, 0xe1, 0xa4, 0x96, 0xea, 0x57, - 0xa0, 0xf4, 0x1f, 0xce, 0xc4, 0x7c, 0xa6, 0x1e, 0xce, 0x98, 0x89, 0x66, 0xea, 0x0f, 0x33, 0x30, - 0x97, 0x8c, 0x73, 0xf4, 0xf6, 0x34, 0xc6, 0xae, 0xe5, 0xc3, 0x66, 0x90, 0x1d, 0xdb, 0x0c, 0x54, - 0xcd, 0x9d, 0x7e, 0x9a, 0x9a, 0x9b, 0x6c, 0xe7, 0xb9, 0x54, 0xed, 0xbc, 0x0e, 0xc0, 0xe7, 0x13, - 0xea, 0x75, 0xbb, 0x84, 0x8a, 0x12, 0x5d, 0x50, 0x8b, 0xf7, 0x08, 0x8a, 0x35, 0x0a, 0xb4, 0x05, - 0xe8, 0xa0, 0xeb, 0xb5, 0x8f, 0x85, 0x0b, 0xc2, 0xf2, 0x22, 0x8a, 0x73, 0x41, 0x2e, 0x2f, 0x9b, - 0x23, 0x58, 0x7c, 0x01, 0x87, 0x39, 0x03, 0xb9, 0x3d, 0x3e, 0xe6, 0x99, 0xbf, 0x34, 0x60, 0x56, - 0xfc, 0x9a, 0x64, 0x1d, 0x5b, 0x83, 0xdc, 0xa1, 0x17, 0xae, 0x5c, 0x0a, 0xf2, 0x3f, 0x17, 0x5b, - 0x1c, 0x80, 0x25, 0xfc, 0x29, 0xf6, 0xb5, 0xef, 0x1b, 0x90, 0x5c, 0x84, 0xa2, 0x37, 0x64, 0x68, - 0x8c, 0x68, 0x53, 0x39, 0x61, 0x58, 0x5e, 0x1f, 0x37, 0xe8, 0x3f, 0x93, 0x6a, 0xeb, 0x75, 0x15, - 0x8a, 0xd8, 0xf3, 0x82, 0x3d, 0x2b, 0x38, 0x62, 0xfc, 0xe0, 0x3e, 0xff, 0xa1, 0x7c, 0x23, 0x0e, - 0x2e, 0x30, 0x58, 0xc2, 0xcd, 0x5f, 0x18, 0xf0, 0xdc, 0xd8, 0x15, 0x39, 0xcf, 0x90, 0x76, 0xf4, - 0xa5, 0x4e, 0x14, 0x65, 0x48, 0x4c, 0x87, 0x35, 0x2a, 0x3e, 0xe9, 0x27, 0xf6, 0xea, 0xc3, 0x93, - 0x7e, 0x42, 0x1b, 0x4e, 0xd2, 0x9a, 0xff, 0xcc, 0x40, 0x5e, 0x3e, 0xfb, 0xff, 0xcb, 0x8f, 0xbb, - 0x17, 0x21, 0xcf, 0x84, 0x1e, 0x65, 0x5e, 0xd4, 0x74, 0xa4, 0x76, 0xac, 0xb0, 0x62, 0xd8, 0x26, - 0x8c, 0x59, 0x9d, 0xf0, 0x32, 0xc6, 0xc3, 0xb6, 0x04, 0xe3, 0x10, 0x8f, 0x5e, 0x83, 0x3c, 0x25, - 0x16, 0x8b, 0xde, 0x1d, 0x4b, 0xa1, 0x48, 0x2c, 0xa0, 0xe7, 0x83, 0xda, 0xac, 0x12, 0x2e, 0xbe, - 0xb1, 0xa2, 0x46, 0x77, 0x60, 0xc6, 0x26, 0x81, 0xe5, 0x74, 0xc3, 0xc1, 0xf6, 0x95, 0x49, 0xd6, - 0x23, 0x2d, 0xc9, 0xda, 0x2c, 0x71, 0x9b, 0xd4, 0x07, 0x0e, 0x05, 0xf2, 0x42, 0xd2, 0xf6, 0x6c, - 0xf9, 0x9f, 0xb5, 0x5c, 0x5c, 0x48, 0x36, 0x3c, 0x9b, 0x60, 0x81, 0x31, 0x1f, 0x18, 0x50, 0x92, - 0x92, 0x36, 0xac, 0x3e, 0x23, 0x68, 0x35, 0x3a, 0x85, 0x0c, 0x77, 0x38, 0xda, 0x4c, 0xf3, 0xc7, - 0xc0, 0xf9, 0xa0, 0x56, 0x14, 0x64, 0xe2, 0x65, 0x10, 0x1e, 0x40, 0xf3, 0x51, 0xe6, 0x12, 0x1f, - 0xbd, 0x00, 0x39, 0x71, 0x7b, 0x94, 0x33, 0xa3, 0x79, 0x59, 0x5c, 0x30, 0x2c, 0x71, 0xe6, 0x27, - 0x19, 0x28, 0x27, 0x0e, 0x97, 0x62, 0x38, 0x8e, 0x56, 0x71, 0x99, 0x14, 0xeb, 0xdd, 0xf1, 0xff, - 0x0f, 0xfd, 0x36, 0xe4, 0xdb, 0xfc, 0x7c, 0xe1, 0x3f, 0xa4, 0x57, 0x27, 0x09, 0x85, 
0xf0, 0x4c, - 0x9c, 0x49, 0xe2, 0x93, 0x61, 0x25, 0x10, 0xdd, 0x80, 0x45, 0x4a, 0x02, 0x7a, 0xba, 0x7e, 0x18, - 0x10, 0xaa, 0xbf, 0x2f, 0x73, 0xf1, 0xf8, 0x88, 0x87, 0x09, 0xf0, 0x28, 0x4f, 0x58, 0xfa, 0xf3, - 0x4f, 0x51, 0xfa, 0xcd, 0x2e, 0x4c, 0xff, 0x0f, 0x9f, 0x3a, 0xdf, 0x81, 0x62, 0x3c, 0x8c, 0x7e, - 0xca, 0x2a, 0xcd, 0xef, 0x41, 0x81, 0x67, 0x63, 0xf8, 0x88, 0xba, 0xa4, 0xb3, 0x26, 0x7b, 0x5e, - 0x26, 0x4d, 0xcf, 0x33, 0x7b, 0x50, 0xbe, 0xe5, 0xdb, 0x4f, 0xf9, 0x1f, 0xc0, 0x4c, 0xea, 0x8e, - 0xb2, 0x06, 0xf2, 0xbf, 0xea, 0xbc, 0x78, 0xcb, 0x05, 0x94, 0x56, 0xbc, 0xf5, 0x6d, 0x92, 0xb6, - 0x01, 0xfe, 0x89, 0x01, 0x20, 0xb6, 0x21, 0x9b, 0x27, 0xc4, 0x0d, 0xb8, 0x1f, 0x78, 0xc0, 0x87, - 0xfd, 0x20, 0x6e, 0xad, 0xc0, 0xa0, 0x5b, 0x90, 0xf7, 0xc4, 0x4c, 0xac, 0x56, 0xb2, 0x13, 0x6e, - 0xb7, 0xa2, 0x24, 0x97, 0x83, 0x35, 0x56, 0xc2, 0x9a, 0x2b, 0x8f, 0x1e, 0x2f, 0x4d, 0x7d, 0xf4, - 0x78, 0x69, 0xea, 0xe3, 0xc7, 0x4b, 0x53, 0xef, 0x9d, 0x2d, 0x19, 0x8f, 0xce, 0x96, 0x8c, 0x8f, - 0xce, 0x96, 0x8c, 0x8f, 0xcf, 0x96, 0x8c, 0x4f, 0xce, 0x96, 0x8c, 0x07, 0x7f, 0x5d, 0x9a, 0xba, - 0x93, 0x39, 0x59, 0xfd, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x66, 0x55, 0x2c, 0x41, 0x24, - 0x00, 0x00, -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index 36bda1fe5..605505e19 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -212,21 +212,20 @@ message ExportOptions { optional bool exact = 2; } -// Fields stores a set of fields in a data structure like a Trie. -// To understand how this is used, see: https://github.com/kubernetes-sigs/structured-merge-diff -message Fields { - // Map stores a set of fields in a data structure like a Trie. - // - // Each key is either a '.' representing the field itself, and will always map to an empty set, - // or a string representing a sub-field or item. The string will follow one of these four formats: - // 'f:', where is the name of a field in a struct, or key in a map - // 'v:', where is the exact json formatted value of a list item - // 'i:', where is position of a item in a list - // 'k:', where is a map of a list item's key fields to their unique values - // If a key maps to an empty Fields value, the field that key represents is part of the set. - // - // The exact format is defined in k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal - map map = 1; +// FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format. +// +// Each key is either a '.' representing the field itself, and will always map to an empty set, +// or a string representing a sub-field or item. The string will follow one of these four formats: +// 'f:', where is the name of a field in a struct, or key in a map +// 'v:', where is the exact json formatted value of a list item +// 'i:', where is position of a item in a list +// 'k:', where is a map of a list item's key fields to their unique values +// If a key maps to an empty Fields value, the field that key represents is part of the set. +// +// The exact format is defined in sigs.k8s.io/structured-merge-diff +message FieldsV1 { + // Raw is the underlying serialization of this object. + optional bytes Raw = 1; } // GetOptions is the standard query options to the standard REST get call. 
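
The FieldsV1 message above replaces the old recursive Fields map with a single raw JSON payload; the helpers.go hunk further down in this diff wires that up by making UnmarshalJSON copy the incoming bytes (treating a literal "null" as empty) and MarshalJSON hand them back verbatim. The following is only an illustrative Go sketch of that round trip, assuming the vendored k8s.io/apimachinery from this change; the sample field set is invented:

package main

import (
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Hypothetical managed-field set in the trie-like JSON format described above.
	raw := []byte(`{"f:metadata":{"f:labels":{".":{},"f:app":{}}}}`)

	// UnmarshalJSON stores the bytes as-is in Raw (a literal "null" leaves Raw nil).
	var fields metav1.FieldsV1
	if err := json.Unmarshal(raw, &fields); err != nil {
		panic(err)
	}

	// MarshalJSON returns the stored bytes, so the round trip is lossless.
	out, err := json.Marshal(fields)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // prints the same JSON document
}
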
@@ -302,27 +301,6 @@ message GroupVersionResource { optional string resource = 3; } -// Initializer is information about an initializer that has not yet completed. -message Initializer { - // name of the process that is responsible for initializing this object. - optional string name = 1; -} - -// Initializers tracks the progress of initialization. -message Initializers { - // Pending is a list of initializers that must execute in order before this object is visible. - // When the last pending initializer is removed, and no failing result is set, the initializers - // struct will be set to nil and the object is considered as initialized and visible to all - // clients. - // +patchMergeKey=name - // +patchStrategy=merge - repeated Initializer pending = 1; - - // If result is set with the Failure field, the object will be persisted to storage and then deleted, - // ensuring that other clients can observe the deletion. - optional Status result = 2; -} - // A label selector is a label query over a set of resources. The result of matchLabels and // matchExpressions are ANDed. An empty label selector matches all objects. A null // label selector matches no objects. @@ -361,7 +339,7 @@ message LabelSelectorRequirement { // List holds a list of objects, which may not be known by the server. message List { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional ListMeta metadata = 1; @@ -375,6 +353,10 @@ message ListMeta { // selfLink is a URL representing this object. // Populated by the system. // Read-only. + // + // DEPRECATED + // Kubernetes will stop propagating this field in 1.20 release and the field is planned + // to be removed in 1.21 release. // +optional optional string selfLink = 1; @@ -383,7 +365,7 @@ message ListMeta { // Value must be treated as opaque by clients and passed unmodified back to the server. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency // +optional optional string resourceVersion = 2; @@ -395,6 +377,18 @@ message ListMeta { // identical to the value in the first response, unless you have received this token from an error // message. optional string continue = 3; + + // remainingItemCount is the number of subsequent items in the list which are not included in this + // list response. If the list request contained label or field selectors, then the number of + // remaining items is unknown and the field will be left unset and omitted during serialization. + // If the list is complete (either because it is not chunking or because this is the last chunk), + // then there are no more remaining items and this field will be left unset and omitted during + // serialization. + // Servers older than v1.15 do not set this field. + // The intended use of the remainingItemCount is *estimating* the size of a collection. Clients + // should not rely on the remainingItemCount to be set or to be exact. + // +optional + optional int64 remainingItemCount = 4; } // ListOptions is the query options to a standard REST list call. 
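
The remainingItemCount field added to ListMeta above gives chunked LIST responses a rough count of the items the server has not yet returned, and the meta.go hunk later in this diff exposes it through the new GetRemainingItemCount/SetRemainingItemCount accessors on ListInterface. Below is an illustrative sketch of a client consuming it, assuming the vendored apimachinery from this change; the continue token and counts are invented:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Hypothetical metadata from one page of a chunked LIST: this chunk is done,
	// roughly 1200 items are still on the server.
	remaining := int64(1200)
	meta := &metav1.ListMeta{}
	meta.SetRemainingItemCount(&remaining)
	meta.SetContinue("opaque-continue-token")

	// The count is only an estimate and may be absent (selectors in use, last
	// chunk, or a server older than v1.15), so check for nil before dereferencing.
	if rem := meta.GetRemainingItemCount(); rem != nil {
		fmt.Printf("server estimates %d items remaining\n", *rem)
	} else {
		fmt.Println("remaining item count not provided")
	}
	if meta.GetContinue() != "" {
		fmt.Println("resend the LIST with this continue token to fetch the next chunk")
	}
}
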
@@ -414,6 +408,20 @@ message ListOptions { // +optional optional bool watch = 3; + // allowWatchBookmarks requests watch events with type "BOOKMARK". + // Servers that do not implement bookmarks may ignore this flag and + // bookmarks are sent at the server's discretion. Clients should not + // assume bookmarks are returned at any specific interval, nor may they + // assume the server will send any BOOKMARK event during a session. + // If this is not a watch, this field is ignored. + // If the feature gate WatchBookmarks is not enabled in apiserver, + // this field is ignored. + // + // This field is beta. + // + // +optional + optional bool allowWatchBookmarks = 9; + // When specified with a watch call, shows changes that occur after that particular version of a resource. // Defaults to changes from the beginning of history. // When specified for list: @@ -483,9 +491,13 @@ message ManagedFieldsEntry { // +optional optional Time time = 4; - // Fields identifies a set of fields. + // FieldsType is the discriminator for the different fields format and version. + // There is currently only one possible value: "FieldsV1" + optional string fieldsType = 6; + + // FieldsV1 holds the first JSON version format as described in the "FieldsV1" type. // +optional - optional Fields fields = 5; + optional FieldsV1 fieldsV1 = 7; } // MicroTime is version of Time with microsecond level precision. @@ -532,7 +544,7 @@ message ObjectMeta { // should retry (optionally after the time indicated in the Retry-After header). // // Applied only if Name is not specified. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency // +optional optional string generateName = 2; @@ -550,6 +562,10 @@ message ObjectMeta { // SelfLink is a URL representing this object. // Populated by the system. // Read-only. + // + // DEPRECATED + // Kubernetes will stop propagating this field in 1.20 release and the field is planned + // to be removed in 1.21 release. // +optional optional string selfLink = 4; @@ -572,7 +588,7 @@ message ObjectMeta { // Populated by the system. // Read-only. // Value must be treated as opaque by clients and . - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency // +optional optional string resourceVersion = 6; @@ -588,7 +604,7 @@ message ObjectMeta { // Populated by the system. // Read-only. // Null for lists. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional Time creationTimestamp = 8; @@ -609,7 +625,7 @@ message ObjectMeta { // // Populated by the system when a graceful deletion is requested. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional Time deletionTimestamp = 9; @@ -643,19 +659,6 @@ message ObjectMeta { // +patchStrategy=merge repeated OwnerReference ownerReferences = 13; - // An initializer is a controller which enforces some system invariant at object creation time. 
- // This field is a list of initializers that have not yet acted on this object. If nil or empty, - // this object has been completely initialized. Otherwise, the object is considered uninitialized - // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to - // observe uninitialized objects. - // - // When an object is created, the system will populate this list with the current set of initializers. - // Only privileged users may set or modify this list. Once it is empty, it may not be modified further - // by any user. - // - // DEPRECATED - initializers are an alpha field and will be removed in v1.15. - optional Initializers initializers = 16; - // Must be empty before the object is deleted from the registry. Each entry // is an identifier for the responsible component that will remove the entry // from the list. If the deletionTimestamp of the object is non-nil, entries @@ -678,8 +681,6 @@ message ObjectMeta { // "ci-cd". The set of fields is always in the version that the // workflow used when modifying the object. // - // This field is alpha and can be changed or removed without notice. - // // +optional repeated ManagedFieldsEntry managedFields = 17; } @@ -692,7 +693,7 @@ message OwnerReference { optional string apiVersion = 5; // Kind of the referent. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds optional string kind = 1; // Name of the referent. @@ -717,6 +718,28 @@ message OwnerReference { optional bool blockOwnerDeletion = 7; } +// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients +// to get access to a particular ObjectMeta schema without knowing the details of the version. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message PartialObjectMetadata { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional ObjectMeta metadata = 1; +} + +// PartialObjectMetadataList contains a list of objects containing only their metadata +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message PartialObjectMetadataList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional ListMeta metadata = 1; + + // items contains each of the included items. + repeated PartialObjectMetadata items = 2; +} + // Patch is provided to give a concrete name and type to the Kubernetes PATCH request body. message Patch { } @@ -780,13 +803,13 @@ message ServerAddressByClientCIDR { // Status is a return value for calls that don't return other objects. message Status { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional ListMeta metadata = 1; // Status of the operation. // One of: "Success" or "Failure". 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional string status = 2; @@ -857,7 +880,7 @@ message StatusDetails { // The kind attribute of the resource associated with the status StatusReason. // On some operations may differ from the requested resource Kind. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional string kind = 3; @@ -879,6 +902,16 @@ message StatusDetails { optional int32 retryAfterSeconds = 5; } +// TableOptions are used when a Table is requested by the caller. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message TableOptions { + // includeObject decides whether to include each object along with its columnar information. + // Specifying "None" will return no object, specifying "Object" will return the full object contents, and + // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind + // in version v1beta1 of the meta.k8s.io API group. + optional string includeObject = 1; +} + // Time is a wrapper around time.Time which supports correct // marshaling to YAML and JSON. Wrappers are provided for many // of the factory methods that the time package offers. @@ -925,14 +958,14 @@ message TypeMeta { // Servers may infer this from the endpoint the client submits requests to. // Cannot be updated. // In CamelCase. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional string kind = 1; // APIVersion defines the versioned schema of this representation of an object. // Servers should convert recognized schemas to the latest internal value, and // may reject unrecognized values. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources // +optional optional string apiVersion = 2; } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go index b4dc78b3e..ec016fd3c 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go @@ -17,7 +17,9 @@ limitations under the License. package v1 import ( + "bytes" "encoding/json" + "errors" "fmt" "k8s.io/apimachinery/pkg/fields" @@ -254,14 +256,25 @@ func ResetObjectMetaForStatus(meta, existingMeta Object) { } // MarshalJSON implements json.Marshaler -func (f Fields) MarshalJSON() ([]byte, error) { - return json.Marshal(&f.Map) +// MarshalJSON may get called on pointers or values, so implement MarshalJSON on value. 
+// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go +func (f FieldsV1) MarshalJSON() ([]byte, error) { + if f.Raw == nil { + return []byte("null"), nil + } + return f.Raw, nil } // UnmarshalJSON implements json.Unmarshaler -func (f *Fields) UnmarshalJSON(b []byte) error { - return json.Unmarshal(b, &f.Map) +func (f *FieldsV1) UnmarshalJSON(b []byte) error { + if f == nil { + return errors.New("metav1.Fields: UnmarshalJSON on nil pointer") + } + if !bytes.Equal(b, []byte("null")) { + f.Raw = append(f.Raw[0:0], b...) + } + return nil } -var _ json.Marshaler = Fields{} -var _ json.Unmarshaler = &Fields{} +var _ json.Marshaler = FieldsV1{} +var _ json.Unmarshaler = &FieldsV1{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go index 05f07adf1..912cf2036 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go @@ -55,8 +55,6 @@ type Object interface { SetLabels(labels map[string]string) GetAnnotations() map[string]string SetAnnotations(annotations map[string]string) - GetInitializers() *Initializers - SetInitializers(initializers *Initializers) GetFinalizers() []string SetFinalizers(finalizers []string) GetOwnerReferences() []OwnerReference @@ -94,6 +92,8 @@ type ListInterface interface { SetSelfLink(selfLink string) GetContinue() string SetContinue(c string) + GetRemainingItemCount() *int64 + SetRemainingItemCount(c *int64) } // Type exposes the type and APIVersion of versioned or internal API objects. @@ -105,12 +105,16 @@ type Type interface { SetKind(kind string) } +var _ ListInterface = &ListMeta{} + func (meta *ListMeta) GetResourceVersion() string { return meta.ResourceVersion } func (meta *ListMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } func (meta *ListMeta) GetSelfLink() string { return meta.SelfLink } func (meta *ListMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } func (meta *ListMeta) GetContinue() string { return meta.Continue } func (meta *ListMeta) SetContinue(c string) { meta.Continue = c } +func (meta *ListMeta) GetRemainingItemCount() *int64 { return meta.RemainingItemCount } +func (meta *ListMeta) SetRemainingItemCount(c *int64) { meta.RemainingItemCount = c } func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj } @@ -160,8 +164,6 @@ func (meta *ObjectMeta) GetLabels() map[string]string { return m func (meta *ObjectMeta) SetLabels(labels map[string]string) { meta.Labels = labels } func (meta *ObjectMeta) GetAnnotations() map[string]string { return meta.Annotations } func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations } -func (meta *ObjectMeta) GetInitializers() *Initializers { return meta.Initializers } -func (meta *ObjectMeta) SetInitializers(initializers *Initializers) { meta.Initializers = initializers } func (meta *ObjectMeta) GetFinalizers() []string { return meta.Finalizers } func (meta *ObjectMeta) SetFinalizers(finalizers []string) { meta.Finalizers = finalizers } func (meta *ObjectMeta) GetOwnerReferences() []OwnerReference { return meta.OwnerReferences } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go index 6f6c5111b..cdd9a6a7a 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go @@ -41,11 +41,6 @@ func (t *MicroTime) 
DeepCopyInto(out *MicroTime) { *out = *t } -// String returns the representation of the time. -func (t MicroTime) String() string { - return t.Time.String() -} - // NewMicroTime returns a wrapped instance of the provided time func NewMicroTime(time time.Time) MicroTime { return MicroTime{time} @@ -72,22 +67,40 @@ func (t *MicroTime) IsZero() bool { // Before reports whether the time instant t is before u. func (t *MicroTime) Before(u *MicroTime) bool { - return t.Time.Before(u.Time) + if t != nil && u != nil { + return t.Time.Before(u.Time) + } + return false } // Equal reports whether the time instant t is equal to u. func (t *MicroTime) Equal(u *MicroTime) bool { - return t.Time.Equal(u.Time) + if t == nil && u == nil { + return true + } + if t != nil && u != nil { + return t.Time.Equal(u.Time) + } + return false } // BeforeTime reports whether the time instant t is before second-lever precision u. func (t *MicroTime) BeforeTime(u *Time) bool { - return t.Time.Before(u.Time) + if t != nil && u != nil { + return t.Time.Before(u.Time) + } + return false } // EqualTime reports whether the time instant t is equal to second-lever precision u. func (t *MicroTime) EqualTime(u *Time) bool { - return t.Time.Equal(u.Time) + if t == nil && u == nil { + return true + } + if t != nil && u != nil { + return t.Time.Equal(u.Time) + } + return false } // UnixMicro returns the local time corresponding to the given Unix time diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go index 14841be51..6dd6d8999 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go @@ -70,3 +70,11 @@ func (m *MicroTime) MarshalTo(data []byte) (int, error) { } return m.ProtoMicroTime().MarshalTo(data) } + +// MarshalToSizedBuffer implements the protobuf marshalling interface. +func (m *MicroTime) MarshalToSizedBuffer(data []byte) (int, error) { + if m == nil || m.Time.IsZero() { + return 0, nil + } + return m.ProtoMicroTime().MarshalToSizedBuffer(data) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go index 76d042a96..368efe1ef 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go @@ -94,6 +94,23 @@ func init() { &PatchOptions{}, ) + if err := AddMetaToScheme(scheme); err != nil { + panic(err) + } + // register manually. This usually goes through the SchemeBuilder, which we cannot use here. utilruntime.Must(RegisterDefaults(scheme)) } + +func AddMetaToScheme(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Table{}, + &TableOptions{}, + &PartialObjectMetadata{}, + &PartialObjectMetadataList{}, + ) + + return scheme.AddConversionFuncs( + Convert_Slice_string_To_v1_IncludeObjectPolicy, + ) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go index efff656e1..fe510ed9e 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go @@ -20,7 +20,7 @@ import ( "encoding/json" "time" - "github.com/google/gofuzz" + fuzz "github.com/google/gofuzz" ) // Time is a wrapper around time.Time which supports correct @@ -41,11 +41,6 @@ func (t *Time) DeepCopyInto(out *Time) { *out = *t } -// String returns the representation of the time. 
-func (t Time) String() string { - return t.Time.String() -} - // NewTime returns a wrapped instance of the provided time func NewTime(time time.Time) Time { return Time{time} @@ -72,7 +67,10 @@ func (t *Time) IsZero() bool { // Before reports whether the time instant t is before u. func (t *Time) Before(u *Time) bool { - return t.Time.Before(u.Time) + if t != nil && u != nil { + return t.Time.Before(u.Time) + } + return false } // Equal reports whether the time instant t is equal to u. @@ -147,8 +145,12 @@ func (t Time) MarshalJSON() ([]byte, error) { // Encode unset/nil objects as JSON's "null". return []byte("null"), nil } - - return json.Marshal(t.UTC().Format(time.RFC3339)) + buf := make([]byte, 0, len(time.RFC3339)+2) + buf = append(buf, '"') + // time cannot contain non escapable JSON characters + buf = t.UTC().AppendFormat(buf, time.RFC3339) + buf = append(buf, '"') + return buf, nil } // OpenAPISchemaType is used by the kube-openapi generator when constructing diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go index ed72186b4..eac8d9658 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go @@ -75,7 +75,7 @@ func (m *Time) Unmarshal(data []byte) error { return nil } -// Marshal implements the protobuf marshalling interface. +// Marshal implements the protobuf marshaling interface. func (m *Time) Marshal() (data []byte, err error) { if m == nil || m.Time.IsZero() { return nil, nil @@ -83,10 +83,18 @@ func (m *Time) Marshal() (data []byte, err error) { return m.ProtoTime().Marshal() } -// MarshalTo implements the protobuf marshalling interface. +// MarshalTo implements the protobuf marshaling interface. func (m *Time) MarshalTo(data []byte) (int, error) { if m == nil || m.Time.IsZero() { return 0, nil } return m.ProtoTime().MarshalTo(data) } + +// MarshalToSizedBuffer implements the protobuf reverse marshaling interface. +func (m *Time) MarshalToSizedBuffer(data []byte) (int, error) { + if m == nil || m.Time.IsZero() { + return 0, nil + } + return m.ProtoTime().MarshalToSizedBuffer(data) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index fd6395256..76b275589 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -43,14 +43,14 @@ type TypeMeta struct { // Servers may infer this from the endpoint the client submits requests to. // Cannot be updated. // In CamelCase. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` // APIVersion defines the versioned schema of this representation of an object. // Servers should convert recognized schemas to the latest internal value, and // may reject unrecognized values. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"` } @@ -61,6 +61,10 @@ type ListMeta struct { // selfLink is a URL representing this object. // Populated by the system. // Read-only. 
+ // + // DEPRECATED + // Kubernetes will stop propagating this field in 1.20 release and the field is planned + // to be removed in 1.21 release. // +optional SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,1,opt,name=selfLink"` @@ -69,7 +73,7 @@ type ListMeta struct { // Value must be treated as opaque by clients and passed unmodified back to the server. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency // +optional ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"` @@ -81,6 +85,18 @@ type ListMeta struct { // identical to the value in the first response, unless you have received this token from an error // message. Continue string `json:"continue,omitempty" protobuf:"bytes,3,opt,name=continue"` + + // remainingItemCount is the number of subsequent items in the list which are not included in this + // list response. If the list request contained label or field selectors, then the number of + // remaining items is unknown and the field will be left unset and omitted during serialization. + // If the list is complete (either because it is not chunking or because this is the last chunk), + // then there are no more remaining items and this field will be left unset and omitted during + // serialization. + // Servers older than v1.15 do not set this field. + // The intended use of the remainingItemCount is *estimating* the size of a collection. Clients + // should not rely on the remainingItemCount to be set or to be exact. + // +optional + RemainingItemCount *int64 `json:"remainingItemCount,omitempty" protobuf:"bytes,4,opt,name=remainingItemCount"` } // These are internal finalizer values for Kubernetes-like APIs, must be qualified name unless defined here @@ -115,7 +131,7 @@ type ObjectMeta struct { // should retry (optionally after the time indicated in the Retry-After header). // // Applied only if Name is not specified. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency // +optional GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"` @@ -133,6 +149,10 @@ type ObjectMeta struct { // SelfLink is a URL representing this object. // Populated by the system. // Read-only. + // + // DEPRECATED + // Kubernetes will stop propagating this field in 1.20 release and the field is planned + // to be removed in 1.21 release. // +optional SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"` @@ -155,7 +175,7 @@ type ObjectMeta struct { // Populated by the system. // Read-only. // Value must be treated as opaque by clients and . - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency // +optional ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` @@ -171,7 +191,7 @@ type ObjectMeta struct { // Populated by the system. // Read-only. // Null for lists. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional CreationTimestamp Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"` @@ -192,7 +212,7 @@ type ObjectMeta struct { // // Populated by the system when a graceful deletion is requested. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional DeletionTimestamp *Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"` @@ -226,19 +246,6 @@ type ObjectMeta struct { // +patchStrategy=merge OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"` - // An initializer is a controller which enforces some system invariant at object creation time. - // This field is a list of initializers that have not yet acted on this object. If nil or empty, - // this object has been completely initialized. Otherwise, the object is considered uninitialized - // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to - // observe uninitialized objects. - // - // When an object is created, the system will populate this list with the current set of initializers. - // Only privileged users may set or modify this list. Once it is empty, it may not be modified further - // by any user. - // - // DEPRECATED - initializers are an alpha field and will be removed in v1.15. - Initializers *Initializers `json:"initializers,omitempty" protobuf:"bytes,16,opt,name=initializers"` - // Must be empty before the object is deleted from the registry. Each entry // is an identifier for the responsible component that will remove the entry // from the list. If the deletionTimestamp of the object is non-nil, entries @@ -261,32 +268,10 @@ type ObjectMeta struct { // "ci-cd". The set of fields is always in the version that the // workflow used when modifying the object. // - // This field is alpha and can be changed or removed without notice. - // // +optional ManagedFields []ManagedFieldsEntry `json:"managedFields,omitempty" protobuf:"bytes,17,rep,name=managedFields"` } -// Initializers tracks the progress of initialization. -type Initializers struct { - // Pending is a list of initializers that must execute in order before this object is visible. - // When the last pending initializer is removed, and no failing result is set, the initializers - // struct will be set to nil and the object is considered as initialized and visible to all - // clients. - // +patchMergeKey=name - // +patchStrategy=merge - Pending []Initializer `json:"pending" protobuf:"bytes,1,rep,name=pending" patchStrategy:"merge" patchMergeKey:"name"` - // If result is set with the Failure field, the object will be persisted to storage and then deleted, - // ensuring that other clients can observe the deletion. - Result *Status `json:"result,omitempty" protobuf:"bytes,2,opt,name=result"` -} - -// Initializer is information about an initializer that has not yet completed. -type Initializer struct { - // name of the process that is responsible for initializing this object. 
- Name string `json:"name" protobuf:"bytes,1,opt,name=name"` -} - const ( // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients NamespaceDefault string = "default" @@ -307,7 +292,7 @@ type OwnerReference struct { // API version of the referent. APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"` // Kind of the referent. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent. // More info: http://kubernetes.io/docs/user-guide/identifiers#names @@ -349,6 +334,20 @@ type ListOptions struct { // add, update, and remove notifications. Specify resourceVersion. // +optional Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"` + // allowWatchBookmarks requests watch events with type "BOOKMARK". + // Servers that do not implement bookmarks may ignore this flag and + // bookmarks are sent at the server's discretion. Clients should not + // assume bookmarks are returned at any specific interval, nor may they + // assume the server will send any BOOKMARK event during a session. + // If this is not a watch, this field is ignored. + // If the feature gate WatchBookmarks is not enabled in apiserver, + // this field is ignored. + // + // This field is beta. + // + // +optional + AllowWatchBookmarks bool `json:"allowWatchBookmarks,omitempty" protobuf:"varint,9,opt,name=allowWatchBookmarks"` + // When specified with a watch call, shows changes that occur after that particular version of a resource. // Defaults to changes from the beginning of history. // When specified for list: @@ -586,13 +585,13 @@ type Preconditions struct { type Status struct { TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Status of the operation. // One of: "Success" or "Failure". - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status string `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` // A human-readable description of the status of this operation. @@ -631,7 +630,7 @@ type StatusDetails struct { Group string `json:"group,omitempty" protobuf:"bytes,2,opt,name=group"` // The kind attribute of the resource associated with the status StatusReason. // On some operations may differ from the requested resource Kind. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional Kind string `json:"kind,omitempty" protobuf:"bytes,3,opt,name=kind"` // UID of the resource. @@ -763,11 +762,13 @@ const ( // doesn't make any sense, for example deleting a read-only object. This is different than // StatusReasonInvalid above which indicates that the API call could possibly succeed, but the // data was invalid. API calls that return BadRequest can never succeed. 
+ // Status code 400 StatusReasonBadRequest StatusReason = "BadRequest" // StatusReasonMethodNotAllowed means that the action the client attempted to perform on the // resource was not supported by the code - for instance, attempting to delete a resource that // can only be created. API calls that return MethodNotAllowed can never succeed. + // Status code 405 StatusReasonMethodNotAllowed StatusReason = "MethodNotAllowed" // StatusReasonNotAcceptable means that the accept types indicated by the client were not acceptable @@ -866,7 +867,7 @@ const ( type List struct { TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -1099,9 +1100,16 @@ type ManagedFieldsEntry struct { // Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply' // +optional Time *Time `json:"time,omitempty" protobuf:"bytes,4,opt,name=time"` - // Fields identifies a set of fields. + + // Fields is tombstoned to show why 5 is a reserved protobuf tag. + //Fields *Fields `json:"fields,omitempty" protobuf:"bytes,5,opt,name=fields,casttype=Fields"` + + // FieldsType is the discriminator for the different fields format and version. + // There is currently only one possible value: "FieldsV1" + FieldsType string `json:"fieldsType,omitempty" protobuf:"bytes,6,opt,name=fieldsType"` + // FieldsV1 holds the first JSON version format as described in the "FieldsV1" type. // +optional - Fields *Fields `json:"fields,omitempty" protobuf:"bytes,5,opt,name=fields,casttype=Fields"` + FieldsV1 *FieldsV1 `json:"fieldsV1,omitempty" protobuf:"bytes,7,opt,name=fieldsV1"` } // ManagedFieldsOperationType is the type of operation which lead to a ManagedFieldsEntry being created. @@ -1112,19 +1120,179 @@ const ( ManagedFieldsOperationUpdate ManagedFieldsOperationType = "Update" ) -// Fields stores a set of fields in a data structure like a Trie. -// To understand how this is used, see: https://github.com/kubernetes-sigs/structured-merge-diff -type Fields struct { - // Map stores a set of fields in a data structure like a Trie. - // - // Each key is either a '.' representing the field itself, and will always map to an empty set, - // or a string representing a sub-field or item. The string will follow one of these four formats: - // 'f:<name>', where <name> is the name of a field in a struct, or key in a map - // 'v:<value>', where <value> is the exact json formatted value of a list item - // 'i:<index>', where <index> is position of a item in a list - // 'k:<keys>', where <keys> is a map of a list item's key fields to their unique values - // If a key maps to an empty Fields value, the field that key represents is part of the set. - // - // The exact format is defined in k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal - Map map[string]Fields `json:",inline" protobuf:"bytes,1,rep,name=map"` +// FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format. +// +// Each key is either a '.' representing the field itself, and will always map to an empty set, +// or a string representing a sub-field or item.
The string will follow one of these four formats: +// 'f:<name>', where <name> is the name of a field in a struct, or key in a map +// 'v:<value>', where <value> is the exact json formatted value of a list item +// 'i:<index>', where <index> is position of a item in a list +// 'k:<keys>', where <keys> is a map of a list item's key fields to their unique values +// If a key maps to an empty Fields value, the field that key represents is part of the set. +// +// The exact format is defined in sigs.k8s.io/structured-merge-diff +type FieldsV1 struct { + // Raw is the underlying serialization of this object. + Raw []byte `json:"-" protobuf:"bytes,1,opt,name=Raw"` +} + +// TODO: Table does not generate to protobuf because of the interface{} - fix protobuf +// generation to support a meta type that can accept any valid JSON. This can be introduced +// in a v1 because clients a) receive an error if they try to access proto today, and b) +// once introduced they would be able to gracefully switch over to using it. + +// Table is a tabular representation of a set of API resources. The server transforms the +// object into a set of preferred columns for quickly reviewing the objects. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +protobuf=false +type Table struct { + TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + ListMeta `json:"metadata,omitempty"` + + // columnDefinitions describes each column in the returned items array. The number of cells per row + // will always match the number of column definitions. + ColumnDefinitions []TableColumnDefinition `json:"columnDefinitions"` + // rows is the list of items in the table. + Rows []TableRow `json:"rows"` + } + +// TableColumnDefinition contains information about a column returned in the Table. +// +protobuf=false +type TableColumnDefinition struct { + // name is a human readable name for the column. + Name string `json:"name"` + // type is an OpenAPI type definition for this column, such as number, integer, string, or + // array. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + Type string `json:"type"` + // format is an optional OpenAPI type modifier for this column. A format modifies the type and + // imposes additional rules, like date or time formatting for a string. The 'name' format is applied + // to the primary identifier column which has type 'string' to assist in clients identifying column + // is the resource name. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + Format string `json:"format"` + // description is a human readable description of this column. + Description string `json:"description"` + // priority is an integer defining the relative importance of this column compared to others. Lower + // numbers are considered higher priority. Columns that may be omitted in limited space scenarios + // should be given a higher priority. + Priority int32 `json:"priority"` +} + +// TableRow is an individual row in a table. +// +protobuf=false +type TableRow struct { + // cells will be as wide as the column definitions array and may contain strings, numbers (float64 or + // int64), booleans, simple maps, lists, or null. See the type field of the column definition for a + // more detailed description. + Cells []interface{} `json:"cells"` + // conditions describe additional status of a row that are relevant for a human user.
These conditions + // apply to the row, not to the object, and will be specific to table output. The only defined + // condition type is 'Completed', for a row that indicates a resource that has run to completion and + // can be given less visual priority. + // +optional + Conditions []TableRowCondition `json:"conditions,omitempty"` + // This field contains the requested additional information about each object based on the includeObject + // policy when requesting the Table. If "None", this field is empty, if "Object" this will be the + // default serialization of the object for the current API version, and if "Metadata" (the default) will + // contain the object metadata. Check the returned kind and apiVersion of the object before parsing. + // The media type of the object will always match the enclosing list - if this as a JSON table, these + // will be JSON encoded objects. + // +optional + Object runtime.RawExtension `json:"object,omitempty"` +} + +// TableRowCondition allows a row to be marked with additional information. +// +protobuf=false +type TableRowCondition struct { + // Type of row condition. The only defined value is 'Completed' indicating that the + // object this row represents has reached a completed state and may be given less visual + // priority than other rows. Clients are not required to honor any conditions but should + // be consistent where possible about handling the conditions. + Type RowConditionType `json:"type"` + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus `json:"status"` + // (brief) machine readable reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // Human readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty"` +} + +type RowConditionType string + +// These are valid conditions of a row. This list is not exhaustive and new conditions may be +// included by other resources. +const ( + // RowCompleted means the underlying resource has reached completion and may be given less + // visual priority than other resources. + RowCompleted RowConditionType = "Completed" +) + +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// IncludeObjectPolicy controls which portion of the object is returned with a Table. +type IncludeObjectPolicy string + +const ( + // IncludeNone returns no object. + IncludeNone IncludeObjectPolicy = "None" + // IncludeMetadata serializes the object containing only its metadata field. + IncludeMetadata IncludeObjectPolicy = "Metadata" + // IncludeObject contains the full object. + IncludeObject IncludeObjectPolicy = "Object" +) + +// TableOptions are used when a Table is requested by the caller. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type TableOptions struct { + TypeMeta `json:",inline"` + + // NoHeaders is only exposed for internal callers. It is not included in our OpenAPI definitions + // and may be removed as a field in a future release. 
+ NoHeaders bool `json:"-"` + + // includeObject decides whether to include each object along with its columnar information. + // Specifying "None" will return no object, specifying "Object" will return the full object contents, and + // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind + // in version v1beta1 of the meta.k8s.io API group. + IncludeObject IncludeObjectPolicy `json:"includeObject,omitempty" protobuf:"bytes,1,opt,name=includeObject,casttype=IncludeObjectPolicy"` +} + +// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients +// to get access to a particular ObjectMeta schema without knowing the details of the version. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type PartialObjectMetadata struct { + TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` +} + +// PartialObjectMetadataList contains a list of objects containing only their metadata +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type PartialObjectMetadataList struct { + TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items contains each of the included items. + Items []PartialObjectMetadata `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index 3b1a09e57..07e6cc126 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -119,12 +119,12 @@ func (ExportOptions) SwaggerDoc() map[string]string { return map_ExportOptions } -var map_Fields = map[string]string{ - "": "Fields stores a set of fields in a data structure like a Trie. To understand how this is used, see: https://github.com/kubernetes-sigs/structured-merge-diff", +var map_FieldsV1 = map[string]string{ + "": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. 
The string will follow one of these four formats: 'f:<name>', where <name> is the name of a field in a struct, or key in a map 'v:<value>', where <value> is the exact json formatted value of a list item 'i:<index>', where <index> is position of a item in a list 'k:<keys>', where <keys> is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff", } -func (Fields) SwaggerDoc() map[string]string { - return map_Fields +func (FieldsV1) SwaggerDoc() map[string]string { + return map_FieldsV1 } var map_GetOptions = map[string]string{ @@ -146,25 +146,6 @@ func (GroupVersionForDiscovery) SwaggerDoc() map[string]string { return map_GroupVersionForDiscovery } -var map_Initializer = map[string]string{ - "": "Initializer is information about an initializer that has not yet completed.", - "name": "name of the process that is responsible for initializing this object.", -} - -func (Initializer) SwaggerDoc() map[string]string { - return map_Initializer -} - -var map_Initializers = map[string]string{ - "": "Initializers tracks the progress of initialization.", - "pending": "Pending is a list of initializers that must execute in order before this object is visible. When the last pending initializer is removed, and no failing result is set, the initializers struct will be set to nil and the object is considered as initialized and visible to all clients.", - "result": "If result is set with the Failure field, the object will be persisted to storage and then deleted, ensuring that other clients can observe the deletion.", -} - -func (Initializers) SwaggerDoc() map[string]string { - return map_Initializers -} - var map_LabelSelector = map[string]string{ "": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", "matchLabels": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", @@ -188,7 +169,7 @@ func (LabelSelectorRequirement) SwaggerDoc() map[string]string { var map_List = map[string]string{ "": "List holds a list of objects, which may not be known by the server.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of objects", } @@ -197,10 +178,11 @@ func (List) SwaggerDoc() map[string]string { } var map_ListMeta = map[string]string{ - "": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", - "selfLink": "selfLink is a URL representing this object. Populated by the system. Read-only.", - "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only.
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", - "continue": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.", + "": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", + "selfLink": "selfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.", + "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", + "continue": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.", + "remainingItemCount": "remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact.", } func (ListMeta) SwaggerDoc() map[string]string { @@ -208,14 +190,15 @@ func (ListMeta) SwaggerDoc() map[string]string { } var map_ListOptions = map[string]string{ - "": "ListOptions is the query options to a standard REST list call.", - "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion.", - "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", - "timeoutSeconds": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", - "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "continue": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "": "ListOptions is the query options to a standard REST list call.", + "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "fieldSelector": "A selector to restrict the list of returned objects by their fields. 
Defaults to everything.", + "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", + "allowWatchBookmarks": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.\n\nThis field is beta.", + "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "timeoutSeconds": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", + "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "continue": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", } func (ListOptions) SwaggerDoc() map[string]string { @@ -228,7 +211,8 @@ var map_ManagedFieldsEntry = map[string]string{ "operation": "Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.", "apiVersion": "APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.", "time": "Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply'", - "fields": "Fields identifies a set of fields.", + "fieldsType": "FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \"FieldsV1\"", + "fieldsV1": "FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type.", } func (ManagedFieldsEntry) SwaggerDoc() map[string]string { @@ -238,22 +222,21 @@ func (ManagedFieldsEntry) SwaggerDoc() map[string]string { var map_ObjectMeta = map[string]string{ "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency", + "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", "namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", - "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.", + "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.", "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", - "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", + "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", - "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only.", "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", - "initializers": "An initializer is a controller which enforces some system invariant at object creation time. This field is a list of initializers that have not yet acted on this object. If nil or empty, this object has been completely initialized. Otherwise, the object is considered uninitialized and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to observe uninitialized objects.\n\nWhen an object is created, the system will populate this list with the current set of initializers. Only privileged users may set or modify this list. Once it is empty, it may not be modified further by any user.\n\nDEPRECATED - initializers are an alpha field and will be removed in v1.15.", "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.", "clusterName": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", - "managedFields": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.\n\nThis field is alpha and can be changed or removed without notice.", + "managedFields": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.", } func (ObjectMeta) SwaggerDoc() map[string]string { @@ -263,7 +246,7 @@ func (ObjectMeta) SwaggerDoc() map[string]string { var map_OwnerReference = map[string]string{ "": "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.", "apiVersion": "API version of the referent.", - "kind": "Kind of the referent. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "name": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", "uid": "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", "controller": "If true, this reference points to the managing controller.", @@ -274,6 +257,25 @@ func (OwnerReference) SwaggerDoc() map[string]string { return map_OwnerReference } +var map_PartialObjectMetadata = map[string]string{ + "": "PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients to get access to a particular ObjectMeta schema without knowing the details of the version.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (PartialObjectMetadata) SwaggerDoc() map[string]string { + return map_PartialObjectMetadata +} + +var map_PartialObjectMetadataList = map[string]string{ + "": "PartialObjectMetadataList contains a list of objects containing only their metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "items contains each of the included items.", +} + +func (PartialObjectMetadataList) SwaggerDoc() map[string]string { + return map_PartialObjectMetadataList +} + var map_Patch = map[string]string{ "": "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.", } @@ -324,8 +326,8 @@ func (ServerAddressByClientCIDR) SwaggerDoc() map[string]string { var map_Status = map[string]string{ "": "Status is a return value for calls that don't return other objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "status": "Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "status": "Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", "message": "A human-readable description of the status of this operation.", "reason": "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.", "details": "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.", @@ -351,7 +353,7 @@ var map_StatusDetails = map[string]string{ "": "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. 
Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.", "name": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).", "group": "The group attribute of the resource associated with the status StatusReason.", - "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "uid": "UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids", "causes": "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.", "retryAfterSeconds": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.", @@ -361,10 +363,66 @@ func (StatusDetails) SwaggerDoc() map[string]string { return map_StatusDetails } +var map_Table = map[string]string{ + "": "Table is a tabular representation of a set of API resources. The server transforms the object into a set of preferred columns for quickly reviewing the objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "columnDefinitions": "columnDefinitions describes each column in the returned items array. The number of cells per row will always match the number of column definitions.", + "rows": "rows is the list of items in the table.", +} + +func (Table) SwaggerDoc() map[string]string { + return map_Table +} + +var map_TableColumnDefinition = map[string]string{ + "": "TableColumnDefinition contains information about a column returned in the Table.", + "name": "name is a human readable name for the column.", + "type": "type is an OpenAPI type definition for this column, such as number, integer, string, or array. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", + "format": "format is an optional OpenAPI type modifier for this column. A format modifies the type and imposes additional rules, like date or time formatting for a string. The 'name' format is applied to the primary identifier column which has type 'string' to assist in clients identifying column is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", + "description": "description is a human readable description of this column.", + "priority": "priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. 
Columns that may be omitted in limited space scenarios should be given a higher priority.", +} + +func (TableColumnDefinition) SwaggerDoc() map[string]string { + return map_TableColumnDefinition +} + +var map_TableOptions = map[string]string{ + "": "TableOptions are used when a Table is requested by the caller.", + "includeObject": "includeObject decides whether to include each object along with its columnar information. Specifying \"None\" will return no object, specifying \"Object\" will return the full object contents, and specifying \"Metadata\" (the default) will return the object's metadata in the PartialObjectMetadata kind in version v1beta1 of the meta.k8s.io API group.", +} + +func (TableOptions) SwaggerDoc() map[string]string { + return map_TableOptions +} + +var map_TableRow = map[string]string{ + "": "TableRow is an individual row in a table.", + "cells": "cells will be as wide as the column definitions array and may contain strings, numbers (float64 or int64), booleans, simple maps, lists, or null. See the type field of the column definition for a more detailed description.", + "conditions": "conditions describe additional status of a row that are relevant for a human user. These conditions apply to the row, not to the object, and will be specific to table output. The only defined condition type is 'Completed', for a row that indicates a resource that has run to completion and can be given less visual priority.", + "object": "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. Check the returned kind and apiVersion of the object before parsing. The media type of the object will always match the enclosing list - if this as a JSON table, these will be JSON encoded objects.", +} + +func (TableRow) SwaggerDoc() map[string]string { + return map_TableRow +} + +var map_TableRowCondition = map[string]string{ + "": "TableRowCondition allows a row to be marked with additional information.", + "type": "Type of row condition. The only defined value is 'Completed' indicating that the object this row represents has reached a completed state and may be given less visual priority than other rows. Clients are not required to honor any conditions but should be consistent where possible about handling the conditions.", + "status": "Status of the condition, one of True, False, Unknown.", + "reason": "(brief) machine readable reason for the condition's last transition.", + "message": "Human readable message indicating details about last transition.", +} + +func (TableRowCondition) SwaggerDoc() map[string]string { + return map_TableRowCondition +} + var map_TypeMeta = map[string]string{ "": "TypeMeta describes an individual object in an API response or request with strings representing the type of the object and its API schema version. Structures that are versioned or persisted should inline TypeMeta.", - "kind": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "apiVersion": "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + "kind": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "apiVersion": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", } func (TypeMeta) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index 75ac693fe..7ea0986f3 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -32,6 +32,9 @@ import ( // NestedFieldCopy returns a deep copy of the value of a nested field. // Returns false if the value is missing. // No error is returned for a nil field. +// +// Note: fields passed to this function are treated as keys within the passed +// object; no array/slice syntax is supported. func NestedFieldCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) { val, found, err := NestedFieldNoCopy(obj, fields...) if !found || err != nil { @@ -43,6 +46,9 @@ func NestedFieldCopy(obj map[string]interface{}, fields ...string) (interface{}, // NestedFieldNoCopy returns a reference to a nested field. // Returns false if value is not found and an error if unable // to traverse obj. +// +// Note: fields passed to this function are treated as keys within the passed +// object; no array/slice syntax is supported. func NestedFieldNoCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) { var val interface{} = obj @@ -275,6 +281,22 @@ func getNestedString(obj map[string]interface{}, fields ...string) string { return val } +func getNestedInt64(obj map[string]interface{}, fields ...string) int64 { + val, found, err := NestedInt64(obj, fields...) + if !found || err != nil { + return 0 + } + return val +} + +func getNestedInt64Pointer(obj map[string]interface{}, fields ...string) *int64 { + val, found, err := NestedInt64(obj, fields...) + if !found || err != nil { + return nil + } + return &val +} + func jsonPath(fields []string) string { return "." 
+ strings.Join(fields, ".") } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go index 1eaa85804..d1903394d 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go @@ -47,6 +47,7 @@ type Unstructured struct { var _ metav1.Object = &Unstructured{} var _ runtime.Unstructured = &Unstructured{} +var _ metav1.ListInterface = &Unstructured{} func (obj *Unstructured) GetObjectKind() schema.ObjectKind { return obj } @@ -126,6 +127,16 @@ func (u *Unstructured) UnmarshalJSON(b []byte) error { return err } +// NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data. +// This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info. +func (in *Unstructured) NewEmptyInstance() runtime.Unstructured { + out := new(Unstructured) + if in != nil { + out.GetObjectKind().SetGroupVersionKind(in.GetObjectKind().GroupVersionKind()) + } + return out +} + func (in *Unstructured) DeepCopy() *Unstructured { if in == nil { return nil @@ -319,6 +330,18 @@ func (u *Unstructured) SetContinue(c string) { u.setNestedField(c, "metadata", "continue") } +func (u *Unstructured) GetRemainingItemCount() *int64 { + return getNestedInt64Pointer(u.Object, "metadata", "remainingItemCount") +} + +func (u *Unstructured) SetRemainingItemCount(c *int64) { + if c == nil { + RemoveNestedField(u.Object, "metadata", "remainingItemCount") + } else { + u.setNestedField(*c, "metadata", "remainingItemCount") + } +} + func (u *Unstructured) GetCreationTimestamp() metav1.Time { var timestamp metav1.Time timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "creationTimestamp")) @@ -408,31 +431,6 @@ func (u *Unstructured) GroupVersionKind() schema.GroupVersionKind { return gvk } -func (u *Unstructured) GetInitializers() *metav1.Initializers { - m, found, err := nestedMapNoCopy(u.Object, "metadata", "initializers") - if !found || err != nil { - return nil - } - out := &metav1.Initializers{} - if err := runtime.DefaultUnstructuredConverter.FromUnstructured(m, out); err != nil { - utilruntime.HandleError(fmt.Errorf("unable to retrieve initializers for object: %v", err)) - return nil - } - return out -} - -func (u *Unstructured) SetInitializers(initializers *metav1.Initializers) { - if initializers == nil { - RemoveNestedField(u.Object, "metadata", "initializers") - return - } - out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(initializers) - if err != nil { - utilruntime.HandleError(fmt.Errorf("unable to retrieve initializers for object: %v", err)) - } - u.setNestedField(out, "metadata", "initializers") -} - func (u *Unstructured) GetFinalizers() []string { val, _, _ := NestedStringSlice(u.Object, "metadata", "finalizers") return val diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go index bf3fd023f..5028f5fb5 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go @@ -52,6 +52,16 @@ func (u *UnstructuredList) EachListItem(fn func(runtime.Object) error) error { return nil } +// NewEmptyInstance returns a new instance of the concrete type 
containing only kind/apiVersion and no other data. +// This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info. +func (u *UnstructuredList) NewEmptyInstance() runtime.Unstructured { + out := new(UnstructuredList) + if u != nil { + out.SetGroupVersionKind(u.GroupVersionKind()) + } + return out +} + // UnstructuredContent returns a map contain an overlay of the Items field onto // the Object field. Items always overwrites overlay. func (u *UnstructuredList) UnstructuredContent() map[string]interface{} { @@ -166,6 +176,18 @@ func (u *UnstructuredList) SetContinue(c string) { u.setNestedField(c, "metadata", "continue") } +func (u *UnstructuredList) GetRemainingItemCount() *int64 { + return getNestedInt64Pointer(u.Object, "metadata", "remainingItemCount") +} + +func (u *UnstructuredList) SetRemainingItemCount(c *int64) { + if c == nil { + RemoveNestedField(u.Object, "metadata", "remainingItemCount") + } else { + u.setNestedField(*c, "metadata", "remainingItemCount") + } +} + func (u *UnstructuredList) SetGroupVersionKind(gvk schema.GroupVersionKind) { u.SetAPIVersion(gvk.GroupVersion().String()) u.SetKind(gvk.Kind) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index 68498b8d2..b82fdf202 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -313,24 +313,22 @@ func (in *ExportOptions) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Fields) DeepCopyInto(out *Fields) { +func (in *FieldsV1) DeepCopyInto(out *FieldsV1) { *out = *in - if in.Map != nil { - in, out := &in.Map, &out.Map - *out = make(map[string]Fields, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } + if in.Raw != nil { + in, out := &in.Raw, &out.Raw + *out = make([]byte, len(*in)) + copy(*out, *in) } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Fields. -func (in *Fields) DeepCopy() *Fields { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldsV1. +func (in *FieldsV1) DeepCopy() *FieldsV1 { if in == nil { return nil } - out := new(Fields) + out := new(FieldsV1) in.DeepCopyInto(out) return out } @@ -456,48 +454,6 @@ func (in *GroupVersionResource) DeepCopy() *GroupVersionResource { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Initializer) DeepCopyInto(out *Initializer) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializer. -func (in *Initializer) DeepCopy() *Initializer { - if in == nil { - return nil - } - out := new(Initializer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Initializers) DeepCopyInto(out *Initializers) { - *out = *in - if in.Pending != nil { - in, out := &in.Pending, &out.Pending - *out = make([]Initializer, len(*in)) - copy(*out, *in) - } - if in.Result != nil { - in, out := &in.Result, &out.Result - *out = new(Status) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializers. -func (in *Initializers) DeepCopy() *Initializers { - if in == nil { - return nil - } - out := new(Initializers) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *InternalEvent) DeepCopyInto(out *InternalEvent) { *out = *in @@ -572,7 +528,7 @@ func (in *LabelSelectorRequirement) DeepCopy() *LabelSelectorRequirement { func (in *List) DeepCopyInto(out *List) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]runtime.RawExtension, len(*in)) @@ -604,6 +560,11 @@ func (in *List) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ListMeta) DeepCopyInto(out *ListMeta) { *out = *in + if in.RemainingItemCount != nil { + in, out := &in.RemainingItemCount, &out.RemainingItemCount + *out = new(int64) + **out = **in + } return } @@ -654,9 +615,9 @@ func (in *ManagedFieldsEntry) DeepCopyInto(out *ManagedFieldsEntry) { in, out := &in.Time, &out.Time *out = (*in).DeepCopy() } - if in.Fields != nil { - in, out := &in.Fields, &out.Fields - *out = new(Fields) + if in.FieldsV1 != nil { + in, out := &in.FieldsV1, &out.FieldsV1 + *out = new(FieldsV1) (*in).DeepCopyInto(*out) } return @@ -716,11 +677,6 @@ func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.Initializers != nil { - in, out := &in.Initializers, &out.Initializers - *out = new(Initializers) - (*in).DeepCopyInto(*out) - } if in.Finalizers != nil { in, out := &in.Finalizers, &out.Finalizers *out = make([]string, len(*in)) @@ -772,6 +728,65 @@ func (in *OwnerReference) DeepCopy() *OwnerReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartialObjectMetadata) DeepCopyInto(out *PartialObjectMetadata) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadata. +func (in *PartialObjectMetadata) DeepCopy() *PartialObjectMetadata { + if in == nil { + return nil + } + out := new(PartialObjectMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PartialObjectMetadata) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PartialObjectMetadata, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadataList. +func (in *PartialObjectMetadataList) DeepCopy() *PartialObjectMetadataList { + if in == nil { + return nil + } + out := new(PartialObjectMetadataList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PartialObjectMetadataList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Patch) DeepCopyInto(out *Patch) { *out = *in @@ -890,7 +905,7 @@ func (in *ServerAddressByClientCIDR) DeepCopy() *ServerAddressByClientCIDR { func (in *Status) DeepCopyInto(out *Status) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Details != nil { in, out := &in.Details, &out.Details *out = new(StatusDetails) @@ -954,6 +969,108 @@ func (in *StatusDetails) DeepCopy() *StatusDetails { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Table) DeepCopyInto(out *Table) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.ColumnDefinitions != nil { + in, out := &in.ColumnDefinitions, &out.ColumnDefinitions + *out = make([]TableColumnDefinition, len(*in)) + copy(*out, *in) + } + if in.Rows != nil { + in, out := &in.Rows, &out.Rows + *out = make([]TableRow, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Table. +func (in *Table) DeepCopy() *Table { + if in == nil { + return nil + } + out := new(Table) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Table) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableColumnDefinition) DeepCopyInto(out *TableColumnDefinition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableColumnDefinition. +func (in *TableColumnDefinition) DeepCopy() *TableColumnDefinition { + if in == nil { + return nil + } + out := new(TableColumnDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableOptions) DeepCopyInto(out *TableOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableOptions. 
+func (in *TableOptions) DeepCopy() *TableOptions { + if in == nil { + return nil + } + out := new(TableOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TableOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableRow) DeepCopyInto(out *TableRow) { + clone := in.DeepCopy() + *out = *clone + return +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableRowCondition) DeepCopyInto(out *TableRowCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableRowCondition. +func (in *TableRowCondition) DeepCopy() *TableRowCondition { + if in == nil { + return nil + } + out := new(TableRowCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Time. func (in *Time) DeepCopy() *Time { if in == nil { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go index 3b2bedd92..2b7e8ca0b 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go @@ -15,30 +15,3 @@ limitations under the License. */ package v1beta1 - -import "k8s.io/apimachinery/pkg/runtime" - -func (in *TableRow) DeepCopy() *TableRow { - if in == nil { - return nil - } - - out := new(TableRow) - - if in.Cells != nil { - out.Cells = make([]interface{}, len(in.Cells)) - for i := range in.Cells { - out.Cells[i] = runtime.DeepCopyJSONValue(in.Cells[i]) - } - } - - if in.Conditions != nil { - out.Conditions = make([]TableRowCondition, len(in.Conditions)) - for i := range in.Conditions { - in.Conditions[i].DeepCopyInto(&out.Conditions[i]) - } - } - - in.Object.DeepCopyInto(&out.Object) - return out -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go index 47c03d69b..5fae30ae8 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go @@ -17,27 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto - - It has these top-level messages: - PartialObjectMetadata - PartialObjectMetadataList - TableOptions -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import strings "strings" -import reflect "reflect" + io "io" -import io "io" + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -50,55 +44,71 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} } -func (*PartialObjectMetadata) ProtoMessage() {} -func (*PartialObjectMetadata) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } func (*PartialObjectMetadataList) ProtoMessage() {} func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} + return fileDescriptor_90ec10f86b91f9a8, []int{0} +} +func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PartialObjectMetadataList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PartialObjectMetadataList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartialObjectMetadataList.Merge(m, src) +} +func (m *PartialObjectMetadataList) XXX_Size() int { + return m.Size() +} +func (m *PartialObjectMetadataList) XXX_DiscardUnknown() { + xxx_messageInfo_PartialObjectMetadataList.DiscardUnknown(m) } -func (m *TableOptions) Reset() { *m = TableOptions{} } -func (*TableOptions) ProtoMessage() {} -func (*TableOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo func init() { - proto.RegisterType((*PartialObjectMetadata)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.PartialObjectMetadata") proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.PartialObjectMetadataList") - proto.RegisterType((*TableOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.TableOptions") } -func (m *PartialObjectMetadata) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto", fileDescriptor_90ec10f86b91f9a8) } -func (m *PartialObjectMetadata) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - return i, nil +var fileDescriptor_90ec10f86b91f9a8 = []byte{ + // 321 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0x41, 0x4b, 0xf3, 0x30, + 0x18, 0xc7, 0x9b, 0xf7, 0x65, 0x38, 0x3a, 0x04, 0xd9, 0x69, 0xee, 0x90, 0x0d, 0x4f, 0xf3, 0xb0, + 0x84, 0x0d, 0x11, 0xc1, 0xdb, 0x6e, 0x82, 0xa2, 0xec, 0x28, 0x1e, 0x4c, 0xbb, 0xc7, 0x2e, 0xd6, + 0x34, 0x25, 0x79, 0x3a, 0xf0, 0xe6, 0x47, 0xf0, 0x63, 0xed, 0xb8, 0xe3, 0x40, 0x18, 0xae, 0x7e, + 0x11, 0x49, 0x57, 0x45, 0xa6, 0x62, 0x6f, 0x79, 0xfe, 0xe1, 0xf7, 0xcb, 0x3f, 0x89, 0x3f, 0x8e, + 0x4f, 0x2c, 0x93, 0x9a, 0xc7, 0x59, 0x00, 0x26, 0x01, 0x04, 0xcb, 0x67, 0x90, 0x4c, 0xb4, 0xe1, + 0xe5, 0x86, 0x48, 0xa5, 0x12, 0xe1, 0x54, 0x26, 0x60, 0x1e, 0x79, 0x1a, 0x47, 0x2e, 0xb0, 0x5c, + 0x01, 0x0a, 0x3e, 0x1b, 0x04, 0x80, 0x62, 0xc0, 0x23, 0x48, 0xc0, 0x08, 0x84, 0x09, 0x4b, 0x8d, + 0x46, 0xdd, 0x3c, 0xdc, 0xa0, 0xec, 0x2b, 0xca, 0xd2, 0x38, 0x72, 0x81, 0x65, 0x0e, 0x65, 0x25, + 0xda, 0xee, 0x47, 0x12, 
0xa7, 0x59, 0xc0, 0x42, 0xad, 0x78, 0xa4, 0x23, 0xcd, 0x0b, 0x43, 0x90, + 0xdd, 0x15, 0x53, 0x31, 0x14, 0xab, 0x8d, 0xb9, 0x7d, 0x54, 0xa5, 0xd4, 0x76, 0x9f, 0xf6, 0xaf, + 0x57, 0x31, 0x59, 0x82, 0x52, 0xc1, 0x37, 0xe0, 0xf8, 0x2f, 0xc0, 0x86, 0x53, 0x50, 0x62, 0x9b, + 0x3b, 0x78, 0x21, 0xfe, 0xfe, 0x95, 0x30, 0x28, 0xc5, 0xc3, 0x65, 0x70, 0x0f, 0x21, 0x5e, 0x00, + 0x8a, 0x89, 0x40, 0x71, 0x2e, 0x2d, 0x36, 0x6f, 0xfc, 0xba, 0x2a, 0xe7, 0xd6, 0xbf, 0x2e, 0xe9, + 0x35, 0x86, 0x8c, 0x55, 0x79, 0x29, 0xe6, 0x68, 0x67, 0x1a, 0xed, 0xcd, 0x57, 0x1d, 0x2f, 0x5f, + 0x75, 0xea, 0x1f, 0xc9, 0xf8, 0xd3, 0xd8, 0xbc, 0xf5, 0x6b, 0x12, 0x41, 0xd9, 0x16, 0xe9, 0xfe, + 0xef, 0x35, 0x86, 0xa7, 0xd5, 0xd4, 0x3f, 0xb6, 0x1d, 0xed, 0x96, 0xe7, 0xd4, 0xce, 0x9c, 0x71, + 0xbc, 0x11, 0x8f, 0xfa, 0xf3, 0x35, 0xf5, 0x16, 0x6b, 0xea, 0x2d, 0xd7, 0xd4, 0x7b, 0xca, 0x29, + 0x99, 0xe7, 0x94, 0x2c, 0x72, 0x4a, 0x96, 0x39, 0x25, 0xaf, 0x39, 0x25, 0xcf, 0x6f, 0xd4, 0xbb, + 0xde, 0x29, 0xbf, 0xf6, 0x3d, 0x00, 0x00, 0xff, 0xff, 0xc6, 0x7e, 0x00, 0x08, 0x5a, 0x02, 0x00, + 0x00, } func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -106,65 +116,57 @@ func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { } func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PartialObjectMetadataList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil -} - -func (m *TableOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TableOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IncludeObject))) - i += copy(dAtA[i:], m.IncludeObject) - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *PartialObjectMetadata) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + return base } - func (m *PartialObjectMetadataList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Items) > 0 { @@ -173,56 +175,29 @@ func (m *PartialObjectMetadataList) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - return n -} - -func (m *TableOptions) Size() (n int) { - var l int - _ = l - l = 
len(m.IncludeObject) + l = m.ListMeta.Size() n += 1 + l + sovGenerated(uint64(l)) return n } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *PartialObjectMetadata) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PartialObjectMetadata{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} func (this *PartialObjectMetadataList) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&PartialObjectMetadataList{`, - `Items:` + strings.Replace(fmt.Sprintf("%v", this.Items), "PartialObjectMetadata", "PartialObjectMetadata", 1) + `,`, - `}`, - }, "") - return s -} -func (this *TableOptions) String() string { - if this == nil { - return "nil" + repeatedStringForItems := "[]PartialObjectMetadata{" + for _, f := range this.Items { + repeatedStringForItems += fmt.Sprintf("%v", f) + "," } - s := strings.Join([]string{`&TableOptions{`, - `IncludeObject:` + fmt.Sprintf("%v", this.IncludeObject) + `,`, + repeatedStringForItems += "}" + s := strings.Join([]string{`&PartialObjectMetadataList{`, + `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -235,7 +210,7 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { +func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -250,7 +225,7 @@ func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -258,15 +233,15 @@ func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PartialObjectMetadata: wiretype end group for non-group") + return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PartialObjectMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -278,7 +253,7 @@ func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -287,66 +262,20 @@ func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, v1.PartialObjectMetadata{}) + if err := 
m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -358,7 +287,7 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -367,11 +296,13 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, &PartialObjectMetadata{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -384,83 +315,7 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TableOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TableOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TableOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludeObject", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IncludeObject = IncludeObjectPolicy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -529,10 +384,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -561,6 +419,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -579,35 +440,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 375 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xcd, 0x0a, 0xd3, 0x40, - 0x10, 0xc7, 0xb3, 0x48, 0xd1, 0x6e, 0xed, 0x25, 0x22, 0xd4, 0x1e, 0x36, 0xa5, 0xa7, 0x0a, 0x76, - 0xd7, 0x16, 0x11, 0x8f, 0x92, 0x5b, 0x41, 0x69, 0x09, 0x9e, 0x3c, 0xb9, 0x49, 0xc6, 0x74, 0xcd, - 0xc7, 0x86, 0xec, 0xa6, 0xd0, 0x8b, 0xf8, 0x08, 0x3e, 0x56, 0x8f, 0x3d, 0xf6, 0x14, 0x6c, 0x7c, - 0x0b, 0x4f, 0x92, 0x0f, 0xec, 0x87, 0x15, 0x7b, 0x9b, 0xf9, 0x0f, 0xbf, 0x5f, 0x66, 0xb2, 0xd8, - 0x09, 0xdf, 0x28, 0x2a, 0x24, 0x0b, 0x73, 0x17, 0xb2, 0x04, 0x34, 0x28, 0xb6, 0x81, 0xc4, 0x97, - 0x19, 0x6b, 0x07, 0x3c, 0x15, 0x31, 0xf7, 0xd6, 0x22, 0x81, 0x6c, 0xcb, 0xd2, 0x30, 0xa8, 0x02, - 0xc5, 0x62, 0xd0, 0x9c, 0x6d, 0x66, 0x2e, 0x68, 0x3e, 0x63, 0x01, 0x24, 0x90, 0x71, 0x0d, 0x3e, - 0x4d, 0x33, 0xa9, 0xa5, 0xf9, 0xbc, 0x41, 0xe9, 0x39, 0x4a, 0xd3, 0x30, 0xa8, 0x02, 0x45, 0x2b, - 0x94, 0xb6, 0xe8, 0x70, 0x1a, 0x08, 0xbd, 0xce, 0x5d, 0xea, 0xc9, 0x98, 0x05, 0x32, 0x90, 0xac, - 0x36, 0xb8, 0xf9, 0xe7, 0xba, 0xab, 0x9b, 0xba, 0x6a, 0xcc, 0xc3, 0x57, 0xf7, 0x2c, 0x75, 0xbd, - 0xcf, 0xf0, 0x9f, 0xa7, 0x64, 0x79, 0xa2, 0x45, 0x0c, 0x7f, 0x01, 0xaf, 0xff, 0x07, 0x28, 0x6f, - 0x0d, 0x31, 0xbf, 0xe6, 0xc6, 0x5b, 0xfc, 0x74, 0xc5, 0x33, 0x2d, 0x78, 0xb4, 0x74, 0xbf, 0x80, - 0xa7, 0xdf, 0x83, 0xe6, 0x3e, 0xd7, 0xdc, 0xfc, 0x84, 0x1f, 0xc5, 0x6d, 0x3d, 0x40, 0x23, 0x34, - 0xe9, 0xcd, 0x5f, 0xd2, 0x7b, 0x7e, 0x12, 0x3d, 0x79, 0x6c, 0x73, 0x57, 0x58, 0x46, 0x59, 0x58, - 0xf8, 0x94, 0x39, 0x7f, 0xac, 0xe3, 0xaf, 0xf8, 0xd9, 0xcd, 0x4f, 0xbf, 0x13, 0x4a, 0x9b, 0x1c, - 0x77, 0x84, 0x86, 0x58, 0x0d, 0xd0, 0xe8, 0xc1, 0xa4, 0x37, 0x7f, 0x4b, 0xef, 0x7e, 0x20, 0x7a, - 0x53, 0x6a, 0x77, 0xcb, 0xc2, 0xea, 0x2c, 0x2a, 0xa5, 0xd3, 0x98, 0xc7, 0x2e, 0x7e, 0xfc, 0x81, - 0xbb, 0x11, 0x2c, 0x53, 0x2d, 0x64, 0xa2, 0x4c, 0x07, 0xf7, 0x45, 0xe2, 0x45, 0xb9, 0x0f, 0x0d, - 0x5a, 0x9f, 0xdd, 0xb5, 0x5f, 0xb4, 0x47, 0xf4, 0x17, 0xe7, 0xc3, 0x5f, 0x85, 0xf5, 0xe4, 0x22, - 0x58, 0xc9, 0x48, 0x78, 0x5b, 0xe7, 0x52, 0x61, 0x4f, 0x77, 0x47, 0x62, 0xec, 0x8f, 0xc4, 0x38, - 0x1c, 0x89, 0xf1, 0xad, 0x24, 0x68, 0x57, 0x12, 0xb4, 0x2f, 0x09, 0x3a, 0x94, 0x04, 0xfd, 0x28, - 0x09, 0xfa, 0xfe, 0x93, 0x18, 0x1f, 0x1f, 0xb6, 0xab, 0xff, 0x0e, 0x00, 0x00, 0xff, 
0xff, 0xf3, - 0xe1, 0xde, 0x86, 0xdb, 0x02, 0x00, 0x00, -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto index 83be99790..19606666f 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto @@ -28,30 +28,15 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; -// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients -// to get access to a particular ObjectMeta schema without knowing the details of the version. +// PartialObjectMetadataList contains a list of objects containing only their metadata. // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message PartialObjectMetadata { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +message PartialObjectMetadataList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; -} + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 2; -// PartialObjectMetadataList contains a list of objects containing only their metadata -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message PartialObjectMetadataList { // items contains each of the included items. - repeated PartialObjectMetadata items = 1; -} - -// TableOptions are used when a Table is requested by the caller. -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message TableOptions { - // includeObject decides whether to include each object along with its columnar information. - // Specifying "None" will return no object, specifying "Object" will return the full object contents, and - // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind - // in version v1beta1 of the meta.k8s.io API group. - optional string includeObject = 1; + repeated k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata items = 1; } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go index d13254b41..108a0764e 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go @@ -39,6 +39,12 @@ var scheme = runtime.NewScheme() var ParameterCodec = runtime.NewParameterCodec(scheme) func init() { + if err := AddMetaToScheme(scheme); err != nil { + panic(err) + } +} + +func AddMetaToScheme(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Table{}, &TableOptions{}, @@ -46,11 +52,9 @@ func init() { &PartialObjectMetadataList{}, ) - if err := scheme.AddConversionFuncs( + return scheme.AddConversionFuncs( Convert_Slice_string_To_v1beta1_IncludeObjectPolicy, - ); err != nil { - panic(err) - } + ) // register manually. This usually goes through the SchemeBuilder, which we cannot use here. //scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...) 
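The register.go change above extracts the type registration into an exported AddMetaToScheme(scheme) helper, so callers can add the meta.k8s.io/v1beta1 kinds to their own scheme instead of relying only on this package's init side effect. A minimal usage sketch follows (it assumes only the vendored packages shown in this patch; the main function and scheme variable are illustrative, not part of the change):

package main

import (
	metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Build a fresh scheme and register the meta.k8s.io/v1beta1 kinds
	// (Table, TableOptions, PartialObjectMetadata, PartialObjectMetadataList).
	scheme := runtime.NewScheme()
	if err := metav1beta1.AddMetaToScheme(scheme); err != nil {
		panic(err)
	}
	_ = scheme // the scheme can now recognize these kinds when encoding/decoding
}
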
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go index 344c533e1..f16170a37 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go @@ -18,144 +18,67 @@ limitations under the License. package v1beta1 import ( - "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// TODO: Table does not generate to protobuf because of the interface{} - fix protobuf -// generation to support a meta type that can accept any valid JSON. - // Table is a tabular representation of a set of API resources. The server transforms the // object into a set of preferred columns for quickly reviewing the objects. -// +protobuf=false // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type Table struct { - v1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - v1.ListMeta `json:"metadata,omitempty"` - - // columnDefinitions describes each column in the returned items array. The number of cells per row - // will always match the number of column definitions. - ColumnDefinitions []TableColumnDefinition `json:"columnDefinitions"` - // rows is the list of items in the table. - Rows []TableRow `json:"rows"` -} +// +protobuf=false +type Table = v1.Table // TableColumnDefinition contains information about a column returned in the Table. // +protobuf=false -type TableColumnDefinition struct { - // name is a human readable name for the column. - Name string `json:"name"` - // type is an OpenAPI type definition for this column. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. - Type string `json:"type"` - // format is an optional OpenAPI type definition for this column. The 'name' format is applied - // to the primary identifier column to assist in clients identifying column is the resource name. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. - Format string `json:"format"` - // description is a human readable description of this column. - Description string `json:"description"` - // priority is an integer defining the relative importance of this column compared to others. Lower - // numbers are considered higher priority. Columns that may be omitted in limited space scenarios - // should be given a higher priority. - Priority int32 `json:"priority"` -} +type TableColumnDefinition = v1.TableColumnDefinition // TableRow is an individual row in a table. // +protobuf=false -type TableRow struct { - // cells will be as wide as headers and may contain strings, numbers (float64 or int64), booleans, simple - // maps, or lists, or null. See the type field of the column definition for a more detailed description. - Cells []interface{} `json:"cells"` - // conditions describe additional status of a row that are relevant for a human user. - // +optional - Conditions []TableRowCondition `json:"conditions,omitempty"` - // This field contains the requested additional information about each object based on the includeObject - // policy when requesting the Table. If "None", this field is empty, if "Object" this will be the - // default serialization of the object for the current API version, and if "Metadata" (the default) will - // contain the object metadata. 
Check the returned kind and apiVersion of the object before parsing. - // +optional - Object runtime.RawExtension `json:"object,omitempty"` -} +type TableRow = v1.TableRow // TableRowCondition allows a row to be marked with additional information. // +protobuf=false -type TableRowCondition struct { - // Type of row condition. - Type RowConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. - Status ConditionStatus `json:"status"` - // (brief) machine readable reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty"` - // Human readable message indicating details about last transition. - // +optional - Message string `json:"message,omitempty"` -} +type TableRowCondition = v1.TableRowCondition -type RowConditionType string +type RowConditionType = v1.RowConditionType -// These are valid conditions of a row. This list is not exhaustive and new conditions may be -// included by other resources. -const ( - // RowCompleted means the underlying resource has reached completion and may be given less - // visual priority than other resources. - RowCompleted RowConditionType = "Completed" -) +type ConditionStatus = v1.ConditionStatus -type ConditionStatus string - -// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. -// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes -// can't decide if a resource is in the condition or not. In the future, we could add other -// intermediate conditions, e.g. ConditionDegraded. -const ( - ConditionTrue ConditionStatus = "True" - ConditionFalse ConditionStatus = "False" - ConditionUnknown ConditionStatus = "Unknown" -) - -// IncludeObjectPolicy controls which portion of the object is returned with a Table. -type IncludeObjectPolicy string - -const ( - // IncludeNone returns no object. - IncludeNone IncludeObjectPolicy = "None" - // IncludeMetadata serializes the object containing only its metadata field. - IncludeMetadata IncludeObjectPolicy = "Metadata" - // IncludeObject contains the full object. - IncludeObject IncludeObjectPolicy = "Object" -) +type IncludeObjectPolicy = v1.IncludeObjectPolicy // TableOptions are used when a Table is requested by the caller. // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type TableOptions struct { - v1.TypeMeta `json:",inline"` - // includeObject decides whether to include each object along with its columnar information. - // Specifying "None" will return no object, specifying "Object" will return the full object contents, and - // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind - // in version v1beta1 of the meta.k8s.io API group. - IncludeObject IncludeObjectPolicy `json:"includeObject,omitempty" protobuf:"bytes,1,opt,name=includeObject,casttype=IncludeObjectPolicy"` -} +type TableOptions = v1.TableOptions // PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients // to get access to a particular ObjectMeta schema without knowing the details of the version. // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type PartialObjectMetadata struct { - v1.TypeMeta `json:",inline"` - // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` -} +type PartialObjectMetadata = v1.PartialObjectMetadata + +// IMPORTANT: PartialObjectMetadataList has different protobuf field ids in v1beta1 than +// v1 because ListMeta was accidentally omitted prior to 1.15. Therefore this type must +// remain independent of v1.PartialObjectMetadataList to preserve mappings. -// PartialObjectMetadataList contains a list of objects containing only their metadata +// PartialObjectMetadataList contains a list of objects containing only their metadata. // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type PartialObjectMetadataList struct { v1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + v1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,2,opt,name=metadata"` // items contains each of the included items. - Items []*PartialObjectMetadata `json:"items" protobuf:"bytes,1,rep,name=items"` + Items []v1.PartialObjectMetadata `json:"items" protobuf:"bytes,1,rep,name=items"` } + +const ( + RowCompleted = v1.RowCompleted + + ConditionTrue = v1.ConditionTrue + ConditionFalse = v1.ConditionFalse + ConditionUnknown = v1.ConditionUnknown + + IncludeNone = v1.IncludeNone + IncludeMetadata = v1.IncludeMetadata + IncludeObject = v1.IncludeObject +) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go index 7394535d9..ef7e7c1e9 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go @@ -27,78 +27,14 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_PartialObjectMetadata = map[string]string{ - "": "PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients to get access to a particular ObjectMeta schema without knowing the details of the version.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", -} - -func (PartialObjectMetadata) SwaggerDoc() map[string]string { - return map_PartialObjectMetadata -} - var map_PartialObjectMetadataList = map[string]string{ - "": "PartialObjectMetadataList contains a list of objects containing only their metadata", - "items": "items contains each of the included items.", + "": "PartialObjectMetadataList contains a list of objects containing only their metadata.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "items contains each of the included items.", } func (PartialObjectMetadataList) SwaggerDoc() map[string]string { return map_PartialObjectMetadataList } -var map_Table = map[string]string{ - "": "Table is a tabular representation of a set of API resources. The server transforms the object into a set of preferred columns for quickly reviewing the objects.", - "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "columnDefinitions": "columnDefinitions describes each column in the returned items array. The number of cells per row will always match the number of column definitions.", - "rows": "rows is the list of items in the table.", -} - -func (Table) SwaggerDoc() map[string]string { - return map_Table -} - -var map_TableColumnDefinition = map[string]string{ - "": "TableColumnDefinition contains information about a column returned in the Table.", - "name": "name is a human readable name for the column.", - "type": "type is an OpenAPI type definition for this column. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", - "format": "format is an optional OpenAPI type definition for this column. The 'name' format is applied to the primary identifier column to assist in clients identifying column is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", - "description": "description is a human readable description of this column.", - "priority": "priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a higher priority.", -} - -func (TableColumnDefinition) SwaggerDoc() map[string]string { - return map_TableColumnDefinition -} - -var map_TableOptions = map[string]string{ - "": "TableOptions are used when a Table is requested by the caller.", - "includeObject": "includeObject decides whether to include each object along with its columnar information. Specifying \"None\" will return no object, specifying \"Object\" will return the full object contents, and specifying \"Metadata\" (the default) will return the object's metadata in the PartialObjectMetadata kind in version v1beta1 of the meta.k8s.io API group.", -} - -func (TableOptions) SwaggerDoc() map[string]string { - return map_TableOptions -} - -var map_TableRow = map[string]string{ - "": "TableRow is an individual row in a table.", - "cells": "cells will be as wide as headers and may contain strings, numbers (float64 or int64), booleans, simple maps, or lists, or null. See the type field of the column definition for a more detailed description.", - "conditions": "conditions describe additional status of a row that are relevant for a human user.", - "object": "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. 
Check the returned kind and apiVersion of the object before parsing.", -} - -func (TableRow) SwaggerDoc() map[string]string { - return map_TableRow -} - -var map_TableRowCondition = map[string]string{ - "": "TableRowCondition allows a row to be marked with additional information.", - "type": "Type of row condition.", - "status": "Status of the condition, one of True, False, Unknown.", - "reason": "(brief) machine readable reason for the condition's last transition.", - "message": "Human readable message indicating details about last transition.", -} - -func (TableRowCondition) SwaggerDoc() map[string]string { - return map_TableRowCondition -} - // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go index b77db1b15..89053b981 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go @@ -21,48 +21,20 @@ limitations under the License. package v1beta1 import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PartialObjectMetadata) DeepCopyInto(out *PartialObjectMetadata) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadata. -func (in *PartialObjectMetadata) DeepCopy() *PartialObjectMetadata { - if in == nil { - return nil - } - out := new(PartialObjectMetadata) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PartialObjectMetadata) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList) { *out = *in out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]*PartialObjectMetadata, len(*in)) + *out = make([]v1.PartialObjectMetadata, len(*in)) for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(PartialObjectMetadata) - (*in).DeepCopyInto(*out) - } + (*in)[i].DeepCopyInto(&(*out)[i]) } } return @@ -85,105 +57,3 @@ func (in *PartialObjectMetadataList) DeepCopyObject() runtime.Object { } return nil } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Table) DeepCopyInto(out *Table) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.ColumnDefinitions != nil { - in, out := &in.ColumnDefinitions, &out.ColumnDefinitions - *out = make([]TableColumnDefinition, len(*in)) - copy(*out, *in) - } - if in.Rows != nil { - in, out := &in.Rows, &out.Rows - *out = make([]TableRow, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Table. 
-func (in *Table) DeepCopy() *Table { - if in == nil { - return nil - } - out := new(Table) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Table) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TableColumnDefinition) DeepCopyInto(out *TableColumnDefinition) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableColumnDefinition. -func (in *TableColumnDefinition) DeepCopy() *TableColumnDefinition { - if in == nil { - return nil - } - out := new(TableColumnDefinition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TableOptions) DeepCopyInto(out *TableOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableOptions. -func (in *TableOptions) DeepCopy() *TableOptions { - if in == nil { - return nil - } - out := new(TableOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *TableOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TableRow) DeepCopyInto(out *TableRow) { - clone := in.DeepCopy() - *out = *clone - return -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TableRowCondition) DeepCopyInto(out *TableRowCondition) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableRowCondition. 
-func (in *TableRowCondition) DeepCopy() *TableRowCondition { - if in == nil { - return nil - } - out := new(TableRowCondition) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go index b3804aa42..2f0dd0074 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go @@ -54,10 +54,6 @@ func jsonTag(field reflect.StructField) (string, bool) { return tag, omitempty } -func formatValue(value interface{}) string { - return fmt.Sprintf("%v", value) -} - func isPointerKind(kind reflect.Kind) bool { return kind == reflect.Ptr } diff --git a/vendor/k8s.io/apimachinery/pkg/labels/labels.go b/vendor/k8s.io/apimachinery/pkg/labels/labels.go index 32db4d96f..abf3ace6f 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/labels.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/labels.go @@ -172,7 +172,7 @@ func ConvertSelectorToLabelsMap(selector string) (Set, error) { return labelsMap, err } value := strings.TrimSpace(l[1]) - if err := validateLabelValue(value); err != nil { + if err := validateLabelValue(key, value); err != nil { return labelsMap, err } labelsMap[key] = value diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go index f5a088893..9be9e57d3 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -162,7 +162,7 @@ func NewRequirement(key string, op selection.Operator, vals []string) (*Requirem } for i := range vals { - if err := validateLabelValue(vals[i]); err != nil { + if err := validateLabelValue(key, vals[i]); err != nil { return nil, err } } @@ -837,9 +837,9 @@ func validateLabelKey(k string) error { return nil } -func validateLabelValue(v string) error { +func validateLabelValue(k, v string) error { if errs := validation.IsValidLabelValue(v); len(errs) != 0 { - return fmt.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; ")) + return fmt.Errorf("invalid label value: %q: at key: %q: %s", v, k, strings.Join(errs, "; ")) } return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go index 80343081f..b3e8a53b3 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go @@ -94,7 +94,7 @@ func parseBool(key string) bool { } value, err := strconv.ParseBool(key) if err != nil { - utilruntime.HandleError(fmt.Errorf("Couldn't parse '%s' as bool for unstructured mismatch detection", key)) + utilruntime.HandleError(fmt.Errorf("couldn't parse '%s' as bool for unstructured mismatch detection", key)) } return value } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/error.go b/vendor/k8s.io/apimachinery/pkg/runtime/error.go index 322b0313d..be0c5edc8 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/error.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/error.go @@ -120,3 +120,32 @@ func IsMissingVersion(err error) bool { _, ok := err.(*missingVersionErr) return ok } + +// strictDecodingError is a base error type that is returned by a strict Decoder such +// as UniversalStrictDecoder. +type strictDecodingError struct { + message string + data string +} + +// NewStrictDecodingError creates a new strictDecodingError object. 
+func NewStrictDecodingError(message string, data string) error { + return &strictDecodingError{ + message: message, + data: data, + } +} + +func (e *strictDecodingError) Error() string { + return fmt.Sprintf("strict decoder error for %s: %s", e.data, e.message) +} + +// IsStrictDecodingError returns true if the error indicates that the provided object +// strictness violations. +func IsStrictDecodingError(err error) bool { + if err == nil { + return false + } + _, ok := err.(*strictDecodingError) + return ok +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go index 9b15989c8..af2f076b8 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go @@ -17,27 +17,19 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto -/* - Package runtime is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto - - It has these top-level messages: - RawExtension - TypeMeta - Unknown -*/ package runtime -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import strings "strings" -import reflect "reflect" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import io "io" + proto "github.com/gogo/protobuf/proto" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -50,27 +42,132 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *RawExtension) Reset() { *m = RawExtension{} } -func (*RawExtension) ProtoMessage() {} -func (*RawExtension) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *RawExtension) Reset() { *m = RawExtension{} } +func (*RawExtension) ProtoMessage() {} +func (*RawExtension) Descriptor() ([]byte, []int) { + return fileDescriptor_9d3c45d7f546725c, []int{0} +} +func (m *RawExtension) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RawExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RawExtension) XXX_Merge(src proto.Message) { + xxx_messageInfo_RawExtension.Merge(m, src) +} +func (m *RawExtension) XXX_Size() int { + return m.Size() +} +func (m *RawExtension) XXX_DiscardUnknown() { + xxx_messageInfo_RawExtension.DiscardUnknown(m) +} -func (m *TypeMeta) Reset() { *m = TypeMeta{} } -func (*TypeMeta) ProtoMessage() {} -func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_RawExtension proto.InternalMessageInfo -func (m *Unknown) Reset() { *m = Unknown{} } -func (*Unknown) ProtoMessage() {} -func (*Unknown) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *TypeMeta) Reset() { *m = TypeMeta{} } +func (*TypeMeta) ProtoMessage() {} +func (*TypeMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_9d3c45d7f546725c, []int{1} +} +func (m *TypeMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TypeMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TypeMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypeMeta.Merge(m, src) +} +func (m *TypeMeta) XXX_Size() int { + return m.Size() +} +func (m *TypeMeta) XXX_DiscardUnknown() { + xxx_messageInfo_TypeMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_TypeMeta proto.InternalMessageInfo + +func (m *Unknown) Reset() { *m = Unknown{} } +func (*Unknown) ProtoMessage() {} +func (*Unknown) Descriptor() ([]byte, []int) { + return fileDescriptor_9d3c45d7f546725c, []int{2} +} +func (m *Unknown) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Unknown) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Unknown) XXX_Merge(src proto.Message) { + xxx_messageInfo_Unknown.Merge(m, src) +} +func (m *Unknown) XXX_Size() int { + return m.Size() +} +func (m *Unknown) XXX_DiscardUnknown() { + xxx_messageInfo_Unknown.DiscardUnknown(m) +} + +var xxx_messageInfo_Unknown proto.InternalMessageInfo func init() { proto.RegisterType((*RawExtension)(nil), "k8s.io.apimachinery.pkg.runtime.RawExtension") proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.runtime.TypeMeta") proto.RegisterType((*Unknown)(nil), "k8s.io.apimachinery.pkg.runtime.Unknown") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptor_9d3c45d7f546725c) +} + +var fileDescriptor_9d3c45d7f546725c = []byte{ + // 378 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0x4f, 0xab, 0x13, 0x31, + 0x14, 0xc5, 0x27, 0xaf, 0x85, 0x3e, 0xd3, 0xc2, 0x93, 0xb8, 0x70, 0x74, 0x91, 0x79, 0x74, 0xe5, + 0x5b, 0xbc, 0x04, 0x1e, 0x08, 0x6e, 0x3b, 0xa5, 0xa0, 0x88, 0x20, 0xc1, 0x3f, 0xe0, 0xca, 0x74, + 0x26, 0x4e, 0xc3, 0xd0, 0x9b, 0x21, 0xcd, 0x38, 0x76, 0xe7, 0x47, 0xf0, 0x63, 0x75, 0xd9, 0x65, + 0x57, 0xc5, 0x8e, 0x1f, 0xc2, 0xad, 0x34, 0x4d, 0x6b, 0xd5, 0x85, 0xbb, 0xe4, 0x9e, 0xf3, 0x3b, + 0xf7, 0x1e, 0xfc, 0xbc, 0x7c, 0xb6, 0x60, 0xda, 0xf0, 0xb2, 0x9e, 0x2a, 0x0b, 0xca, 0xa9, 0x05, + 0xff, 0xac, 0x20, 0x37, 0x96, 0x07, 0x41, 0x56, 0x7a, 0x2e, 0xb3, 0x99, 0x06, 0x65, 0x97, 0xbc, + 0x2a, 0x0b, 0x6e, 0x6b, 0x70, 0x7a, 0xae, 0x78, 0xa1, 0x40, 0x59, 0xe9, 0x54, 0xce, 0x2a, 0x6b, + 0x9c, 0x21, 0xc9, 0x01, 0x60, 0xe7, 0x00, 0xab, 0xca, 0x82, 0x05, 0xe0, 0xf1, 0x6d, 0xa1, 0xdd, + 0xac, 0x9e, 0xb2, 0xcc, 0xcc, 0x79, 0x61, 0x0a, 0xc3, 0x3d, 0x37, 0xad, 0x3f, 0xf9, 0x9f, 0xff, + 0xf8, 0xd7, 0x21, 0x6f, 0x78, 0x83, 0x07, 0x42, 0x36, 0x93, 0x2f, 0x4e, 0xc1, 0x42, 0x1b, 0x20, + 0x8f, 0x70, 0xc7, 0xca, 0x26, 0x46, 0xd7, 0xe8, 0xc9, 0x20, 0xed, 0xb5, 0xdb, 0xa4, 0x23, 0x64, + 0x23, 0xf6, 0xb3, 0xe1, 0x47, 0x7c, 0xf9, 0x66, 0x59, 0xa9, 0x57, 0xca, 0x49, 0x72, 0x87, 0xb1, + 0xac, 0xf4, 0x3b, 0x65, 0xf7, 0x90, 0x77, 0xdf, 0x4b, 0xc9, 0x6a, 0x9b, 0x44, 0xed, 0x36, 0xc1, + 0xa3, 0xd7, 0x2f, 0x82, 0x22, 0xce, 0x5c, 0xe4, 0x1a, 0x77, 0x4b, 0x0d, 0x79, 0x7c, 0xe1, 0xdd, + 0x83, 0xe0, 0xee, 0xbe, 0xd4, 0x90, 0x0b, 0xaf, 0x0c, 0x7f, 0x22, 0xdc, 0x7b, 0x0b, 0x25, 0x98, + 0x06, 0xc8, 0x7b, 0x7c, 0xe9, 0xc2, 0x36, 0x9f, 0xdf, 0xbf, 0xbb, 0x61, 0xff, 0xe9, 0xce, 0x8e, + 0xe7, 0xa5, 0xf7, 0x43, 0xf8, 0xe9, 0x60, 0x71, 0x0a, 0x3b, 0x36, 0xbc, 0xf8, 0xb7, 0x21, 0x19, + 0xe1, 0xab, 0xcc, 0x80, 0x53, 0xe0, 0x26, 0x90, 0x99, 0x5c, 0x43, 0x11, 0x77, 0xfc, 0xb1, 0x0f, + 0x43, 0xde, 0xd5, 0xf8, 0x4f, 
0x59, 0xfc, 0xed, 0x27, 0x4f, 0x71, 0x3f, 0x8c, 0xf6, 0xab, 0xe3, + 0xae, 0xc7, 0x1f, 0x04, 0xbc, 0x3f, 0xfe, 0x2d, 0x89, 0x73, 0x5f, 0x7a, 0xbb, 0xda, 0xd1, 0x68, + 0xbd, 0xa3, 0xd1, 0x66, 0x47, 0xa3, 0xaf, 0x2d, 0x45, 0xab, 0x96, 0xa2, 0x75, 0x4b, 0xd1, 0xa6, + 0xa5, 0xe8, 0x7b, 0x4b, 0xd1, 0xb7, 0x1f, 0x34, 0xfa, 0xd0, 0x0b, 0x45, 0x7f, 0x05, 0x00, 0x00, + 0xff, 0xff, 0xe3, 0x33, 0x18, 0x0b, 0x50, 0x02, 0x00, 0x00, +} + func (m *RawExtension) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -78,23 +175,29 @@ func (m *RawExtension) Marshal() (dAtA []byte, err error) { } func (m *RawExtension) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RawExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.Raw != nil { - dAtA[i] = 0xa - i++ + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) - i += copy(dAtA[i:], m.Raw) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *TypeMeta) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -102,25 +205,32 @@ func (m *TypeMeta) Marshal() (dAtA []byte, err error) { } func (m *TypeMeta) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TypeMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - dAtA[i] = 0x12 - i++ + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Unknown) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -128,45 +238,60 @@ func (m *Unknown) Marshal() (dAtA []byte, err error) { } func (m *Unknown) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Unknown) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TypeMeta.Size())) - n1, err := m.TypeMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 + i -= len(m.ContentType) + copy(dAtA[i:], m.ContentType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentType))) + i-- + dAtA[i] = 0x22 + i -= len(m.ContentEncoding) + copy(dAtA[i:], m.ContentEncoding) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentEncoding))) + i-- + dAtA[i] = 0x1a if m.Raw != nil { - dAtA[i] = 0x12 - i++ + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) - i += copy(dAtA[i:], m.Raw) + i-- + dAtA[i] = 0x12 } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentEncoding))) - i += 
copy(dAtA[i:], m.ContentEncoding) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentType))) - i += copy(dAtA[i:], m.ContentType) - return i, nil + { + size, err := m.TypeMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *RawExtension) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Raw != nil { @@ -177,6 +302,9 @@ func (m *RawExtension) Size() (n int) { } func (m *TypeMeta) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.APIVersion) @@ -187,6 +315,9 @@ func (m *TypeMeta) Size() (n int) { } func (m *Unknown) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.TypeMeta.Size() @@ -203,14 +334,7 @@ func (m *Unknown) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -272,7 +396,7 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -300,7 +424,7 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -309,6 +433,9 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -326,6 +453,9 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -353,7 +483,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -381,7 +511,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -391,6 +521,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -410,7 +543,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -420,6 +553,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -434,6 +570,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) 
> l { return io.ErrUnexpectedEOF } @@ -461,7 +600,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -489,7 +628,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -498,6 +637,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -519,7 +661,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -528,6 +670,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -550,7 +695,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -560,6 +705,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -579,7 +727,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -589,6 +737,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -603,6 +754,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -669,10 +823,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -701,6 +858,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -719,35 +879,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 378 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0x4f, 0xab, 0x13, 0x31, - 0x14, 0xc5, 0x27, 0xaf, 0x85, 0x3e, 0xd3, 0xc2, 0x93, 0xb8, 0x70, 0x74, 0x91, 0x79, 0x74, 0xe5, - 0x5b, 0xbc, 0x04, 0x1e, 0x08, 0x6e, 0x3b, 0xa5, 0xa0, 0x88, 0x20, 0xc1, 0x3f, 0xe0, 0xca, 0x74, - 0x26, 0x4e, 0xc3, 0xd0, 0x9b, 0x21, 0xcd, 0x38, 0x76, 0xe7, 0x47, 0xf0, 0x63, 0x75, 0xd9, 0x65, - 0x57, 0xc5, 0x8e, 0x1f, 0xc2, 0xad, 0x34, 0x4d, 0x6b, 0xd5, 
0x85, 0xbb, 0xe4, 0x9e, 0xf3, 0x3b, - 0xf7, 0x1e, 0xfc, 0xbc, 0x7c, 0xb6, 0x60, 0xda, 0xf0, 0xb2, 0x9e, 0x2a, 0x0b, 0xca, 0xa9, 0x05, - 0xff, 0xac, 0x20, 0x37, 0x96, 0x07, 0x41, 0x56, 0x7a, 0x2e, 0xb3, 0x99, 0x06, 0x65, 0x97, 0xbc, - 0x2a, 0x0b, 0x6e, 0x6b, 0x70, 0x7a, 0xae, 0x78, 0xa1, 0x40, 0x59, 0xe9, 0x54, 0xce, 0x2a, 0x6b, - 0x9c, 0x21, 0xc9, 0x01, 0x60, 0xe7, 0x00, 0xab, 0xca, 0x82, 0x05, 0xe0, 0xf1, 0x6d, 0xa1, 0xdd, - 0xac, 0x9e, 0xb2, 0xcc, 0xcc, 0x79, 0x61, 0x0a, 0xc3, 0x3d, 0x37, 0xad, 0x3f, 0xf9, 0x9f, 0xff, - 0xf8, 0xd7, 0x21, 0x6f, 0x78, 0x83, 0x07, 0x42, 0x36, 0x93, 0x2f, 0x4e, 0xc1, 0x42, 0x1b, 0x20, - 0x8f, 0x70, 0xc7, 0xca, 0x26, 0x46, 0xd7, 0xe8, 0xc9, 0x20, 0xed, 0xb5, 0xdb, 0xa4, 0x23, 0x64, - 0x23, 0xf6, 0xb3, 0xe1, 0x47, 0x7c, 0xf9, 0x66, 0x59, 0xa9, 0x57, 0xca, 0x49, 0x72, 0x87, 0xb1, - 0xac, 0xf4, 0x3b, 0x65, 0xf7, 0x90, 0x77, 0xdf, 0x4b, 0xc9, 0x6a, 0x9b, 0x44, 0xed, 0x36, 0xc1, - 0xa3, 0xd7, 0x2f, 0x82, 0x22, 0xce, 0x5c, 0xe4, 0x1a, 0x77, 0x4b, 0x0d, 0x79, 0x7c, 0xe1, 0xdd, - 0x83, 0xe0, 0xee, 0xbe, 0xd4, 0x90, 0x0b, 0xaf, 0x0c, 0x7f, 0x22, 0xdc, 0x7b, 0x0b, 0x25, 0x98, - 0x06, 0xc8, 0x7b, 0x7c, 0xe9, 0xc2, 0x36, 0x9f, 0xdf, 0xbf, 0xbb, 0x61, 0xff, 0xe9, 0xce, 0x8e, - 0xe7, 0xa5, 0xf7, 0x43, 0xf8, 0xe9, 0x60, 0x71, 0x0a, 0x3b, 0x36, 0xbc, 0xf8, 0xb7, 0x21, 0x19, - 0xe1, 0xab, 0xcc, 0x80, 0x53, 0xe0, 0x26, 0x90, 0x99, 0x5c, 0x43, 0x11, 0x77, 0xfc, 0xb1, 0x0f, - 0x43, 0xde, 0xd5, 0xf8, 0x4f, 0x59, 0xfc, 0xed, 0x27, 0x4f, 0x71, 0x3f, 0x8c, 0xf6, 0xab, 0xe3, - 0xae, 0xc7, 0x1f, 0x04, 0xbc, 0x3f, 0xfe, 0x2d, 0x89, 0x73, 0x5f, 0x7a, 0xbb, 0xda, 0xd1, 0x68, - 0xbd, 0xa3, 0xd1, 0x66, 0x47, 0xa3, 0xaf, 0x2d, 0x45, 0xab, 0x96, 0xa2, 0x75, 0x4b, 0xd1, 0xa6, - 0xa5, 0xe8, 0x7b, 0x4b, 0xd1, 0xb7, 0x1f, 0x34, 0xfa, 0xd0, 0x0b, 0x45, 0x7f, 0x05, 0x00, 0x00, - 0xff, 0xff, 0xe3, 0x33, 0x18, 0x0b, 0x50, 0x02, 0x00, 0x00, -} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go index 33f11eb10..7bd1a3a6a 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go @@ -51,7 +51,7 @@ func UnsafeObjectConvertor(scheme *Scheme) ObjectConvertor { func SetField(src interface{}, v reflect.Value, fieldName string) error { field := v.FieldByName(fieldName) if !field.IsValid() { - return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) + return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface()) } srcValue := reflect.ValueOf(src) if srcValue.Type().AssignableTo(field.Type()) { @@ -70,7 +70,7 @@ func SetField(src interface{}, v reflect.Value, fieldName string) error { func Field(v reflect.Value, fieldName string, dest interface{}) error { field := v.FieldByName(fieldName) if !field.IsValid() { - return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) + return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface()) } destValue, err := conversion.EnforcePtr(dest) if err != nil { @@ -93,7 +93,7 @@ func Field(v reflect.Value, fieldName string, dest interface{}) error { func FieldPtr(v reflect.Value, fieldName string, dest interface{}) error { field := v.FieldByName(fieldName) if !field.IsValid() { - return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) + return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface()) } v, err := conversion.EnforcePtr(dest) if err != nil { @@ -210,3 +210,50 @@ type defaultFramer struct{} func (defaultFramer) NewFrameReader(r io.ReadCloser) 
io.ReadCloser { return r } func (defaultFramer) NewFrameWriter(w io.Writer) io.Writer { return w } + +// WithVersionEncoder serializes an object and ensures the GVK is set. +type WithVersionEncoder struct { + Version GroupVersioner + Encoder + ObjectTyper +} + +// Encode does not do conversion. It sets the gvk during serialization. +func (e WithVersionEncoder) Encode(obj Object, stream io.Writer) error { + gvks, _, err := e.ObjectTyper.ObjectKinds(obj) + if err != nil { + if IsNotRegisteredError(err) { + return e.Encoder.Encode(obj, stream) + } + return err + } + kind := obj.GetObjectKind() + oldGVK := kind.GroupVersionKind() + gvk := gvks[0] + if e.Version != nil { + preferredGVK, ok := e.Version.KindForGroupVersionKinds(gvks) + if ok { + gvk = preferredGVK + } + } + kind.SetGroupVersionKind(gvk) + err = e.Encoder.Encode(obj, stream) + kind.SetGroupVersionKind(oldGVK) + return err +} + +// WithoutVersionDecoder clears the group version kind of a deserialized object. +type WithoutVersionDecoder struct { + Decoder +} + +// Decode does not do conversion. It removes the gvk during deserialization. +func (d WithoutVersionDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) { + obj, gvk, err := d.Decoder.Decode(data, defaults, into) + if obj != nil { + kind := obj.GetObjectKind() + // clearing the gvk is just a convention of a codec + kind.SetGroupVersionKind(schema.GroupVersionKind{}) + } + return obj, gvk, err +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go index 699ff13e0..bded5bf15 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -91,6 +91,10 @@ type Framer interface { type SerializerInfo struct { // MediaType is the value that represents this serializer over the wire. MediaType string + // MediaTypeType is the first part of the MediaType ("application" in "application/json"). + MediaTypeType string + // MediaTypeSubType is the second part of the MediaType ("json" in "application/json"). + MediaTypeSubType string // EncodesAsText indicates this serializer can be encoded to UTF-8 safely. EncodesAsText bool // Serializer is the individual object serializer for this media type. @@ -206,6 +210,25 @@ type ObjectCreater interface { New(kind schema.GroupVersionKind) (out Object, err error) } +// EquivalentResourceMapper provides information about resources that address the same underlying data as a specified resource +type EquivalentResourceMapper interface { + // EquivalentResourcesFor returns a list of resources that address the same underlying data as resource. + // If subresource is specified, only equivalent resources which also have the same subresource are included. + // The specified resource can be included in the returned list. + EquivalentResourcesFor(resource schema.GroupVersionResource, subresource string) []schema.GroupVersionResource + // KindFor returns the kind expected by the specified resource[/subresource]. + // A zero value is returned if the kind is unknown. 
+ KindFor(resource schema.GroupVersionResource, subresource string) schema.GroupVersionKind +} + +// EquivalentResourceRegistry provides an EquivalentResourceMapper interface, +// and allows registering known resource[/subresource] -> kind +type EquivalentResourceRegistry interface { + EquivalentResourceMapper + // RegisterKindFor registers the existence of the specified resource[/subresource] along with its expected kind. + RegisterKindFor(resource schema.GroupVersionResource, subresource string, kind schema.GroupVersionKind) +} + // ResourceVersioner provides methods for setting and retrieving // the resource version from an API object. type ResourceVersioner interface { @@ -237,6 +260,9 @@ type Object interface { // to JSON allowed. type Unstructured interface { Object + // NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data. + // This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info. + NewEmptyInstance() Unstructured // UnstructuredContent returns a non-nil map with this object's contents. Values may be // []interface{}, map[string]interface{}, or any primitive type. Contents are typically serialized to // and from JSON. SetUnstructuredContent should be used to mutate the contents. diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/mapper.go b/vendor/k8s.io/apimachinery/pkg/runtime/mapper.go new file mode 100644 index 000000000..3ff84611a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/mapper.go @@ -0,0 +1,98 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "sync" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type equivalentResourceRegistry struct { + // keyFunc computes a key for the specified resource (this allows honoring colocated resources across API groups). + // if null, or if "" is returned, resource.String() is used as the key + keyFunc func(resource schema.GroupResource) string + // resources maps key -> subresource -> equivalent resources (subresource is not included in the returned resources). + // main resources are stored with subresource="". + resources map[string]map[string][]schema.GroupVersionResource + // kinds maps resource -> subresource -> kind + kinds map[schema.GroupVersionResource]map[string]schema.GroupVersionKind + // keys caches the computed key for each GroupResource + keys map[schema.GroupResource]string + + mutex sync.RWMutex +} + +var _ EquivalentResourceMapper = (*equivalentResourceRegistry)(nil) +var _ EquivalentResourceRegistry = (*equivalentResourceRegistry)(nil) + +// NewEquivalentResourceRegistry creates a resource registry that considers all versions of a GroupResource to be equivalent. +func NewEquivalentResourceRegistry() EquivalentResourceRegistry { + return &equivalentResourceRegistry{} +} + +// NewEquivalentResourceRegistryWithIdentity creates a resource mapper with a custom identity function. 
+// If "" is returned by the function, GroupResource#String is used as the identity. +// GroupResources with the same identity string are considered equivalent. +func NewEquivalentResourceRegistryWithIdentity(keyFunc func(schema.GroupResource) string) EquivalentResourceRegistry { + return &equivalentResourceRegistry{keyFunc: keyFunc} +} + +func (r *equivalentResourceRegistry) EquivalentResourcesFor(resource schema.GroupVersionResource, subresource string) []schema.GroupVersionResource { + r.mutex.RLock() + defer r.mutex.RUnlock() + return r.resources[r.keys[resource.GroupResource()]][subresource] +} +func (r *equivalentResourceRegistry) KindFor(resource schema.GroupVersionResource, subresource string) schema.GroupVersionKind { + r.mutex.RLock() + defer r.mutex.RUnlock() + return r.kinds[resource][subresource] +} +func (r *equivalentResourceRegistry) RegisterKindFor(resource schema.GroupVersionResource, subresource string, kind schema.GroupVersionKind) { + r.mutex.Lock() + defer r.mutex.Unlock() + if r.kinds == nil { + r.kinds = map[schema.GroupVersionResource]map[string]schema.GroupVersionKind{} + } + if r.kinds[resource] == nil { + r.kinds[resource] = map[string]schema.GroupVersionKind{} + } + r.kinds[resource][subresource] = kind + + // get the shared key of the parent resource + key := "" + gr := resource.GroupResource() + if r.keyFunc != nil { + key = r.keyFunc(gr) + } + if key == "" { + key = gr.String() + } + + if r.keys == nil { + r.keys = map[schema.GroupResource]string{} + } + r.keys[gr] = key + + if r.resources == nil { + r.resources = map[string]map[string][]schema.GroupVersionResource{} + } + if r.resources[key] == nil { + r.resources[key] = map[string][]schema.GroupVersionResource{} + } + r.resources[key][subresource] = append(r.resources[key][subresource], resource) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go index 28a61d5fb..a7276649f 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go @@ -17,19 +17,15 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto -/* -Package schema is a generated protocol buffer package. +package schema -It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto +import ( + fmt "fmt" -It has these top-level messages: -*/ -package schema + math "math" -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" + proto "github.com/gogo/protobuf/proto" +) // Reference imports to suppress errors if they are not otherwise used. 
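The new runtime/mapper.go above introduces EquivalentResourceRegistry. A minimal sketch of how the registry is meant to be used; the apps/deployments resources below are illustrative only and are not something this PR registers:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	reg := runtime.NewEquivalentResourceRegistry()

	// Register the same GroupResource under two API versions.
	v1beta1 := schema.GroupVersionResource{Group: "apps", Version: "v1beta1", Resource: "deployments"}
	v1 := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
	reg.RegisterKindFor(v1beta1, "", schema.GroupVersionKind{Group: "apps", Version: "v1beta1", Kind: "Deployment"})
	reg.RegisterKindFor(v1, "", schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"})

	// Both versions address the same underlying data, so each is listed as equivalent.
	fmt.Println(reg.EquivalentResourcesFor(v1, ""))
	// Kind lookup for the main resource (subresource "").
	fmt.Println(reg.KindFor(v1beta1, ""))
}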
var _ = proto.Marshal @@ -43,10 +39,10 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptorGenerated) + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptor_0462724132518e0d) } -var fileDescriptorGenerated = []byte{ +var fileDescriptor_0462724132518e0d = []byte{ // 185 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0xcc, 0xaf, 0x6e, 0xc3, 0x30, 0x10, 0xc7, 0x71, 0x9b, 0x0c, 0x0c, 0x0e, 0x0e, 0x1c, 0x1c, 0xda, 0x7c, 0x74, 0xb8, 0x2f, 0x50, diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go index 65f451124..f21b0ef19 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go @@ -17,9 +17,13 @@ limitations under the License. package serializer import ( + "mime" + "strings" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/json" + "k8s.io/apimachinery/pkg/runtime/serializer/protobuf" "k8s.io/apimachinery/pkg/runtime/serializer/recognizer" "k8s.io/apimachinery/pkg/runtime/serializer/versioning" ) @@ -44,30 +48,53 @@ type serializerType struct { StreamSerializer runtime.Serializer } -func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory) []serializerType { - jsonSerializer := json.NewSerializer(mf, scheme, scheme, false) - jsonPrettySerializer := json.NewSerializer(mf, scheme, scheme, true) - yamlSerializer := json.NewYAMLSerializer(mf, scheme, scheme) +func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, options CodecFactoryOptions) []serializerType { + jsonSerializer := json.NewSerializerWithOptions( + mf, scheme, scheme, + json.SerializerOptions{Yaml: false, Pretty: false, Strict: options.Strict}, + ) + jsonSerializerType := serializerType{ + AcceptContentTypes: []string{runtime.ContentTypeJSON}, + ContentType: runtime.ContentTypeJSON, + FileExtensions: []string{"json"}, + EncodesAsText: true, + Serializer: jsonSerializer, + + Framer: json.Framer, + StreamSerializer: jsonSerializer, + } + if options.Pretty { + jsonSerializerType.PrettySerializer = json.NewSerializerWithOptions( + mf, scheme, scheme, + json.SerializerOptions{Yaml: false, Pretty: true, Strict: options.Strict}, + ) + } - serializers := []serializerType{ - { - AcceptContentTypes: []string{"application/json"}, - ContentType: "application/json", - FileExtensions: []string{"json"}, - EncodesAsText: true, - Serializer: jsonSerializer, - PrettySerializer: jsonPrettySerializer, + yamlSerializer := json.NewSerializerWithOptions( + mf, scheme, scheme, + json.SerializerOptions{Yaml: true, Pretty: false, Strict: options.Strict}, + ) + protoSerializer := protobuf.NewSerializer(scheme, scheme) + protoRawSerializer := protobuf.NewRawSerializer(scheme, scheme) - Framer: json.Framer, - StreamSerializer: jsonSerializer, - }, + serializers := []serializerType{ + jsonSerializerType, { - AcceptContentTypes: []string{"application/yaml"}, - ContentType: "application/yaml", + AcceptContentTypes: []string{runtime.ContentTypeYAML}, + ContentType: runtime.ContentTypeYAML, FileExtensions: []string{"yaml"}, EncodesAsText: true, Serializer: yamlSerializer, }, + { + 
AcceptContentTypes: []string{runtime.ContentTypeProtobuf}, + ContentType: runtime.ContentTypeProtobuf, + FileExtensions: []string{"pb"}, + Serializer: protoSerializer, + + Framer: protobuf.LengthDelimitedFramer, + StreamSerializer: protoRawSerializer, + }, } for _, fn := range serializerExtensions { @@ -89,14 +116,56 @@ type CodecFactory struct { legacySerializer runtime.Serializer } +// CodecFactoryOptions holds the options for configuring CodecFactory behavior +type CodecFactoryOptions struct { + // Strict configures all serializers in strict mode + Strict bool + // Pretty includes a pretty serializer along with the non-pretty one + Pretty bool +} + +// CodecFactoryOptionsMutator takes a pointer to an options struct and then modifies it. +// Functions implementing this type can be passed to the NewCodecFactory() constructor. +type CodecFactoryOptionsMutator func(*CodecFactoryOptions) + +// EnablePretty enables including a pretty serializer along with the non-pretty one +func EnablePretty(options *CodecFactoryOptions) { + options.Pretty = true +} + +// DisablePretty disables including a pretty serializer along with the non-pretty one +func DisablePretty(options *CodecFactoryOptions) { + options.Pretty = false +} + +// EnableStrict enables configuring all serializers in strict mode +func EnableStrict(options *CodecFactoryOptions) { + options.Strict = true +} + +// DisableStrict disables configuring all serializers in strict mode +func DisableStrict(options *CodecFactoryOptions) { + options.Strict = false +} + // NewCodecFactory provides methods for retrieving serializers for the supported wire formats // and conversion wrappers to define preferred internal and external versions. In the future, // as the internal version is used less, callers may instead use a defaulting serializer and // only convert objects which are shared internally (Status, common API machinery). +// +// Mutators can be passed to change the CodecFactoryOptions before construction of the factory. +// It is recommended to explicitly pass mutators instead of relying on defaults. +// By default, Pretty is enabled -- this is conformant with previously supported behavior. +// // TODO: allow other codecs to be compiled in? // TODO: accept a scheme interface -func NewCodecFactory(scheme *runtime.Scheme) CodecFactory { - serializers := newSerializersForScheme(scheme, json.DefaultMetaFactory) +func NewCodecFactory(scheme *runtime.Scheme, mutators ...CodecFactoryOptionsMutator) CodecFactory { + options := CodecFactoryOptions{Pretty: true} + for _, fn := range mutators { + fn(&options) + } + + serializers := newSerializersForScheme(scheme, json.DefaultMetaFactory, options) return newCodecFactory(scheme, serializers) } @@ -120,6 +189,15 @@ func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) Codec Serializer: d.Serializer, PrettySerializer: d.PrettySerializer, } + + mediaType, _, err := mime.ParseMediaType(info.MediaType) + if err != nil { + panic(err) + } + parts := strings.SplitN(mediaType, "/", 2) + info.MediaTypeType = parts[0] + info.MediaTypeSubType = parts[1] + if d.StreamSerializer != nil { info.StreamSerializer = &runtime.StreamSerializerInfo{ Serializer: d.StreamSerializer, @@ -148,6 +226,12 @@ func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) Codec } } +// WithoutConversion returns a NegotiatedSerializer that performs no conversion, even if the +// caller requests it. 
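The reworked codec factory takes option mutators instead of hard-coded booleans, registers protobuf by default, and records the split media type on each SerializerInfo. A short sketch, assuming the runtime.Scheme is populated elsewhere:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

func main() {
	scheme := runtime.NewScheme()

	// Mutators replace boolean arguments: strict decoding on, pretty JSON off.
	codecs := serializer.NewCodecFactory(scheme, serializer.EnableStrict, serializer.DisablePretty)

	// Each media type now also exposes its type/subtype split.
	for _, info := range codecs.SupportedMediaTypes() {
		fmt.Printf("%s (type=%s, subtype=%s)\n", info.MediaType, info.MediaTypeType, info.MediaTypeSubType)
	}
}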
+func (f CodecFactory) WithoutConversion() runtime.NegotiatedSerializer { + return WithoutConversionCodecFactory{f} +} + // SupportedMediaTypes returns the RFC2046 media types that this factory has serializers for. func (f CodecFactory) SupportedMediaTypes() []runtime.SerializerInfo { return f.accepts @@ -215,23 +299,26 @@ func (f CodecFactory) EncoderForVersion(encoder runtime.Encoder, gv runtime.Grou return f.CodecForVersions(encoder, nil, gv, nil) } -// DirectCodecFactory provides methods for retrieving "DirectCodec"s, which do not do conversion. -type DirectCodecFactory struct { +// WithoutConversionCodecFactory is a CodecFactory that will explicitly ignore requests to perform conversion. +// This wrapper is used while code migrates away from using conversion (such as external clients) and in the future +// will be unnecessary when we change the signature of NegotiatedSerializer. +type WithoutConversionCodecFactory struct { CodecFactory } -// EncoderForVersion returns an encoder that does not do conversion. -func (f DirectCodecFactory) EncoderForVersion(serializer runtime.Encoder, version runtime.GroupVersioner) runtime.Encoder { - return versioning.DirectEncoder{ +// EncoderForVersion returns an encoder that does not do conversion, but does set the group version kind of the object +// when serialized. +func (f WithoutConversionCodecFactory) EncoderForVersion(serializer runtime.Encoder, version runtime.GroupVersioner) runtime.Encoder { + return runtime.WithVersionEncoder{ Version: version, Encoder: serializer, ObjectTyper: f.CodecFactory.scheme, } } -// DecoderToVersion returns an decoder that does not do conversion. gv is ignored. -func (f DirectCodecFactory) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder { - return versioning.DirectDecoder{ +// DecoderToVersion returns an decoder that does not do conversion. +func (f WithoutConversionCodecFactory) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder { + return runtime.WithoutVersionDecoder{ Decoder: serializer, } } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go index 8987e74c6..69ada8ecf 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -35,34 +35,56 @@ import ( // NewSerializer creates a JSON serializer that handles encoding versioned objects into the proper JSON form. If typer // is not nil, the object has the group, version, and kind fields set. +// Deprecated: use NewSerializerWithOptions instead. func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, pretty bool) *Serializer { - return &Serializer{ - meta: meta, - creater: creater, - typer: typer, - yaml: false, - pretty: pretty, - } + return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{false, pretty, false}) } // NewYAMLSerializer creates a YAML serializer that handles encoding versioned objects into the proper YAML form. If typer // is not nil, the object has the group, version, and kind fields set. This serializer supports only the subset of YAML that // matches JSON, and will error if constructs are used that do not serialize to JSON. +// Deprecated: use NewSerializerWithOptions instead. 
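DirectCodecFactory is replaced by the WithoutConversion method shown above. A hedged sketch of the migration; the scheme is assumed to come from the caller:

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

// Old (removed): serializer.DirectCodecFactory{CodecFactory: codecs}.
// New: the factory itself hands out a conversion-free NegotiatedSerializer.
func negotiatedWithoutConversion(scheme *runtime.Scheme) runtime.NegotiatedSerializer {
	return serializer.NewCodecFactory(scheme).WithoutConversion()
}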
func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer { + return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{true, false, false}) +} + +// NewSerializerWithOptions creates a JSON/YAML serializer that handles encoding versioned objects into the proper JSON/YAML +// form. If typer is not nil, the object has the group, version, and kind fields set. Options are copied into the Serializer +// and are immutable. +func NewSerializerWithOptions(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, options SerializerOptions) *Serializer { return &Serializer{ meta: meta, creater: creater, typer: typer, - yaml: true, + options: options, } } +// SerializerOptions holds the options which are used to configure a JSON/YAML serializer. +// example: +// (1) To configure a JSON serializer, set `Yaml` to `false`. +// (2) To configure a YAML serializer, set `Yaml` to `true`. +// (3) To configure a strict serializer that can return strictDecodingError, set `Strict` to `true`. +type SerializerOptions struct { + // Yaml: configures the Serializer to work with JSON(false) or YAML(true). + // When `Yaml` is enabled, this serializer only supports the subset of YAML that + // matches JSON, and will error if constructs are used that do not serialize to JSON. + Yaml bool + + // Pretty: configures a JSON enabled Serializer(`Yaml: false`) to produce human-readable output. + // This option is silently ignored when `Yaml` is `true`. + Pretty bool + + // Strict: configures the Serializer to return strictDecodingError's when duplicate fields are present decoding JSON or YAML. + // Note that enabling this option is not as performant as the non-strict variant, and should not be used in fast paths. + Strict bool +} + type Serializer struct { meta MetaFactory + options SerializerOptions creater runtime.ObjectCreater typer runtime.ObjectTyper - yaml bool - pretty bool } // Serializer implements Serializer @@ -119,11 +141,28 @@ func CaseSensitiveJsonIterator() jsoniter.API { return config } -// Private copy of jsoniter to try to shield against possible mutations +// StrictCaseSensitiveJsonIterator returns a jsoniterator API that's configured to be +// case-sensitive, but also disallows unknown fields when unmarshalling. It is compatible with +// the encoding/json standard library. +func StrictCaseSensitiveJsonIterator() jsoniter.API { + config := jsoniter.Config{ + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, + CaseSensitive: true, + DisallowUnknownFields: true, + }.Froze() + // Force jsoniter to decode number to interface{} via int64/float64, if possible. + config.RegisterExtension(&customNumberExtension{}) + return config +} + +// Private copies of jsoniter to try to shield against possible mutations // from outside. Still does not protect from package level jsoniter.Register*() functions - someone calling them // in some other library will mess with every usage of the jsoniter library in the whole program. 
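NewSerializer and NewYAMLSerializer are deprecated in favour of NewSerializerWithOptions. A sketch of the equivalent constructions, assuming an existing scheme:

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer/json"
)

func newSerializers(scheme *runtime.Scheme) (jsonPretty, yamlStrict *json.Serializer) {
	// Equivalent of the deprecated NewSerializer(json.DefaultMetaFactory, scheme, scheme, true).
	jsonPretty = json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme, scheme,
		json.SerializerOptions{Yaml: false, Pretty: true, Strict: false})
	// Equivalent of the deprecated NewYAMLSerializer(json.DefaultMetaFactory, scheme, scheme),
	// additionally rejecting duplicate fields via Strict.
	yamlStrict = json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme, scheme,
		json.SerializerOptions{Yaml: true, Pretty: false, Strict: true})
	return jsonPretty, yamlStrict
}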
// See https://github.com/json-iterator/go/issues/265 var caseSensitiveJsonIterator = CaseSensitiveJsonIterator() +var strictCaseSensitiveJsonIterator = StrictCaseSensitiveJsonIterator() // gvkWithDefaults returns group kind and version defaulting from provided default func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVersionKind { @@ -160,7 +199,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i } data := originalData - if s.yaml { + if s.options.Yaml { altered, err := yaml.YAMLToJSON(data) if err != nil { return nil, nil, err @@ -216,12 +255,38 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i if err := caseSensitiveJsonIterator.Unmarshal(data, obj); err != nil { return nil, actual, err } + + // If the deserializer is non-strict, return successfully here. + if !s.options.Strict { + return obj, actual, nil + } + + // In strict mode pass the data trough the YAMLToJSONStrict converter. + // This is done to catch duplicate fields regardless of encoding (JSON or YAML). For JSON data, + // the output would equal the input, unless there is a parsing error such as duplicate fields. + // As we know this was successful in the non-strict case, the only error that may be returned here + // is because of the newly-added strictness. hence we know we can return the typed strictDecoderError + // the actual error is that the object contains duplicate fields. + altered, err := yaml.YAMLToJSONStrict(originalData) + if err != nil { + return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData)) + } + // As performance is not an issue for now for the strict deserializer (one has regardless to do + // the unmarshal twice), we take the sanitized, altered data that is guaranteed to have no duplicated + // fields, and unmarshal this into a copy of the already-populated obj. Any error that occurs here is + // due to that a matching field doesn't exist in the object. hence we can return a typed strictDecoderError, + // the actual error is that the object contains unknown field. + strictObj := obj.DeepCopyObject() + if err := strictCaseSensitiveJsonIterator.Unmarshal(altered, strictObj); err != nil { + return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData)) + } + // Always return the same object as the non-strict serializer to avoid any deviations. return obj, actual, nil } // Encode serializes the provided object to the given writer. func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { - if s.yaml { + if s.options.Yaml { json, err := caseSensitiveJsonIterator.Marshal(obj) if err != nil { return err @@ -234,7 +299,7 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { return err } - if s.pretty { + if s.options.Pretty { data, err := caseSensitiveJsonIterator.MarshalIndent(obj, "", " ") if err != nil { return err @@ -248,7 +313,7 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { // RecognizesData implements the RecognizingDecoder interface. 
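When Strict is set, duplicate or unknown fields surface as the strictDecodingError added in runtime/error.go, which callers detect with runtime.IsStrictDecodingError. A sketch using a core/v1 ConfigMap purely as an example object:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer/json"
)

func main() {
	scheme := runtime.NewScheme()
	_ = corev1.AddToScheme(scheme)

	strict := json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme, scheme,
		json.SerializerOptions{Yaml: true, Pretty: false, Strict: true})

	// "metadata" appears twice; a strict serializer reports this as a strict decoding error.
	doc := []byte("apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: a\nmetadata:\n  name: b\n")

	_, _, err := strict.Decode(doc, nil, &corev1.ConfigMap{})
	fmt.Println(runtime.IsStrictDecodingError(err)) // true
}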
func (s *Serializer) RecognizesData(peek io.Reader) (ok, unknown bool, err error) { - if s.yaml { + if s.options.Yaml { // we could potentially look for '---' return false, true, nil } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go index b99ba25c8..0f33e1d82 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go @@ -69,22 +69,18 @@ func IsNotMarshalable(err error) bool { // NewSerializer creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If a typer // is passed, the encoded object will have group, version, and kind fields set. If typer is nil, the objects will be written // as-is (any type info passed with the object will be used). -// -// This encoding scheme is experimental, and is subject to change at any time. -func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper, defaultContentType string) *Serializer { +func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer { return &Serializer{ - prefix: protoEncodingPrefix, - creater: creater, - typer: typer, - contentType: defaultContentType, + prefix: protoEncodingPrefix, + creater: creater, + typer: typer, } } type Serializer struct { - prefix []byte - creater runtime.ObjectCreater - typer runtime.ObjectTyper - contentType string + prefix []byte + creater runtime.ObjectCreater + typer runtime.ObjectTyper } var _ runtime.Serializer = &Serializer{} @@ -138,7 +134,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil { *intoUnknown = unk if ok, _, _ := s.RecognizesData(bytes.NewBuffer(unk.Raw)); ok { - intoUnknown.ContentType = s.contentType + intoUnknown.ContentType = runtime.ContentTypeProtobuf } return intoUnknown, &actual, nil } @@ -207,7 +203,7 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { switch t := obj.(type) { case bufferedMarshaller: // this path performs a single allocation during write but requires the caller to implement - // the more efficient Size and MarshalTo methods + // the more efficient Size and MarshalToSizedBuffer methods encodedSize := uint64(t.Size()) estimatedSize := prefixSize + estimateUnknownSize(&unk, encodedSize) data := make([]byte, estimatedSize) @@ -287,6 +283,12 @@ type bufferedMarshaller interface { runtime.ProtobufMarshaller } +// Like bufferedMarshaller, but is able to marshal backwards, which is more efficient since it doesn't call Size() as frequently. +type bufferedReverseMarshaller interface { + proto.Sizer + runtime.ProtobufReverseMarshaller +} + // estimateUnknownSize returns the expected bytes consumed by a given runtime.Unknown // object with a nil RawJSON struct and the expected size of the provided buffer. The // returned size will not be correct if RawJSOn is set on unk. @@ -303,20 +305,18 @@ func estimateUnknownSize(unk *runtime.Unknown, byteSize uint64) uint64 { // encoded object, and thus is not self describing (callers must know what type is being described in order to decode). // // This encoding scheme is experimental, and is subject to change at any time. 
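The protobuf serializer constructors drop the defaultContentType argument; the content type is now always the new runtime.ContentTypeProtobuf constant. A small sketch of the updated call sites, with the scheme assumed to come from elsewhere:

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer/protobuf"
)

func newProtoSerializers(scheme *runtime.Scheme) (*protobuf.Serializer, *protobuf.RawSerializer) {
	// Previously: protobuf.NewSerializer(scheme, scheme, "application/vnd.kubernetes.protobuf").
	s := protobuf.NewSerializer(scheme, scheme)
	// Previously: protobuf.NewRawSerializer(scheme, scheme, "application/vnd.kubernetes.protobuf").
	raw := protobuf.NewRawSerializer(scheme, scheme)
	_ = runtime.ContentTypeProtobuf // "application/vnd.kubernetes.protobuf"
	return s, raw
}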
-func NewRawSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper, defaultContentType string) *RawSerializer { +func NewRawSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *RawSerializer { return &RawSerializer{ - creater: creater, - typer: typer, - contentType: defaultContentType, + creater: creater, + typer: typer, } } // RawSerializer encodes and decodes objects without adding a runtime.Unknown wrapper (objects are encoded without identifying // type). type RawSerializer struct { - creater runtime.ObjectCreater - typer runtime.ObjectTyper - contentType string + creater runtime.ObjectCreater + typer runtime.ObjectTyper } var _ runtime.Serializer = &RawSerializer{} @@ -358,7 +358,7 @@ func (s *RawSerializer) Decode(originalData []byte, gvk *schema.GroupVersionKind if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil { intoUnknown.Raw = data intoUnknown.ContentEncoding = "" - intoUnknown.ContentType = s.contentType + intoUnknown.ContentType = runtime.ContentTypeProtobuf intoUnknown.SetGroupVersionKind(*actual) return intoUnknown, actual, nil } @@ -411,12 +411,28 @@ func unmarshalToObject(typer runtime.ObjectTyper, creater runtime.ObjectCreater, if err := proto.Unmarshal(data, pb); err != nil { return nil, actual, err } + if actual != nil { + obj.GetObjectKind().SetGroupVersionKind(*actual) + } return obj, actual, nil } // Encode serializes the provided object to the given writer. Overrides is ignored. func (s *RawSerializer) Encode(obj runtime.Object, w io.Writer) error { switch t := obj.(type) { + case bufferedReverseMarshaller: + // this path performs a single allocation during write but requires the caller to implement + // the more efficient Size and MarshalToSizedBuffer methods + encodedSize := uint64(t.Size()) + data := make([]byte, encodedSize) + + n, err := t.MarshalToSizedBuffer(data) + if err != nil { + return err + } + _, err = w.Write(data[:n]) + return err + case bufferedMarshaller: // this path performs a single allocation during write but requires the caller to implement // the more efficient Size and MarshalTo methods diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go deleted file mode 100644 index 545cf78df..000000000 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package serializer - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/serializer/protobuf" -) - -const ( - // contentTypeProtobuf is the protobuf type exposed for Kubernetes. It is private to prevent others from - // depending on it unintentionally. - // TODO: potentially move to pkg/api (since it's part of the Kube public API) and pass it in to the - // CodecFactory on initialization. 
- contentTypeProtobuf = "application/vnd.kubernetes.protobuf" -) - -func protobufSerializer(scheme *runtime.Scheme) (serializerType, bool) { - serializer := protobuf.NewSerializer(scheme, scheme, contentTypeProtobuf) - raw := protobuf.NewRawSerializer(scheme, scheme, contentTypeProtobuf) - return serializerType{ - AcceptContentTypes: []string{contentTypeProtobuf}, - ContentType: contentTypeProtobuf, - FileExtensions: []string{"pb"}, - Serializer: serializer, - - Framer: protobuf.LengthDelimitedFramer, - StreamSerializer: raw, - }, true -} - -func init() { - serializerExtensions = append(serializerExtensions, protobufSerializer) -} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go index 001847107..ee5cb86f7 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go @@ -106,20 +106,13 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru } if d, ok := obj.(runtime.NestedObjectDecoder); ok { - if err := d.DecodeNestedObjects(DirectDecoder{c.decoder}); err != nil { + if err := d.DecodeNestedObjects(runtime.WithoutVersionDecoder{c.decoder}); err != nil { return nil, gvk, err } } // if we specify a target, use generic conversion. if into != nil { - if into == obj { - if isVersioned { - return versioned, gvk, nil - } - return into, gvk, nil - } - // perform defaulting if requested if c.defaulter != nil { // create a copy to ensure defaulting is not applied to the original versioned objects @@ -133,6 +126,14 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru } } + // Short-circuit conversion if the into object is same object + if into == obj { + if isVersioned { + return versioned, gvk, nil + } + return into, gvk, nil + } + if err := c.convertor.Convert(obj, into, c.decodeVersion); err != nil { return nil, gvk, err } @@ -199,84 +200,33 @@ func (c *codec) Encode(obj runtime.Object, w io.Writer) error { return err } + objectKind := obj.GetObjectKind() + old := objectKind.GroupVersionKind() + // restore the old GVK after encoding + defer objectKind.SetGroupVersionKind(old) + if c.encodeVersion == nil || isUnversioned { if e, ok := obj.(runtime.NestedObjectEncoder); ok { - if err := e.EncodeNestedObjects(DirectEncoder{Encoder: c.encoder, ObjectTyper: c.typer}); err != nil { + if err := e.EncodeNestedObjects(runtime.WithVersionEncoder{Encoder: c.encoder, ObjectTyper: c.typer}); err != nil { return err } } - objectKind := obj.GetObjectKind() - old := objectKind.GroupVersionKind() objectKind.SetGroupVersionKind(gvks[0]) - err = c.encoder.Encode(obj, w) - objectKind.SetGroupVersionKind(old) - return err + return c.encoder.Encode(obj, w) } // Perform a conversion if necessary - objectKind := obj.GetObjectKind() - old := objectKind.GroupVersionKind() out, err := c.convertor.ConvertToVersion(obj, c.encodeVersion) if err != nil { return err } if e, ok := out.(runtime.NestedObjectEncoder); ok { - if err := e.EncodeNestedObjects(DirectEncoder{Version: c.encodeVersion, Encoder: c.encoder, ObjectTyper: c.typer}); err != nil { + if err := e.EncodeNestedObjects(runtime.WithVersionEncoder{Version: c.encodeVersion, Encoder: c.encoder, ObjectTyper: c.typer}); err != nil { return err } } // Conversion is responsible for setting the proper group, version, and kind onto the outgoing object - err = c.encoder.Encode(out, w) - // restore the 
old GVK, in case conversion returned the same object - objectKind.SetGroupVersionKind(old) - return err -} - -// DirectEncoder serializes an object and ensures the GVK is set. -type DirectEncoder struct { - Version runtime.GroupVersioner - runtime.Encoder - runtime.ObjectTyper -} - -// Encode does not do conversion. It sets the gvk during serialization. -func (e DirectEncoder) Encode(obj runtime.Object, stream io.Writer) error { - gvks, _, err := e.ObjectTyper.ObjectKinds(obj) - if err != nil { - if runtime.IsNotRegisteredError(err) { - return e.Encoder.Encode(obj, stream) - } - return err - } - kind := obj.GetObjectKind() - oldGVK := kind.GroupVersionKind() - gvk := gvks[0] - if e.Version != nil { - preferredGVK, ok := e.Version.KindForGroupVersionKinds(gvks) - if ok { - gvk = preferredGVK - } - } - kind.SetGroupVersionKind(gvk) - err = e.Encoder.Encode(obj, stream) - kind.SetGroupVersionKind(oldGVK) - return err -} - -// DirectDecoder clears the group version kind of a deserialized object. -type DirectDecoder struct { - runtime.Decoder -} - -// Decode does not do conversion. It removes the gvk during deserialization. -func (d DirectDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { - obj, gvk, err := d.Decoder.Decode(data, defaults, into) - if obj != nil { - kind := obj.GetObjectKind() - // clearing the gvk is just a convention of a codec - kind.SetGroupVersionKind(schema.GroupVersionKind{}) - } - return obj, gvk, err + return c.encoder.Encode(out, w) } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go index 1f7f662e0..2f0b6c9e5 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/types.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go @@ -41,8 +41,9 @@ type TypeMeta struct { } const ( - ContentTypeJSON string = "application/json" - ContentTypeYAML string = "application/yaml" + ContentTypeJSON string = "application/json" + ContentTypeYAML string = "application/yaml" + ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf" ) // RawExtension is used to hold extensions in external versions. @@ -94,7 +95,7 @@ type RawExtension struct { // Raw is the underlying serialization of this object. // // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data. - Raw []byte `protobuf:"bytes,1,opt,name=raw"` + Raw []byte `json:"-" protobuf:"bytes,1,opt,name=raw"` // Object can hold a representation of this extension - useful for working with versioned // structs. Object Object `json:"-"` diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go index ead96ee05..a82227b23 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go @@ -24,46 +24,66 @@ type ProtobufMarshaller interface { MarshalTo(data []byte) (int, error) } +type ProtobufReverseMarshaller interface { + MarshalToSizedBuffer(data []byte) (int, error) +} + // NestedMarshalTo allows a caller to avoid extra allocations during serialization of an Unknown -// that will contain an object that implements ProtobufMarshaller. +// that will contain an object that implements ProtobufMarshaller or ProtobufReverseMarshaller. 
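versioning.DirectEncoder and versioning.DirectDecoder are gone; the equivalent wrappers now live in the runtime package as WithVersionEncoder and WithoutVersionDecoder. A sketch of the replacement, using a core/v1 ConfigMap only as an example payload:

import (
	"os"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer/json"
)

func encodeWithoutConversion(scheme *runtime.Scheme) error {
	_ = corev1.AddToScheme(scheme)
	jsonSer := json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme, scheme, json.SerializerOptions{})

	// Was versioning.DirectEncoder{...}; sets apiVersion/kind on the way out, without conversion.
	enc := runtime.WithVersionEncoder{Version: corev1.SchemeGroupVersion, Encoder: jsonSer, ObjectTyper: scheme}
	// Was versioning.DirectDecoder{...}; clears the decoded object's GVK.
	_ = runtime.WithoutVersionDecoder{Decoder: jsonSer}

	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "example"}}
	return enc.Encode(cm, os.Stdout)
}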
func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.TypeMeta.Size())) - n1, err := m.TypeMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err + // Calculate the full size of the message. + msgSize := m.Size() + if b != nil { + msgSize += int(size) + sovGenerated(size) + 1 } - i += n1 + // Reverse marshal the fields of m. + i := msgSize + i -= len(m.ContentType) + copy(data[i:], m.ContentType) + i = encodeVarintGenerated(data, i, uint64(len(m.ContentType))) + i-- + data[i] = 0x22 + i -= len(m.ContentEncoding) + copy(data[i:], m.ContentEncoding) + i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding))) + i-- + data[i] = 0x1a if b != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, size) - n2, err := b.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - if uint64(n2) != size { - // programmer error: the Size() method for protobuf does not match the results of MarshalTo, which means the proto - // struct returned would be wrong. - return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n2) + if r, ok := b.(ProtobufReverseMarshaller); ok { + n1, err := r.MarshalToSizedBuffer(data[:i]) + if err != nil { + return 0, err + } + i -= int(size) + if uint64(n1) != size { + // programmer error: the Size() method for protobuf does not match the results of LashramOt, which means the proto + // struct returned would be wrong. + return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n1) + } + } else { + i -= int(size) + n1, err := b.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + if uint64(n1) != size { + // programmer error: the Size() method for protobuf does not match the results of MarshalTo, which means the proto + // struct returned would be wrong. + return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n1) + } } - i += n2 + i = encodeVarintGenerated(data, i, size) + i-- + data[i] = 0x12 } - - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding))) - i += copy(data[i:], m.ContentEncoding) - - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ContentType))) - i += copy(data[i:], m.ContentType) - return i, nil + n2, err := m.TypeMeta.MarshalToSizedBuffer(data[:i]) + if err != nil { + return 0, err + } + i -= n2 + i = encodeVarintGenerated(data, i, uint64(n2)) + i-- + data[i] = 0xa + return msgSize - i, nil } diff --git a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go index 9567f9006..1689e62e8 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go +++ b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go @@ -21,11 +21,18 @@ import ( "time" ) +// PassiveClock allows for injecting fake or real clocks into code +// that needs to read the current time but does not support scheduling +// activity in the future. +type PassiveClock interface { + Now() time.Time + Since(time.Time) time.Duration +} + // Clock allows for injecting fake or real clocks into code that // needs to do arbitrary things based on time. 
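The narrower PassiveClock interface introduced above lets read-only consumers of time accept either the real clock or a fake. A minimal sketch, assuming the FakePassiveClock helper that this hunk adds further down; the expired function is purely illustrative:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
)

// expired only reads the current time, so PassiveClock is enough.
func expired(c clock.PassiveClock, deadline time.Time) bool {
	return c.Now().After(deadline)
}

func main() {
	start := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)
	fake := clock.NewFakePassiveClock(start)
	deadline := start.Add(time.Minute)

	fmt.Println(expired(fake, deadline)) // false
	fake.SetTime(start.Add(2 * time.Minute))
	fmt.Println(expired(fake, deadline)) // true

	// Production code passes clock.RealClock{} instead of the fake.
	_ = expired(clock.RealClock{}, deadline)
}
```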
type Clock interface { - Now() time.Time - Since(time.Time) time.Duration + PassiveClock After(time.Duration) <-chan time.Time NewTimer(time.Duration) Timer Sleep(time.Duration) @@ -66,10 +73,15 @@ func (RealClock) Sleep(d time.Duration) { time.Sleep(d) } -// FakeClock implements Clock, but returns an arbitrary time. -type FakeClock struct { +// FakePassiveClock implements PassiveClock, but returns an arbitrary time. +type FakePassiveClock struct { lock sync.RWMutex time time.Time +} + +// FakeClock implements Clock, but returns an arbitrary time. +type FakeClock struct { + FakePassiveClock // waiters are waiting for the fake time to pass their specified time waiters []fakeClockWaiter @@ -80,29 +92,41 @@ type fakeClockWaiter struct { stepInterval time.Duration skipIfBlocked bool destChan chan time.Time - fired bool +} + +func NewFakePassiveClock(t time.Time) *FakePassiveClock { + return &FakePassiveClock{ + time: t, + } } func NewFakeClock(t time.Time) *FakeClock { return &FakeClock{ - time: t, + FakePassiveClock: *NewFakePassiveClock(t), } } // Now returns f's time. -func (f *FakeClock) Now() time.Time { +func (f *FakePassiveClock) Now() time.Time { f.lock.RLock() defer f.lock.RUnlock() return f.time } // Since returns time since the time in f. -func (f *FakeClock) Since(ts time.Time) time.Duration { +func (f *FakePassiveClock) Since(ts time.Time) time.Duration { f.lock.RLock() defer f.lock.RUnlock() return f.time.Sub(ts) } +// Sets the time. +func (f *FakePassiveClock) SetTime(t time.Time) { + f.lock.Lock() + defer f.lock.Unlock() + f.time = t +} + // Fake version of time.After(d). func (f *FakeClock) After(d time.Duration) <-chan time.Time { f.lock.Lock() @@ -175,12 +199,10 @@ func (f *FakeClock) setTimeLocked(t time.Time) { if w.skipIfBlocked { select { case w.destChan <- t: - w.fired = true default: } } else { w.destChan <- t - w.fired = true } if w.stepInterval > 0 { @@ -287,36 +309,50 @@ func (f *fakeTimer) C() <-chan time.Time { return f.waiter.destChan } -// Stop stops the timer and returns true if the timer has not yet fired, or false otherwise. +// Stop conditionally stops the timer. If the timer has neither fired +// nor been stopped then this call stops the timer and returns true, +// otherwise this call returns false. This is like time.Timer::Stop. func (f *fakeTimer) Stop() bool { f.fakeClock.lock.Lock() defer f.fakeClock.lock.Unlock() - - newWaiters := make([]fakeClockWaiter, 0, len(f.fakeClock.waiters)) - for i := range f.fakeClock.waiters { - w := &f.fakeClock.waiters[i] - if w != &f.waiter { - newWaiters = append(newWaiters, *w) + // The timer has already fired or been stopped, unless it is found + // among the clock's waiters. + stopped := false + oldWaiters := f.fakeClock.waiters + newWaiters := make([]fakeClockWaiter, 0, len(oldWaiters)) + seekChan := f.waiter.destChan + for i := range oldWaiters { + // Identify the timer's fakeClockWaiter by the identity of the + // destination channel, nothing else is necessarily unique and + // constant since the timer's creation. + if oldWaiters[i].destChan == seekChan { + stopped = true + } else { + newWaiters = append(newWaiters, oldWaiters[i]) } } f.fakeClock.waiters = newWaiters - return !f.waiter.fired + return stopped } -// Reset resets the timer to the fake clock's "now" + d. It returns true if the timer has not yet -// fired, or false otherwise. +// Reset conditionally updates the firing time of the timer. 
If the +// timer has neither fired nor been stopped then this call resets the +// timer to the fake clock's "now" + d and returns true, otherwise +// this call returns false. This is like time.Timer::Reset. func (f *fakeTimer) Reset(d time.Duration) bool { f.fakeClock.lock.Lock() defer f.fakeClock.lock.Unlock() - - active := !f.waiter.fired - - f.waiter.fired = false - f.waiter.targetTime = f.fakeClock.time.Add(d) - - return active + waiters := f.fakeClock.waiters + seekChan := f.waiter.destChan + for i := range waiters { + if waiters[i].destChan == seekChan { + waiters[i].targetTime = f.fakeClock.time.Add(d) + return true + } + } + return false } type Ticker interface { diff --git a/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go index 06042617e..a006b925a 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go +++ b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go @@ -18,16 +18,12 @@ package diff import ( "bytes" - "encoding/json" "fmt" - "reflect" - "sort" "strings" "text/tabwriter" "github.com/davecgh/go-spew/spew" - - "k8s.io/apimachinery/pkg/util/validation/field" + "github.com/google/go-cmp/cmp" ) // StringDiff diffs a and b and returns a human readable diff. @@ -50,220 +46,29 @@ func StringDiff(a, b string) string { return string(out) } -// ObjectDiff writes the two objects out as JSON and prints out the identical part of -// the objects followed by the remaining part of 'a' and finally the remaining part of 'b'. -// For debugging tests. +func legacyDiff(a, b interface{}) string { + return cmp.Diff(a, b) +} + +// ObjectDiff prints the diff of two go objects and fails if the objects +// contain unhandled unexported fields. +// DEPRECATED: use github.com/google/go-cmp/cmp.Diff func ObjectDiff(a, b interface{}) string { - ab, err := json.Marshal(a) - if err != nil { - panic(fmt.Sprintf("a: %v", err)) - } - bb, err := json.Marshal(b) - if err != nil { - panic(fmt.Sprintf("b: %v", err)) - } - return StringDiff(string(ab), string(bb)) + return legacyDiff(a, b) } -// ObjectGoPrintDiff is like ObjectDiff, but uses go-spew to print the objects, -// which shows absolutely everything by recursing into every single pointer -// (go's %#v formatters OTOH stop at a certain point). This is needed when you -// can't figure out why reflect.DeepEqual is returning false and nothing is -// showing you differences. This will. +// ObjectGoPrintDiff prints the diff of two go objects and fails if the objects +// contain unhandled unexported fields. +// DEPRECATED: use github.com/google/go-cmp/cmp.Diff func ObjectGoPrintDiff(a, b interface{}) string { - s := spew.ConfigState{DisableMethods: true} - return StringDiff( - s.Sprintf("%#v", a), - s.Sprintf("%#v", b), - ) + return legacyDiff(a, b) } +// ObjectReflectDiff prints the diff of two go objects and fails if the objects +// contain unhandled unexported fields. +// DEPRECATED: use github.com/google/go-cmp/cmp.Diff func ObjectReflectDiff(a, b interface{}) string { - vA, vB := reflect.ValueOf(a), reflect.ValueOf(b) - if vA.Type() != vB.Type() { - return fmt.Sprintf("type A %T and type B %T do not match", a, b) - } - diffs := objectReflectDiff(field.NewPath("object"), vA, vB) - if len(diffs) == 0 { - return "" - } - out := []string{""} - for _, d := range diffs { - elidedA, elidedB := limit(d.a, d.b, 80) - out = append(out, - fmt.Sprintf("%s:", d.path), - fmt.Sprintf(" a: %s", elidedA), - fmt.Sprintf(" b: %s", elidedB), - ) - } - return strings.Join(out, "\n") -} - -// limit: -// 1. 
stringifies aObj and bObj -// 2. elides identical prefixes if either is too long -// 3. elides remaining content from the end if either is too long -func limit(aObj, bObj interface{}, max int) (string, string) { - elidedPrefix := "" - elidedASuffix := "" - elidedBSuffix := "" - a, b := fmt.Sprintf("%#v", aObj), fmt.Sprintf("%#v", bObj) - - if aObj != nil && bObj != nil { - if aType, bType := fmt.Sprintf("%T", aObj), fmt.Sprintf("%T", bObj); aType != bType { - a = fmt.Sprintf("%s (%s)", a, aType) - b = fmt.Sprintf("%s (%s)", b, bType) - } - } - - for { - switch { - case len(a) > max && len(a) > 4 && len(b) > 4 && a[:4] == b[:4]: - // a is too long, b has data, and the first several characters are the same - elidedPrefix = "..." - a = a[2:] - b = b[2:] - - case len(b) > max && len(b) > 4 && len(a) > 4 && a[:4] == b[:4]: - // b is too long, a has data, and the first several characters are the same - elidedPrefix = "..." - a = a[2:] - b = b[2:] - - case len(a) > max: - a = a[:max] - elidedASuffix = "..." - - case len(b) > max: - b = b[:max] - elidedBSuffix = "..." - - default: - // both are short enough - return elidedPrefix + a + elidedASuffix, elidedPrefix + b + elidedBSuffix - } - } -} - -func public(s string) bool { - if len(s) == 0 { - return false - } - return s[:1] == strings.ToUpper(s[:1]) -} - -type diff struct { - path *field.Path - a, b interface{} -} - -type orderedDiffs []diff - -func (d orderedDiffs) Len() int { return len(d) } -func (d orderedDiffs) Swap(i, j int) { d[i], d[j] = d[j], d[i] } -func (d orderedDiffs) Less(i, j int) bool { - a, b := d[i].path.String(), d[j].path.String() - if a < b { - return true - } - return false -} - -func objectReflectDiff(path *field.Path, a, b reflect.Value) []diff { - switch a.Type().Kind() { - case reflect.Struct: - var changes []diff - for i := 0; i < a.Type().NumField(); i++ { - if !public(a.Type().Field(i).Name) { - if reflect.DeepEqual(a.Interface(), b.Interface()) { - continue - } - return []diff{{path: path, a: fmt.Sprintf("%#v", a), b: fmt.Sprintf("%#v", b)}} - } - if sub := objectReflectDiff(path.Child(a.Type().Field(i).Name), a.Field(i), b.Field(i)); len(sub) > 0 { - changes = append(changes, sub...) - } - } - return changes - case reflect.Ptr, reflect.Interface: - if a.IsNil() || b.IsNil() { - switch { - case a.IsNil() && b.IsNil(): - return nil - case a.IsNil(): - return []diff{{path: path, a: nil, b: b.Interface()}} - default: - return []diff{{path: path, a: a.Interface(), b: nil}} - } - } - return objectReflectDiff(path, a.Elem(), b.Elem()) - case reflect.Chan: - if !reflect.DeepEqual(a.Interface(), b.Interface()) { - return []diff{{path: path, a: a.Interface(), b: b.Interface()}} - } - return nil - case reflect.Slice: - lA, lB := a.Len(), b.Len() - l := lA - if lB < lA { - l = lB - } - if lA == lB && lA == 0 { - if a.IsNil() != b.IsNil() { - return []diff{{path: path, a: a.Interface(), b: b.Interface()}} - } - return nil - } - var diffs []diff - for i := 0; i < l; i++ { - if !reflect.DeepEqual(a.Index(i), b.Index(i)) { - diffs = append(diffs, objectReflectDiff(path.Index(i), a.Index(i), b.Index(i))...) 
- } - } - for i := l; i < lA; i++ { - diffs = append(diffs, diff{path: path.Index(i), a: a.Index(i), b: nil}) - } - for i := l; i < lB; i++ { - diffs = append(diffs, diff{path: path.Index(i), a: nil, b: b.Index(i)}) - } - return diffs - case reflect.Map: - if reflect.DeepEqual(a.Interface(), b.Interface()) { - return nil - } - aKeys := make(map[interface{}]interface{}) - for _, key := range a.MapKeys() { - aKeys[key.Interface()] = a.MapIndex(key).Interface() - } - var missing []diff - for _, key := range b.MapKeys() { - if _, ok := aKeys[key.Interface()]; ok { - delete(aKeys, key.Interface()) - if reflect.DeepEqual(a.MapIndex(key).Interface(), b.MapIndex(key).Interface()) { - continue - } - missing = append(missing, objectReflectDiff(path.Key(fmt.Sprintf("%s", key.Interface())), a.MapIndex(key), b.MapIndex(key))...) - continue - } - missing = append(missing, diff{path: path.Key(fmt.Sprintf("%s", key.Interface())), a: nil, b: b.MapIndex(key).Interface()}) - } - for key, value := range aKeys { - missing = append(missing, diff{path: path.Key(fmt.Sprintf("%s", key)), a: value, b: nil}) - } - if len(missing) == 0 { - missing = append(missing, diff{path: path, a: a.Interface(), b: b.Interface()}) - } - sort.Sort(orderedDiffs(missing)) - return missing - default: - if reflect.DeepEqual(a.Interface(), b.Interface()) { - return nil - } - if !a.CanInterface() { - return []diff{{path: path, a: fmt.Sprintf("%#v", a), b: fmt.Sprintf("%#v", b)}} - } - return []diff{{path: path, a: a.Interface(), b: b.Interface()}} - } + return legacyDiff(a, b) } // ObjectGoPrintSideBySide prints a and b as textual dumps side by side, diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go index 48dd7d9c5..64cbc7703 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go @@ -17,22 +17,17 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto -/* - Package intstr is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto - - It has these top-level messages: - IntOrString -*/ package intstr -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + + io "io" + math "math" + math_bits "math/bits" -import io "io" + proto "github.com/gogo/protobuf/proto" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -45,17 +40,69 @@ var _ = math.Inf // proto package needs to be updated. 
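With the reflection-based differ removed, ObjectDiff, ObjectGoPrintDiff and ObjectReflectDiff all delegate to go-cmp, so new code can call cmp.Diff directly. A small illustration with a made-up struct:

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type etcdSpec struct {
	Replicas int
	Image    string
}

func main() {
	a := etcdSpec{Replicas: 1, Image: "etcd:3.3.17"}
	b := etcdSpec{Replicas: 3, Image: "etcd:3.3.17"}
	// cmp.Diff returns "" for equal values and a human-readable diff otherwise.
	fmt.Println(cmp.Diff(a, b))
}
```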
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *IntOrString) Reset() { *m = IntOrString{} } -func (*IntOrString) ProtoMessage() {} -func (*IntOrString) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *IntOrString) Reset() { *m = IntOrString{} } +func (*IntOrString) ProtoMessage() {} +func (*IntOrString) Descriptor() ([]byte, []int) { + return fileDescriptor_94e046ae3ce6121c, []int{0} +} +func (m *IntOrString) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IntOrString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IntOrString) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntOrString.Merge(m, src) +} +func (m *IntOrString) XXX_Size() int { + return m.Size() +} +func (m *IntOrString) XXX_DiscardUnknown() { + xxx_messageInfo_IntOrString.DiscardUnknown(m) +} + +var xxx_messageInfo_IntOrString proto.InternalMessageInfo func init() { proto.RegisterType((*IntOrString)(nil), "k8s.io.apimachinery.pkg.util.intstr.IntOrString") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptor_94e046ae3ce6121c) +} + +var fileDescriptor_94e046ae3ce6121c = []byte{ + // 292 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0x33, 0x31, + 0x1c, 0xc6, 0x93, 0xb7, 0x7d, 0x8b, 0x9e, 0xe0, 0x50, 0x1c, 0x8a, 0x43, 0x7a, 0x28, 0xc8, 0x0d, + 0x9a, 0xac, 0xe2, 0xd8, 0xad, 0x20, 0x08, 0x57, 0x71, 0x70, 0xbb, 0x6b, 0x63, 0x1a, 0xae, 0x4d, + 0x42, 0xee, 0x7f, 0xc2, 0x6d, 0xfd, 0x08, 0xba, 0x39, 0xfa, 0x71, 0x6e, 0xec, 0xd8, 0x41, 0x8a, + 0x17, 0xbf, 0x85, 0x93, 0x5c, 0xee, 0x40, 0xa7, 0xe4, 0x79, 0x9e, 0xdf, 0x2f, 0x90, 0xe0, 0x36, + 0xbb, 0xce, 0xa9, 0xd4, 0x2c, 0x2b, 0x52, 0x6e, 0x15, 0x07, 0x9e, 0xb3, 0x67, 0xae, 0x16, 0xda, + 0xb2, 0x6e, 0x48, 0x8c, 0x5c, 0x27, 0xf3, 0xa5, 0x54, 0xdc, 0x96, 0xcc, 0x64, 0x82, 0x15, 0x20, + 0x57, 0x4c, 0x2a, 0xc8, 0xc1, 0x32, 0xc1, 0x15, 0xb7, 0x09, 0xf0, 0x05, 0x35, 0x56, 0x83, 0x1e, + 0x9e, 0xb7, 0x12, 0xfd, 0x2b, 0x51, 0x93, 0x09, 0xda, 0x48, 0xb4, 0x95, 0x4e, 0xaf, 0x84, 0x84, + 0x65, 0x91, 0xd2, 0xb9, 0x5e, 0x33, 0xa1, 0x85, 0x66, 0xde, 0x4d, 0x8b, 0x27, 0x9f, 0x7c, 0xf0, + 0xb7, 0xf6, 0xcd, 0xb3, 0x57, 0x1c, 0x1c, 0x4d, 0x15, 0xdc, 0xd9, 0x19, 0x58, 0xa9, 0xc4, 0x30, + 0x0a, 0xfa, 0x50, 0x1a, 0x3e, 0xc2, 0x21, 0x8e, 0x7a, 0x93, 0x93, 0x6a, 0x3f, 0x46, 0x6e, 0x3f, + 0xee, 0xdf, 0x97, 0x86, 0x7f, 0x77, 0x67, 0xec, 0x89, 0xe1, 0x45, 0x30, 0x90, 0x0a, 0x1e, 0x92, + 0xd5, 0xe8, 0x5f, 0x88, 0xa3, 0xff, 0x93, 0xe3, 0x8e, 0x1d, 0x4c, 0x7d, 0x1b, 0x77, 0x6b, 0xc3, + 0xe5, 0x60, 0x1b, 0xae, 0x17, 0xe2, 0xe8, 0xf0, 0x97, 0x9b, 0xf9, 0x36, 0xee, 0xd6, 0x9b, 0x83, + 0xb7, 0xf7, 0x31, 0xda, 0x7c, 0x84, 0x68, 0x72, 0x59, 0xd5, 0x04, 0x6d, 0x6b, 0x82, 0x76, 0x35, + 0x41, 0x1b, 0x47, 0x70, 0xe5, 0x08, 0xde, 0x3a, 0x82, 0x77, 0x8e, 0xe0, 0x4f, 0x47, 0xf0, 0xcb, + 0x17, 0x41, 0x8f, 0x83, 0xf6, 0xc3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x52, 0xa0, 0xb5, 0xc9, + 0x64, 0x01, 0x00, 0x00, +} + func (m *IntOrString) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -63,33 +110,44 @@ func (m *IntOrString) Marshal() (dAtA []byte, err error) { } func (m *IntOrString) MarshalTo(dAtA 
[]byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IntOrString) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Type)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.IntVal)) - dAtA[i] = 0x1a - i++ + i -= len(m.StrVal) + copy(dAtA[i:], m.StrVal) i = encodeVarintGenerated(dAtA, i, uint64(len(m.StrVal))) - i += copy(dAtA[i:], m.StrVal) - return i, nil + i-- + dAtA[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(m.IntVal)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *IntOrString) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Type)) @@ -100,14 +158,7 @@ func (m *IntOrString) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -127,7 +178,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -155,7 +206,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= (Type(b) & 0x7F) << shift + m.Type |= Type(b&0x7F) << shift if b < 0x80 { break } @@ -174,7 +225,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.IntVal |= (int32(b) & 0x7F) << shift + m.IntVal |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -193,7 +244,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -203,6 +254,9 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -217,6 +271,9 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -283,10 +340,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -315,6 +375,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -333,30 +396,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - 
// 292 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0x33, 0x31, - 0x1c, 0xc6, 0x93, 0xb7, 0x7d, 0x8b, 0x9e, 0xe0, 0x50, 0x1c, 0x8a, 0x43, 0x7a, 0x28, 0xc8, 0x0d, - 0x9a, 0xac, 0xe2, 0xd8, 0xad, 0x20, 0x08, 0x57, 0x71, 0x70, 0xbb, 0x6b, 0x63, 0x1a, 0xae, 0x4d, - 0x42, 0xee, 0x7f, 0xc2, 0x6d, 0xfd, 0x08, 0xba, 0x39, 0xfa, 0x71, 0x6e, 0xec, 0xd8, 0x41, 0x8a, - 0x17, 0xbf, 0x85, 0x93, 0x5c, 0xee, 0x40, 0xa7, 0xe4, 0x79, 0x9e, 0xdf, 0x2f, 0x90, 0xe0, 0x36, - 0xbb, 0xce, 0xa9, 0xd4, 0x2c, 0x2b, 0x52, 0x6e, 0x15, 0x07, 0x9e, 0xb3, 0x67, 0xae, 0x16, 0xda, - 0xb2, 0x6e, 0x48, 0x8c, 0x5c, 0x27, 0xf3, 0xa5, 0x54, 0xdc, 0x96, 0xcc, 0x64, 0x82, 0x15, 0x20, - 0x57, 0x4c, 0x2a, 0xc8, 0xc1, 0x32, 0xc1, 0x15, 0xb7, 0x09, 0xf0, 0x05, 0x35, 0x56, 0x83, 0x1e, - 0x9e, 0xb7, 0x12, 0xfd, 0x2b, 0x51, 0x93, 0x09, 0xda, 0x48, 0xb4, 0x95, 0x4e, 0xaf, 0x84, 0x84, - 0x65, 0x91, 0xd2, 0xb9, 0x5e, 0x33, 0xa1, 0x85, 0x66, 0xde, 0x4d, 0x8b, 0x27, 0x9f, 0x7c, 0xf0, - 0xb7, 0xf6, 0xcd, 0xb3, 0x57, 0x1c, 0x1c, 0x4d, 0x15, 0xdc, 0xd9, 0x19, 0x58, 0xa9, 0xc4, 0x30, - 0x0a, 0xfa, 0x50, 0x1a, 0x3e, 0xc2, 0x21, 0x8e, 0x7a, 0x93, 0x93, 0x6a, 0x3f, 0x46, 0x6e, 0x3f, - 0xee, 0xdf, 0x97, 0x86, 0x7f, 0x77, 0x67, 0xec, 0x89, 0xe1, 0x45, 0x30, 0x90, 0x0a, 0x1e, 0x92, - 0xd5, 0xe8, 0x5f, 0x88, 0xa3, 0xff, 0x93, 0xe3, 0x8e, 0x1d, 0x4c, 0x7d, 0x1b, 0x77, 0x6b, 0xc3, - 0xe5, 0x60, 0x1b, 0xae, 0x17, 0xe2, 0xe8, 0xf0, 0x97, 0x9b, 0xf9, 0x36, 0xee, 0xd6, 0x9b, 0x83, - 0xb7, 0xf7, 0x31, 0xda, 0x7c, 0x84, 0x68, 0x72, 0x59, 0xd5, 0x04, 0x6d, 0x6b, 0x82, 0x76, 0x35, - 0x41, 0x1b, 0x47, 0x70, 0xe5, 0x08, 0xde, 0x3a, 0x82, 0x77, 0x8e, 0xe0, 0x4f, 0x47, 0xf0, 0xcb, - 0x17, 0x41, 0x8f, 0x83, 0xf6, 0xc3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x52, 0xa0, 0xb5, 0xc9, - 0x64, 0x01, 0x00, 0x00, -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index 5b26ed262..12c8a7b6c 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -122,11 +122,11 @@ func (intstr IntOrString) MarshalJSON() ([]byte, error) { // the OpenAPI spec of this type. // // See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators -func (_ IntOrString) OpenAPISchemaType() []string { return []string{"string"} } +func (IntOrString) OpenAPISchemaType() []string { return []string{"string"} } // OpenAPISchemaFormat is used by the kube-openapi generator when constructing // the OpenAPI spec of this type. -func (_ IntOrString) OpenAPISchemaFormat() string { return "int-or-string" } +func (IntOrString) OpenAPISchemaFormat() string { return "int-or-string" } func (intstr *IntOrString) Fuzz(c fuzz.Continue) { if intstr == nil { diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go index 078f00d9b..f9540c63b 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/http.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go @@ -101,6 +101,9 @@ func SetOldTransportDefaults(t *http.Transport) *http.Transport { if t.TLSHandshakeTimeout == 0 { t.TLSHandshakeTimeout = defaultTransport.TLSHandshakeTimeout } + if t.IdleConnTimeout == 0 { + t.IdleConnTimeout = defaultTransport.IdleConnTimeout + } return t } @@ -111,7 +114,7 @@ func SetTransportDefaults(t *http.Transport) *http.Transport { // Allow clients to disable http2 if needed. 
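The allowsHTTP2 helper added just below keeps SetTransportDefaults from force-enabling HTTP/2 on transports that explicitly pinned another protocol. A rough sketch of the difference, assuming DISABLE_HTTP2 is unset:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	// No NextProtos preference: HTTP/2 is still configured, so "h2" shows up.
	t1 := utilnet.SetTransportDefaults(&http.Transport{})
	fmt.Println(t1.TLSClientConfig.NextProtos)

	// An explicit HTTP/1.1-only preference is now respected and left untouched.
	t2 := utilnet.SetTransportDefaults(&http.Transport{
		TLSClientConfig: &tls.Config{NextProtos: []string{"http/1.1"}},
	})
	fmt.Println(t2.TLSClientConfig.NextProtos)
}
```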
if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 { klog.Infof("HTTP2 has been explicitly disabled") - } else { + } else if allowsHTTP2(t) { if err := http2.ConfigureTransport(t); err != nil { klog.Warningf("Transport failed http2 configuration: %v", err) } @@ -119,6 +122,21 @@ func SetTransportDefaults(t *http.Transport) *http.Transport { return t } +func allowsHTTP2(t *http.Transport) bool { + if t.TLSClientConfig == nil || len(t.TLSClientConfig.NextProtos) == 0 { + // the transport expressed no NextProto preference, allow + return true + } + for _, p := range t.TLSClientConfig.NextProtos { + if p == http2.NextProtoTLS { + // the transport explicitly allowed http/2 + return true + } + } + // the transport explicitly set NextProtos and excluded http/2 + return false +} + type RoundTripperWrapper interface { http.RoundTripper WrappedRoundTripper() http.RoundTripper diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/util.go b/vendor/k8s.io/apimachinery/pkg/util/net/util.go index 8344d10c8..2e7cb9499 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/util.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/util.go @@ -54,3 +54,20 @@ func IsConnectionReset(err error) bool { } return false } + +// Returns if the given err is "connection refused" error +func IsConnectionRefused(err error) bool { + if urlErr, ok := err.(*url.Error); ok { + err = urlErr.Err + } + if opErr, ok := err.(*net.OpError); ok { + err = opErr.Err + } + if osErr, ok := err.(*os.SyscallError); ok { + err = osErr.Err + } + if errno, ok := err.(syscall.Errno); ok && errno == syscall.ECONNREFUSED { + return true + } + return false +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index 8e34f9261..1428443f5 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -18,6 +18,7 @@ package runtime import ( "fmt" + "net/http" "runtime" "sync" "time" @@ -40,11 +41,7 @@ var PanicHandlers = []func(interface{}){logPanic} // called in case of panic. HandleCrash actually crashes, after calling the // handlers and logging the panic message. // -// TODO: remove this function. We are switching to a world where it's safe for -// apiserver to panic, since it will be restarted by kubelet. At the beginning -// of the Kubernetes project, nothing was going to restart apiserver and so -// catching panics was important. But it's actually much simpler for monitoring -// software if we just exit when an unexpected panic happens. +// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully. func HandleCrash(additionalHandlers ...func(interface{})) { if r := recover(); r != nil { for _, fn := range PanicHandlers { @@ -60,27 +57,26 @@ func HandleCrash(additionalHandlers ...func(interface{})) { } } -// logPanic logs the caller tree when a panic occurs. +// logPanic logs the caller tree when a panic occurs (except in the special case of http.ErrAbortHandler). func logPanic(r interface{}) { - callers := getCallers(r) - if _, ok := r.(string); ok { - klog.Errorf("Observed a panic: %s\n%v", r, callers) - } else { - klog.Errorf("Observed a panic: %#v (%v)\n%v", r, r, callers) + if r == http.ErrAbortHandler { + // honor the http.ErrAbortHandler sentinel panic value: + // ErrAbortHandler is a sentinel panic value to abort a handler. 
+ // While any panic from ServeHTTP aborts the response to the client, + // panicking with ErrAbortHandler also suppresses logging of a stack trace to the server's error log. + return } -} -func getCallers(r interface{}) string { - callers := "" - for i := 0; true; i++ { - _, file, line, ok := runtime.Caller(i) - if !ok { - break - } - callers = callers + fmt.Sprintf("%v:%v\n", file, line) + // Same as stdlib http server code. Manually allocate stack trace buffer size + // to prevent excessively large logs + const size = 64 << 10 + stacktrace := make([]byte, size) + stacktrace = stacktrace[:runtime.Stack(stacktrace, false)] + if _, ok := r.(string); ok { + klog.Errorf("Observed a panic: %s\n%s", r, stacktrace) + } else { + klog.Errorf("Observed a panic: %#v (%v)\n%s", r, r, stacktrace) } - - return callers } // ErrorHandlers is a list of functions which will be invoked when an unreturnable @@ -155,13 +151,17 @@ func GetCaller() string { // handlers to handle errors and panics the same way. func RecoverFromPanic(err *error) { if r := recover(); r != nil { - callers := getCallers(r) + // Same as stdlib http server code. Manually allocate stack trace buffer size + // to prevent excessively large logs + const size = 64 << 10 + stacktrace := make([]byte, size) + stacktrace = stacktrace[:runtime.Stack(stacktrace, false)] *err = fmt.Errorf( - "recovered from panic %q. (err=%v) Call stack:\n%v", + "recovered from panic %q. (err=%v) Call stack:\n%s", r, *err, - callers) + stacktrace) } } diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go index 766f4501e..9bfa85d43 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go @@ -46,17 +46,19 @@ func ByteKeySet(theMap interface{}) Byte { } // Insert adds items to the set. -func (s Byte) Insert(items ...byte) { +func (s Byte) Insert(items ...byte) Byte { for _, item := range items { s[item] = Empty{} } + return s } // Delete removes all items from the set. -func (s Byte) Delete(items ...byte) { +func (s Byte) Delete(items ...byte) Byte { for _, item := range items { delete(s, item) } + return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go index a0a513cd9..88bd70967 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go @@ -46,17 +46,19 @@ func IntKeySet(theMap interface{}) Int { } // Insert adds items to the set. -func (s Int) Insert(items ...int) { +func (s Int) Insert(items ...int) Int { for _, item := range items { s[item] = Empty{} } + return s } // Delete removes all items from the set. -func (s Int) Delete(items ...int) { +func (s Int) Delete(items ...int) Int { for _, item := range items { delete(s, item) } + return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go new file mode 100644 index 000000000..96a485554 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go @@ -0,0 +1,205 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.Int32 is a set of int32s, implemented via map[int32]struct{} for minimal memory consumption. +type Int32 map[int32]Empty + +// NewInt32 creates a Int32 from a list of values. +func NewInt32(items ...int32) Int32 { + ss := Int32{} + ss.Insert(items...) + return ss +} + +// Int32KeySet creates a Int32 from a keys of a map[int32](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func Int32KeySet(theMap interface{}) Int32 { + v := reflect.ValueOf(theMap) + ret := Int32{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(int32)) + } + return ret +} + +// Insert adds items to the set. +func (s Int32) Insert(items ...int32) Int32 { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +// Delete removes all items from the set. +func (s Int32) Delete(items ...int32) Int32 { + for _, item := range items { + delete(s, item) + } + return s +} + +// Has returns true if and only if item is contained in the set. +func (s Int32) Has(item int32) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Int32) HasAll(items ...int32) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s Int32) HasAny(items ...int32) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s Int32) Difference(s2 Int32) Int32 { + result := NewInt32() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Int32) Union(s2 Int32) Int32 { + result := NewInt32() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Int32) Intersection(s2 Int32) Int32 { + var walk, other Int32 + result := NewInt32() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Int32) IsSuperset(s2 Int32) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. 
+// (In practice, this means same elements, order doesn't matter) +func (s1 Int32) Equal(s2 Int32) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfInt32 []int32 + +func (s sortableSliceOfInt32) Len() int { return len(s) } +func (s sortableSliceOfInt32) Less(i, j int) bool { return lessInt32(s[i], s[j]) } +func (s sortableSliceOfInt32) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted int32 slice. +func (s Int32) List() []int32 { + res := make(sortableSliceOfInt32, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []int32(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s Int32) UnsortedList() []int32 { + res := make([]int32, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s Int32) PopAny() (int32, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue int32 + return zeroValue, false +} + +// Len returns the size of the set. +func (s Int32) Len() int { + return len(s) +} + +func lessInt32(lhs, rhs int32) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go index 9ca9af0c5..b375a1b06 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go @@ -46,17 +46,19 @@ func Int64KeySet(theMap interface{}) Int64 { } // Insert adds items to the set. -func (s Int64) Insert(items ...int64) { +func (s Int64) Insert(items ...int64) Int64 { for _, item := range items { s[item] = Empty{} } + return s } // Delete removes all items from the set. -func (s Int64) Delete(items ...int64) { +func (s Int64) Delete(items ...int64) Int64 { for _, item := range items { delete(s, item) } + return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go index ba00ad7df..e6f37db88 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go @@ -46,17 +46,19 @@ func StringKeySet(theMap interface{}) String { } // Insert adds items to the set. -func (s String) Insert(items ...string) { +func (s String) Insert(items ...string) String { for _, item := range items { s[item] = Empty{} } + return s } // Delete removes all items from the set. -func (s String) Delete(items ...string) { +func (s String) Delete(items ...string) String { for _, item := range items { delete(s, item) } + return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go b/vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go index bf478223d..1fa351aab 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go +++ b/vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go @@ -17,27 +17,11 @@ limitations under the License. package uuid import ( - "sync" - - "github.com/pborman/uuid" + "github.com/google/uuid" "k8s.io/apimachinery/pkg/types" ) -var uuidLock sync.Mutex -var lastUUID uuid.UUID - func NewUUID() types.UID { - uuidLock.Lock() - defer uuidLock.Unlock() - result := uuid.NewUUID() - // The UUID package is naive and can generate identical UUIDs if the - // time interval is quick enough. - // The UUID uses 100 ns increments so it's short enough to actively - // wait for a new value. 
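Insert and Delete on all the set types (including the new Int32 set) now return the receiver, so construction and mutation can be chained; for example:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Chaining works because Insert and Delete now return the set itself.
	members := sets.NewString("etcd-0").Insert("etcd-1", "etcd-2").Delete("etcd-0")
	fmt.Println(members.List()) // [etcd-1 etcd-2]

	ports := sets.NewInt32(2379).Insert(2380)
	fmt.Println(ports.List()) // [2379 2380]
}
```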
- for uuid.Equal(lastUUID, result) == true { - result = uuid.NewUUID() - } - lastUUID = result - return types.UID(result.String()) + return types.UID(uuid.New().String()) } diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go index 4767fd1dd..1d372a525 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -116,6 +116,10 @@ const ( // This is similar to ErrorTypeInvalid, but the error will not include the // too-long value. See TooLong(). ErrorTypeTooLong ErrorType = "FieldValueTooLong" + // ErrorTypeTooMany is used to report "too many". This is used to + // report that a given list has too many items. This is similar to FieldValueTooLong, + // but the error indicates quantity instead of length. + ErrorTypeTooMany ErrorType = "FieldValueTooMany" // ErrorTypeInternal is used to report other errors that are not related // to user input. See InternalError(). ErrorTypeInternal ErrorType = "InternalError" @@ -138,6 +142,8 @@ func (t ErrorType) String() string { return "Forbidden" case ErrorTypeTooLong: return "Too long" + case ErrorTypeTooMany: + return "Too many" case ErrorTypeInternal: return "Internal error" default: @@ -201,6 +207,13 @@ func TooLong(field *Path, value interface{}, maxLength int) *Error { return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d characters", maxLength)} } +// TooMany returns a *Error indicating "too many". This is used to +// report that a given list has too many items. This is similar to TooLong, +// but the returned error indicates quantity instead of length. +func TooMany(field *Path, actualQuantity, maxQuantity int) *Error { + return &Error{ErrorTypeTooMany, field.String(), actualQuantity, fmt.Sprintf("must have at most %d items", maxQuantity)} +} + // InternalError returns a *Error indicating "internal error". This is used // to signal that an error was found that was not directly related to user // input. The err argument must be non-nil. diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index 204177563..386c3e7ea 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -207,23 +207,31 @@ type ConditionFunc func() (done bool, err error) type Backoff struct { // The initial duration. Duration time.Duration - // Duration is multiplied by factor each iteration. Must be greater - // than or equal to zero. + // Duration is multiplied by factor each iteration, if factor is not zero + // and the limits imposed by Steps and Cap have not been reached. + // Should not be negative. + // The jitter does not contribute to the updates to the duration parameter. Factor float64 - // The amount of jitter applied each iteration. Jitter is applied after - // cap. + // The sleep at each iteration is the duration plus an additional + // amount chosen uniformly at random from the interval between + // zero and `jitter*duration`. Jitter float64 - // The number of steps before duration stops changing. If zero, initial - // duration is always used. Used for exponential backoff in combination - // with Factor. + // The remaining number of iterations in which the duration + // parameter may change (but progress can be stopped earlier by + // hitting the cap). If not positive, the duration is not + // changed. 
Used for exponential backoff in combination with + // Factor and Cap. Steps int - // The returned duration will never be greater than cap *before* jitter - // is applied. The actual maximum cap is `cap * (1.0 + jitter)`. + // A limit on revised values of the duration parameter. If a + // multiplication by the factor parameter would make the duration + // exceed the cap then the duration is set to the cap and the + // steps parameter is set to zero. Cap time.Duration } -// Step returns the next interval in the exponential backoff. This method -// will mutate the provided backoff. +// Step (1) returns an amount of time to sleep determined by the +// original Duration and Jitter and (2) mutates the provided Backoff +// to update its Steps and Duration. func (b *Backoff) Step() time.Duration { if b.Steps < 1 { if b.Jitter > 0 { @@ -250,16 +258,35 @@ func (b *Backoff) Step() time.Duration { return duration } -// ExponentialBackoff repeats a condition check with exponential backoff. +// contextForChannel derives a child context from a parent channel. // -// It checks the condition up to Steps times, increasing the wait by multiplying -// the previous duration by Factor. +// The derived context's Done channel is closed when the returned cancel function +// is called or when the parent channel is closed, whichever happens first. // -// If Jitter is greater than zero, a random amount of each duration is added -// (between duration and duration*(1+jitter)). +// Note the caller must *always* call the CancelFunc, otherwise resources may be leaked. +func contextForChannel(parentCh <-chan struct{}) (context.Context, context.CancelFunc) { + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + select { + case <-parentCh: + cancel() + case <-ctx.Done(): + } + }() + return ctx, cancel +} + +// ExponentialBackoff repeats a condition check with exponential backoff. // -// If the condition never returns true, ErrWaitTimeout is returned. All other -// errors terminate immediately. +// It repeatedly checks the condition and then sleeps, using `backoff.Step()` +// to determine the length of the sleep and adjust Duration and Steps. +// Stops and returns as soon as: +// 1. the condition check returns true or an error, +// 2. `backoff.Steps` checks of the condition have been done, or +// 3. a sleep truncated by the cap on duration has been completed. +// In case (1) the returned error is what the condition function returned. +// In all other cases, ErrWaitTimeout is returned. func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error { for backoff.Steps > 0 { if ok, err := condition(); err != nil || ok { @@ -353,7 +380,9 @@ func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) erro // PollUntil always waits interval before the first run of 'condition'. // 'condition' will always be invoked at least once. func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { - return WaitFor(poller(interval, 0), condition, stopCh) + ctx, cancel := contextForChannel(stopCh) + defer cancel() + return WaitFor(poller(interval, 0), condition, ctx.Done()) } // PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed. @@ -422,7 +451,9 @@ func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error { // timeout has elapsed and then closes the channel. // // Over very short intervals you may receive no ticks before the channel is -// closed. A timeout of 0 is interpreted as an infinity. 
+// closed. A timeout of 0 is interpreted as an infinity, and in such a case +// it would be the caller's responsibility to close the done channel. +// Failure to do so would result in a leaked goroutine. // // Output ticks are not buffered. If the channel is not ready to receive an // item, the tick is skipped. diff --git a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go index d61cf5a2e..8af256eb1 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go @@ -17,13 +17,15 @@ limitations under the License. package watch import ( + "fmt" "io" "sync" + "k8s.io/klog" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/net" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/klog" ) // Decoder allows StreamWatcher to watch any stream for which a Decoder can be written. @@ -39,19 +41,28 @@ type Decoder interface { Close() } +// Reporter hides the details of how an error is turned into a runtime.Object for +// reporting on a watch stream since this package may not import a higher level report. +type Reporter interface { + // AsObject must convert err into a valid runtime.Object for the watch stream. + AsObject(err error) runtime.Object +} + // StreamWatcher turns any stream for which you can write a Decoder interface // into a watch.Interface. type StreamWatcher struct { sync.Mutex - source Decoder - result chan Event - stopped bool + source Decoder + reporter Reporter + result chan Event + stopped bool } // NewStreamWatcher creates a StreamWatcher from the given decoder. -func NewStreamWatcher(d Decoder) *StreamWatcher { +func NewStreamWatcher(d Decoder, r Reporter) *StreamWatcher { sw := &StreamWatcher{ - source: d, + source: d, + reporter: r, // It's easy for a consumer to add buffering via an extra // goroutine/channel, but impossible for them to remove it, // so nonbuffered is better. @@ -102,11 +113,13 @@ func (sw *StreamWatcher) receive() { case io.ErrUnexpectedEOF: klog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err) default: - msg := "Unable to decode an event from the watch stream: %v" if net.IsProbableEOF(err) { - klog.V(5).Infof(msg, err) + klog.V(5).Infof("Unable to decode an event from the watch stream: %v", err) } else { - klog.Errorf(msg, err) + sw.result <- Event{ + Type: Error, + Object: sw.reporter.AsObject(fmt.Errorf("unable to decode an event from the watch stream: %v", err)), + } } } return diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go index be9c90c03..3945be3ae 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/watch.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go @@ -44,6 +44,7 @@ const ( Added EventType = "ADDED" Modified EventType = "MODIFIED" Deleted EventType = "DELETED" + Bookmark EventType = "BOOKMARK" Error EventType = "ERROR" DefaultChanSize int32 = 100 @@ -57,6 +58,10 @@ type Event struct { // Object is: // * If Type is Added or Modified: the new state of the object. // * If Type is Deleted: the state of the object immediately before deletion. + // * If Type is Bookmark: the object (instance of a type being watched) where + // only ResourceVersion field is set. On successful restart of watch from a + // bookmark resourceVersion, client is guaranteed to not get repeat event + // nor miss any events. // * If Type is Error: *api.Status is recommended; other types may make sense // depending on context. 
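With the new BOOKMARK event type, consumers typically record the bookmarked resourceVersion and otherwise skip the event. A minimal sketch of such a loop; the drain helper is illustrative and the package's FakeWatcher is used only to keep it self-contained:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/watch"
)

// drain consumes a watch until its channel closes.
func drain(w watch.Interface) {
	for event := range w.ResultChan() {
		switch event.Type {
		case watch.Bookmark:
			// Only the object's resourceVersion is meaningful here; remember it
			// so a restarted watch can resume without repeating or missing events.
		case watch.Added, watch.Modified, watch.Deleted:
			fmt.Printf("%s %T\n", event.Type, event.Object)
		case watch.Error:
			fmt.Printf("error: %#v\n", event.Object)
		}
	}
}

func main() {
	fw := watch.NewFake()
	fw.Stop() // close the stream immediately; a real client would keep it open
	drain(fw)
}
```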
Object runtime.Object diff --git a/vendor/k8s.io/client-go/discovery/doc.go b/vendor/k8s.io/client-go/discovery/doc.go index 76495588e..6baa1ef2a 100644 --- a/vendor/k8s.io/client-go/discovery/doc.go +++ b/vendor/k8s.io/client-go/discovery/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package discovery provides ways to discover server-supported // API groups, versions and resources. -package discovery +package discovery // import "k8s.io/client-go/discovery" diff --git a/vendor/k8s.io/client-go/dynamic/scheme.go b/vendor/k8s.io/client-go/dynamic/scheme.go index c4aa081f9..4596104d8 100644 --- a/vendor/k8s.io/client-go/dynamic/scheme.go +++ b/vendor/k8s.io/client-go/dynamic/scheme.go @@ -43,6 +43,8 @@ func init() { var watchJsonSerializerInfo = runtime.SerializerInfo{ MediaType: "application/json", + MediaTypeType: "application", + MediaTypeSubType: "json", EncodesAsText: true, Serializer: json.NewSerializer(json.DefaultMetaFactory, watchScheme, watchScheme, false), PrettySerializer: json.NewSerializer(json.DefaultMetaFactory, watchScheme, watchScheme, true), @@ -77,6 +79,8 @@ func (s basicNegotiatedSerializer) SupportedMediaTypes() []runtime.SerializerInf return []runtime.SerializerInfo{ { MediaType: "application/json", + MediaTypeType: "application", + MediaTypeSubType: "json", EncodesAsText: true, Serializer: json.NewSerializer(json.DefaultMetaFactory, basicScheme, basicScheme, false), PrettySerializer: json.NewSerializer(json.DefaultMetaFactory, basicScheme, basicScheme, true), diff --git a/vendor/k8s.io/client-go/dynamic/simple.go b/vendor/k8s.io/client-go/dynamic/simple.go index 852f0c512..4e0ef5a7d 100644 --- a/vendor/k8s.io/client-go/dynamic/simple.go +++ b/vendor/k8s.io/client-go/dynamic/simple.go @@ -17,6 +17,7 @@ limitations under the License. package dynamic import ( + "fmt" "io" "k8s.io/apimachinery/pkg/api/meta" @@ -36,6 +37,19 @@ type dynamicClient struct { var _ Interface = &dynamicClient{} +// ConfigFor returns a copy of the provided config with the +// appropriate dynamic client defaults set. +func ConfigFor(inConfig *rest.Config) *rest.Config { + config := rest.CopyConfig(inConfig) + config.AcceptContentTypes = "application/json" + config.ContentType = "application/json" + config.NegotiatedSerializer = basicNegotiatedSerializer{} // this gets used for discovery and error handling types + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + return config +} + // NewForConfigOrDie creates a new Interface for the given config and // panics if there is an error in the config. func NewForConfigOrDie(c *rest.Config) Interface { @@ -46,17 +60,12 @@ func NewForConfigOrDie(c *rest.Config) Interface { return ret } +// NewForConfig creates a new dynamic client or returns an error. 
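The name guards added to the dynamic client below turn an empty object name into an immediate error instead of a malformed API call. A sketch using a throwaway rest.Config, with the etcd-druid CRD coordinates purely as an example; nothing here reaches a server:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/rest"
)

func main() {
	// ConfigFor is applied inside NewForConfig and pins the client to JSON.
	cfg := &rest.Config{Host: "https://127.0.0.1:6443"} // placeholder host
	client, err := dynamic.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	gvr := schema.GroupVersionResource{Group: "druid.gardener.cloud", Version: "v1alpha1", Resource: "etcds"}

	// The empty name is rejected before any request is built.
	_, err = client.Resource(gvr).Namespace("default").Get("", metav1.GetOptions{})
	fmt.Println(err) // name is required
}
```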
func NewForConfig(inConfig *rest.Config) (Interface, error) { - config := rest.CopyConfig(inConfig) + config := ConfigFor(inConfig) // for serializing the options config.GroupVersion = &schema.GroupVersion{} config.APIPath = "/if-you-see-this-search-for-the-break" - config.AcceptContentTypes = "application/json" - config.ContentType = "application/json" - config.NegotiatedSerializer = basicNegotiatedSerializer{} // this gets used for discovery and error handling types - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } restClient, err := rest.RESTClientFor(config) if err != nil { @@ -94,6 +103,9 @@ func (c *dynamicResourceClient) Create(obj *unstructured.Unstructured, opts meta return nil, err } name = accessor.GetName() + if len(name) == 0 { + return nil, fmt.Errorf("name is required") + } } result := c.client.client. @@ -122,6 +134,10 @@ func (c *dynamicResourceClient) Update(obj *unstructured.Unstructured, opts meta if err != nil { return nil, err } + name := accessor.GetName() + if len(name) == 0 { + return nil, fmt.Errorf("name is required") + } outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) if err != nil { return nil, err @@ -129,7 +145,7 @@ func (c *dynamicResourceClient) Update(obj *unstructured.Unstructured, opts meta result := c.client.client. Put(). - AbsPath(append(c.makeURLSegments(accessor.GetName()), subresources...)...). + AbsPath(append(c.makeURLSegments(name), subresources...)...). Body(outBytes). SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). Do() @@ -153,6 +169,10 @@ func (c *dynamicResourceClient) UpdateStatus(obj *unstructured.Unstructured, opt if err != nil { return nil, err } + name := accessor.GetName() + if len(name) == 0 { + return nil, fmt.Errorf("name is required") + } outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) if err != nil { @@ -161,7 +181,7 @@ func (c *dynamicResourceClient) UpdateStatus(obj *unstructured.Unstructured, opt result := c.client.client. Put(). - AbsPath(append(c.makeURLSegments(accessor.GetName()), "status")...). + AbsPath(append(c.makeURLSegments(name), "status")...). Body(outBytes). SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). Do() @@ -181,6 +201,9 @@ func (c *dynamicResourceClient) UpdateStatus(obj *unstructured.Unstructured, opt } func (c *dynamicResourceClient) Delete(name string, opts *metav1.DeleteOptions, subresources ...string) error { + if len(name) == 0 { + return fmt.Errorf("name is required") + } if opts == nil { opts = &metav1.DeleteOptions{} } @@ -216,6 +239,9 @@ func (c *dynamicResourceClient) DeleteCollection(opts *metav1.DeleteOptions, lis } func (c *dynamicResourceClient) Get(name string, opts metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) { + if len(name) == 0 { + return nil, fmt.Errorf("name is required") + } result := c.client.client.Get().AbsPath(append(c.makeURLSegments(name), subresources...)...).SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).Do() if err := result.Error(); err != nil { return nil, err @@ -284,6 +310,9 @@ func (c *dynamicResourceClient) Watch(opts metav1.ListOptions) (watch.Interface, } func (c *dynamicResourceClient) Patch(name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*unstructured.Unstructured, error) { + if len(name) == 0 { + return nil, fmt.Errorf("name is required") + } result := c.client.client. Patch(pt). 
AbsPath(append(c.makeURLSegments(name), subresources...)...). diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index fb889e6df..f8a237f61 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -19,7 +19,10 @@ limitations under the License. package kubernetes import ( + "fmt" + discovery "k8s.io/client-go/discovery" + admissionregistrationv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" admissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" @@ -39,6 +42,7 @@ import ( coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + discoveryv1alpha1 "k8s.io/client-go/kubernetes/typed/discovery/v1alpha1" eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" @@ -62,6 +66,7 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface + AdmissionregistrationV1() admissionregistrationv1.AdmissionregistrationV1Interface AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface AppsV1() appsv1.AppsV1Interface AppsV1beta1() appsv1beta1.AppsV1beta1Interface @@ -81,6 +86,7 @@ type Interface interface { CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface CoordinationV1() coordinationv1.CoordinationV1Interface CoreV1() corev1.CoreV1Interface + DiscoveryV1alpha1() discoveryv1alpha1.DiscoveryV1alpha1Interface EventsV1beta1() eventsv1beta1.EventsV1beta1Interface ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface NetworkingV1() networkingv1.NetworkingV1Interface @@ -104,6 +110,7 @@ type Interface interface { // version included in a Clientset. 
type Clientset struct { *discovery.DiscoveryClient + admissionregistrationV1 *admissionregistrationv1.AdmissionregistrationV1Client admissionregistrationV1beta1 *admissionregistrationv1beta1.AdmissionregistrationV1beta1Client appsV1 *appsv1.AppsV1Client appsV1beta1 *appsv1beta1.AppsV1beta1Client @@ -123,6 +130,7 @@ type Clientset struct { coordinationV1beta1 *coordinationv1beta1.CoordinationV1beta1Client coordinationV1 *coordinationv1.CoordinationV1Client coreV1 *corev1.CoreV1Client + discoveryV1alpha1 *discoveryv1alpha1.DiscoveryV1alpha1Client eventsV1beta1 *eventsv1beta1.EventsV1beta1Client extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client networkingV1 *networkingv1.NetworkingV1Client @@ -142,6 +150,11 @@ type Clientset struct { storageV1alpha1 *storagev1alpha1.StorageV1alpha1Client } +// AdmissionregistrationV1 retrieves the AdmissionregistrationV1Client +func (c *Clientset) AdmissionregistrationV1() admissionregistrationv1.AdmissionregistrationV1Interface { + return c.admissionregistrationV1 +} + // AdmissionregistrationV1beta1 retrieves the AdmissionregistrationV1beta1Client func (c *Clientset) AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface { return c.admissionregistrationV1beta1 @@ -237,6 +250,11 @@ func (c *Clientset) CoreV1() corev1.CoreV1Interface { return c.coreV1 } +// DiscoveryV1alpha1 retrieves the DiscoveryV1alpha1Client +func (c *Clientset) DiscoveryV1alpha1() discoveryv1alpha1.DiscoveryV1alpha1Interface { + return c.discoveryV1alpha1 +} + // EventsV1beta1 retrieves the EventsV1beta1Client func (c *Clientset) EventsV1beta1() eventsv1beta1.EventsV1beta1Interface { return c.eventsV1beta1 @@ -331,13 +349,22 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface { } // NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. func NewForConfig(c *rest.Config) (*Clientset, error) { configShallowCopy := *c if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("Burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) } var cs Clientset var err error + cs.admissionregistrationV1, err = admissionregistrationv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.admissionregistrationV1beta1, err = admissionregistrationv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -414,6 +441,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.discoveryV1alpha1, err = discoveryv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.eventsV1beta1, err = eventsv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -494,6 +525,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { // panics if there is an error in the config. 
func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset + cs.admissionregistrationV1 = admissionregistrationv1.NewForConfigOrDie(c) cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.NewForConfigOrDie(c) cs.appsV1 = appsv1.NewForConfigOrDie(c) cs.appsV1beta1 = appsv1beta1.NewForConfigOrDie(c) @@ -513,6 +545,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { cs.coordinationV1beta1 = coordinationv1beta1.NewForConfigOrDie(c) cs.coordinationV1 = coordinationv1.NewForConfigOrDie(c) cs.coreV1 = corev1.NewForConfigOrDie(c) + cs.discoveryV1alpha1 = discoveryv1alpha1.NewForConfigOrDie(c) cs.eventsV1beta1 = eventsv1beta1.NewForConfigOrDie(c) cs.extensionsV1beta1 = extensionsv1beta1.NewForConfigOrDie(c) cs.networkingV1 = networkingv1.NewForConfigOrDie(c) @@ -538,6 +571,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { // New creates a new Clientset for the given RESTClient. func New(c rest.Interface) *Clientset { var cs Clientset + cs.admissionregistrationV1 = admissionregistrationv1.New(c) cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.New(c) cs.appsV1 = appsv1.New(c) cs.appsV1beta1 = appsv1beta1.New(c) @@ -557,6 +591,7 @@ func New(c rest.Interface) *Clientset { cs.coordinationV1beta1 = coordinationv1beta1.New(c) cs.coordinationV1 = coordinationv1.New(c) cs.coreV1 = corev1.New(c) + cs.discoveryV1alpha1 = discoveryv1alpha1.New(c) cs.eventsV1beta1 = eventsv1beta1.New(c) cs.extensionsV1beta1 = extensionsv1beta1.New(c) cs.networkingV1 = networkingv1.New(c) diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index 8346d26a5..c1a2c5198 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -19,6 +19,7 @@ limitations under the License. package scheme import ( + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" @@ -38,6 +39,7 @@ import ( coordinationv1 "k8s.io/api/coordination/v1" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" corev1 "k8s.io/api/core/v1" + discoveryv1alpha1 "k8s.io/api/discovery/v1alpha1" eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" networkingv1 "k8s.io/api/networking/v1" @@ -66,6 +68,7 @@ var Scheme = runtime.NewScheme() var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ + admissionregistrationv1.AddToScheme, admissionregistrationv1beta1.AddToScheme, appsv1.AddToScheme, appsv1beta1.AddToScheme, @@ -85,6 +88,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ coordinationv1beta1.AddToScheme, coordinationv1.AddToScheme, corev1.AddToScheme, + discoveryv1alpha1.AddToScheme, eventsv1beta1.AddToScheme, extensionsv1beta1.AddToScheme, networkingv1.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go new file mode 100644 index 000000000..751273159 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/admissionregistration/v1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AdmissionregistrationV1Interface interface { + RESTClient() rest.Interface + MutatingWebhookConfigurationsGetter + ValidatingWebhookConfigurationsGetter +} + +// AdmissionregistrationV1Client is used to interact with features provided by the admissionregistration.k8s.io group. +type AdmissionregistrationV1Client struct { + restClient rest.Interface +} + +func (c *AdmissionregistrationV1Client) MutatingWebhookConfigurations() MutatingWebhookConfigurationInterface { + return newMutatingWebhookConfigurations(c) +} + +func (c *AdmissionregistrationV1Client) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface { + return newValidatingWebhookConfigurations(c) +} + +// NewForConfig creates a new AdmissionregistrationV1Client for the given config. +func NewForConfig(c *rest.Config) (*AdmissionregistrationV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AdmissionregistrationV1Client{client}, nil +} + +// NewForConfigOrDie creates a new AdmissionregistrationV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AdmissionregistrationV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AdmissionregistrationV1Client for the given RESTClient. +func New(c rest.Interface) *AdmissionregistrationV1Client { + return &AdmissionregistrationV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AdmissionregistrationV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/doc.go new file mode 100644 index 000000000..3af5d054f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/generated_expansion.go new file mode 100644 index 000000000..a5b062e37 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type MutatingWebhookConfigurationExpansion interface{} + +type ValidatingWebhookConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go new file mode 100644 index 000000000..1f5e5e380 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// MutatingWebhookConfigurationsGetter has a method to return a MutatingWebhookConfigurationInterface. +// A group's client should implement this interface. +type MutatingWebhookConfigurationsGetter interface { + MutatingWebhookConfigurations() MutatingWebhookConfigurationInterface +} + +// MutatingWebhookConfigurationInterface has methods to work with MutatingWebhookConfiguration resources. 
+type MutatingWebhookConfigurationInterface interface { + Create(*v1.MutatingWebhookConfiguration) (*v1.MutatingWebhookConfiguration, error) + Update(*v1.MutatingWebhookConfiguration) (*v1.MutatingWebhookConfiguration, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.MutatingWebhookConfiguration, error) + List(opts metav1.ListOptions) (*v1.MutatingWebhookConfigurationList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) + MutatingWebhookConfigurationExpansion +} + +// mutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface +type mutatingWebhookConfigurations struct { + client rest.Interface +} + +// newMutatingWebhookConfigurations returns a MutatingWebhookConfigurations +func newMutatingWebhookConfigurations(c *AdmissionregistrationV1Client) *mutatingWebhookConfigurations { + return &mutatingWebhookConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. +func (c *mutatingWebhookConfigurations) Get(name string, options metav1.GetOptions) (result *v1.MutatingWebhookConfiguration, err error) { + result = &v1.MutatingWebhookConfiguration{} + err = c.client.Get(). + Resource("mutatingwebhookconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. +func (c *mutatingWebhookConfigurations) List(opts metav1.ListOptions) (result *v1.MutatingWebhookConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.MutatingWebhookConfigurationList{} + err = c.client.Get(). + Resource("mutatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. +func (c *mutatingWebhookConfigurations) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("mutatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. +func (c *mutatingWebhookConfigurations) Create(mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration) (result *v1.MutatingWebhookConfiguration, err error) { + result = &v1.MutatingWebhookConfiguration{} + err = c.client.Post(). + Resource("mutatingwebhookconfigurations"). + Body(mutatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. 
+func (c *mutatingWebhookConfigurations) Update(mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration) (result *v1.MutatingWebhookConfiguration, err error) { + result = &v1.MutatingWebhookConfiguration{} + err = c.client.Put(). + Resource("mutatingwebhookconfigurations"). + Name(mutatingWebhookConfiguration.Name). + Body(mutatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. +func (c *mutatingWebhookConfigurations) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("mutatingwebhookconfigurations"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *mutatingWebhookConfigurations) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("mutatingwebhookconfigurations"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched mutatingWebhookConfiguration. +func (c *mutatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) { + result = &v1.MutatingWebhookConfiguration{} + err = c.client.Patch(pt). + Resource("mutatingwebhookconfigurations"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go new file mode 100644 index 000000000..7987b6e30 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ValidatingWebhookConfigurationsGetter has a method to return a ValidatingWebhookConfigurationInterface. +// A group's client should implement this interface. +type ValidatingWebhookConfigurationsGetter interface { + ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface +} + +// ValidatingWebhookConfigurationInterface has methods to work with ValidatingWebhookConfiguration resources. 
+type ValidatingWebhookConfigurationInterface interface { + Create(*v1.ValidatingWebhookConfiguration) (*v1.ValidatingWebhookConfiguration, error) + Update(*v1.ValidatingWebhookConfiguration) (*v1.ValidatingWebhookConfiguration, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.ValidatingWebhookConfiguration, error) + List(opts metav1.ListOptions) (*v1.ValidatingWebhookConfigurationList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) + ValidatingWebhookConfigurationExpansion +} + +// validatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface +type validatingWebhookConfigurations struct { + client rest.Interface +} + +// newValidatingWebhookConfigurations returns a ValidatingWebhookConfigurations +func newValidatingWebhookConfigurations(c *AdmissionregistrationV1Client) *validatingWebhookConfigurations { + return &validatingWebhookConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. +func (c *validatingWebhookConfigurations) Get(name string, options metav1.GetOptions) (result *v1.ValidatingWebhookConfiguration, err error) { + result = &v1.ValidatingWebhookConfiguration{} + err = c.client.Get(). + Resource("validatingwebhookconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. +func (c *validatingWebhookConfigurations) List(opts metav1.ListOptions) (result *v1.ValidatingWebhookConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ValidatingWebhookConfigurationList{} + err = c.client.Get(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. +func (c *validatingWebhookConfigurations) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. +func (c *validatingWebhookConfigurations) Create(validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration) (result *v1.ValidatingWebhookConfiguration, err error) { + result = &v1.ValidatingWebhookConfiguration{} + err = c.client.Post(). + Resource("validatingwebhookconfigurations"). + Body(validatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Update takes the representation of a validatingWebhookConfiguration and updates it. 
Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. +func (c *validatingWebhookConfigurations) Update(validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration) (result *v1.ValidatingWebhookConfiguration, err error) { + result = &v1.ValidatingWebhookConfiguration{} + err = c.client.Put(). + Resource("validatingwebhookconfigurations"). + Name(validatingWebhookConfiguration.Name). + Body(validatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. +func (c *validatingWebhookConfigurations) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("validatingwebhookconfigurations"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *validatingWebhookConfigurations) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched validatingWebhookConfiguration. +func (c *validatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) { + result = &v1.ValidatingWebhookConfiguration{} + err = c.client.Patch(pt). + Resource("validatingwebhookconfigurations"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go index b13ea7953..2d93ff02e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/admissionregistration/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -76,7 +75,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go index da19c7596..621c734af 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/apps/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -91,7 +90,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go index 2c9db886b..e5dd64d98 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/apps/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -81,7 +80,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go index 99d677f40..7ca4e0b20 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go @@ -20,7 +20,6 @@ package v1beta2 import ( v1beta2 "k8s.io/api/apps/v1beta2" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -91,7 +90,7 @@ func setConfigDefaults(config *rest.Config) error { gv := 
v1beta2.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go index f007b05ef..ec63179ea 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go @@ -20,7 +20,6 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/auditregistration/v1alpha1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go index 3bdcee598..de8864e22 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/authentication/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go index 7f3334a0c..816bd0a2c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/authentication/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go 
b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go index e84b90084..2cc226322 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/authorization/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -86,7 +85,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go index 7f236f6e3..88eac75b7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/authorization/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -86,7 +85,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go index 2bd49e2db..4f3e96aec 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/autoscaling/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go index 3a49b26b3..c1a91fc3e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go @@ -20,7 +20,6 @@ package v2beta1 import ( v2beta1 "k8s.io/api/autoscaling/v2beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v2beta1.SchemeGroupVersion 
config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go index 03fe25e48..bd2b39270 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go @@ -20,7 +20,6 @@ package v2beta2 import ( v2beta2 "k8s.io/api/autoscaling/v2beta2" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v2beta2.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go index d5e35e6b2..8dfc118a3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/batch/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go index aa71ca833..257085358 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/batch/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go index e6c6306b8..d45c19d52 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go @@ -20,7 +20,6 @@ package v2alpha1 import ( v2alpha1 
"k8s.io/api/batch/v2alpha1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v2alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go index baac42ee2..1c52d551b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/certificates/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go index 9b566f310..0df7b71bf 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/coordination/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go index 91a764843..d68ed5d34 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/coordination/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git 
a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go index 044a28ebd..428d2afa3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/core/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -146,7 +145,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/api" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go index 8d6b6e879..feacd307f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go @@ -46,6 +46,9 @@ type PodInterface interface { List(opts metav1.ListOptions) (*v1.PodList, error) Watch(opts metav1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Pod, err error) + GetEphemeralContainers(podName string, options metav1.GetOptions) (*v1.EphemeralContainers, error) + UpdateEphemeralContainers(podName string, ephemeralContainers *v1.EphemeralContainers) (*v1.EphemeralContainers, error) + PodExpansion } @@ -189,3 +192,31 @@ func (c *pods) Patch(name string, pt types.PatchType, data []byte, subresources Into(result) return } + +// GetEphemeralContainers takes name of the pod, and returns the corresponding v1.EphemeralContainers object, and an error if there is any. +func (c *pods) GetEphemeralContainers(podName string, options metav1.GetOptions) (result *v1.EphemeralContainers, err error) { + result = &v1.EphemeralContainers{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + Name(podName). + SubResource("ephemeralcontainers"). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// UpdateEphemeralContainers takes the top resource name and the representation of a ephemeralContainers and updates it. Returns the server's representation of the ephemeralContainers, and an error, if there is any. +func (c *pods) UpdateEphemeralContainers(podName string, ephemeralContainers *v1.EphemeralContainers) (result *v1.EphemeralContainers, err error) { + result = &v1.EphemeralContainers{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pods"). + Name(podName). + SubResource("ephemeralcontainers"). + Body(ephemeralContainers). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/discovery_client.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/discovery_client.go new file mode 100644 index 000000000..e65a0988d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/discovery_client.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/discovery/v1alpha1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type DiscoveryV1alpha1Interface interface { + RESTClient() rest.Interface + EndpointSlicesGetter +} + +// DiscoveryV1alpha1Client is used to interact with features provided by the discovery.k8s.io group. +type DiscoveryV1alpha1Client struct { + restClient rest.Interface +} + +func (c *DiscoveryV1alpha1Client) EndpointSlices(namespace string) EndpointSliceInterface { + return newEndpointSlices(c, namespace) +} + +// NewForConfig creates a new DiscoveryV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*DiscoveryV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &DiscoveryV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new DiscoveryV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *DiscoveryV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new DiscoveryV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *DiscoveryV1alpha1Client { + return &DiscoveryV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *DiscoveryV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/doc.go new file mode 100644 index 000000000..df51baa4d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
+package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/endpointslice.go new file mode 100644 index 000000000..417514761 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/endpointslice.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "k8s.io/api/discovery/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// EndpointSlicesGetter has a method to return a EndpointSliceInterface. +// A group's client should implement this interface. +type EndpointSlicesGetter interface { + EndpointSlices(namespace string) EndpointSliceInterface +} + +// EndpointSliceInterface has methods to work with EndpointSlice resources. +type EndpointSliceInterface interface { + Create(*v1alpha1.EndpointSlice) (*v1alpha1.EndpointSlice, error) + Update(*v1alpha1.EndpointSlice) (*v1alpha1.EndpointSlice, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.EndpointSlice, error) + List(opts v1.ListOptions) (*v1alpha1.EndpointSliceList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.EndpointSlice, err error) + EndpointSliceExpansion +} + +// endpointSlices implements EndpointSliceInterface +type endpointSlices struct { + client rest.Interface + ns string +} + +// newEndpointSlices returns a EndpointSlices +func newEndpointSlices(c *DiscoveryV1alpha1Client, namespace string) *endpointSlices { + return &endpointSlices{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. +func (c *endpointSlices) Get(name string, options v1.GetOptions) (result *v1alpha1.EndpointSlice, err error) { + result = &v1alpha1.EndpointSlice{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpointslices"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. +func (c *endpointSlices) List(opts v1.ListOptions) (result *v1alpha1.EndpointSliceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.EndpointSliceList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpointslices"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested endpointSlices. +func (c *endpointSlices) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("endpointslices"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. +func (c *endpointSlices) Create(endpointSlice *v1alpha1.EndpointSlice) (result *v1alpha1.EndpointSlice, err error) { + result = &v1alpha1.EndpointSlice{} + err = c.client.Post(). + Namespace(c.ns). + Resource("endpointslices"). + Body(endpointSlice). + Do(). + Into(result) + return +} + +// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. +func (c *endpointSlices) Update(endpointSlice *v1alpha1.EndpointSlice) (result *v1alpha1.EndpointSlice, err error) { + result = &v1alpha1.EndpointSlice{} + err = c.client.Put(). + Namespace(c.ns). + Resource("endpointslices"). + Name(endpointSlice.Name). + Body(endpointSlice). + Do(). + Into(result) + return +} + +// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. +func (c *endpointSlices) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("endpointslices"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *endpointSlices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("endpointslices"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched endpointSlice. +func (c *endpointSlices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.EndpointSlice, err error) { + result = &v1alpha1.EndpointSlice{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("endpointslices"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/generated_expansion.go new file mode 100644 index 000000000..e8ceb59a4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type EndpointSliceExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go new file mode 100644 index 000000000..312ee4283 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go @@ -0,0 +1,98 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + "k8s.io/api/events/v1beta1" + "k8s.io/apimachinery/pkg/types" +) + +// The EventExpansion interface allows manually adding extra methods to the EventInterface. +// TODO: Add querying functions to the event expansion +type EventExpansion interface { + // CreateWithEventNamespace is the same as a Create + // except that it sends the request to the event.Namespace. + CreateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) + // UpdateWithEventNamespace is the same as a Update + // except that it sends the request to the event.Namespace. + UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) + // PatchWithEventNamespace is the same as an Update + // except that it sends the request to the event.Namespace. + PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1beta1.Event, error) +} + +// CreateWithEventNamespace makes a new event. +// Returns the copy of the event the server returns, or an error. +// The namespace to create the event within is deduced from the event. +// it must either match this event client's namespace, or this event client must +// have been created with the "" namespace. +func (e *events) CreateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) { + if e.ns != "" && event.Namespace != e.ns { + return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + } + result := &v1beta1.Event{} + err := e.client.Post(). + NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). + Resource("events"). + Body(event). + Do(). + Into(result) + return result, err +} + +// UpdateWithEventNamespace modifies an existing event. +// It returns the copy of the event that the server returns, or an error. +// The namespace and key to update the event within is deduced from the event. +// The namespace must either match this event client's namespace, or this event client must have been +// created with the "" namespace. +// Update also requires the ResourceVersion to be set in the event object. +func (e *events) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) { + if e.ns != "" && event.Namespace != e.ns { + return nil, fmt.Errorf("can't update an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + } + result := &v1beta1.Event{} + err := e.client.Put(). 
+ NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). + Resource("events"). + Name(event.Name). + Body(event). + Do(). + Into(result) + return result, err +} + +// PatchWithEventNamespace modifies an existing event. +// It returns the copy of the event that the server returns, or an error. +// The namespace and name of the target event is deduced from the event. +// The namespace must either match this event client's namespace, or this event client must +// have been created with the "" namespace. +func (e *events) PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1beta1.Event, error) { + if e.ns != "" && event.Namespace != e.ns { + return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + } + result := &v1beta1.Event{} + err := e.client.Patch(types.StrategicMergePatchType). + NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). + Resource("events"). + Name(event.Name). + Body(data). + Do(). + Into(result) + return result, err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go index fb59635bb..e372ccffa 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/events/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go index e27f693f8..f6df76963 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go @@ -17,5 +17,3 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. 
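Illustrative sketch (not part of the vendored change): one hypothetical way the CreateWithEventNamespace expansion added above could be called through a typed events/v1beta1 client; the clientset, event name, and field values are assumptions, not taken from this repository.

    package example

    import (
        eventsv1beta1 "k8s.io/api/events/v1beta1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    func emitDemoEvent(cs kubernetes.Interface) error {
        ev := &eventsv1beta1.Event{
            ObjectMeta: metav1.ObjectMeta{Name: "demo.1", Namespace: "default"},
            Reason:     "Demo",
            Type:       "Normal",
        }
        // The target namespace is taken from the event itself; the client must either
        // be namespace-less ("") or match ev.Namespace.
        _, err := cs.EventsV1beta1().Events("").CreateWithEventNamespace(ev)
        return err
    }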
package v1beta1 - -type EventExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go index 0e9edf5cc..e3b22aa44 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/extensions/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -30,6 +29,7 @@ type ExtensionsV1beta1Interface interface { DaemonSetsGetter DeploymentsGetter IngressesGetter + NetworkPoliciesGetter PodSecurityPoliciesGetter ReplicaSetsGetter } @@ -51,6 +51,10 @@ func (c *ExtensionsV1beta1Client) Ingresses(namespace string) IngressInterface { return newIngresses(c, namespace) } +func (c *ExtensionsV1beta1Client) NetworkPolicies(namespace string) NetworkPolicyInterface { + return newNetworkPolicies(c, namespace) +} + func (c *ExtensionsV1beta1Client) PodSecurityPolicies() PodSecurityPolicyInterface { return newPodSecurityPolicies(c) } @@ -91,7 +95,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go index cfaeebd05..41d28f041 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go @@ -22,6 +22,8 @@ type DaemonSetExpansion interface{} type IngressExpansion interface{} +type NetworkPolicyExpansion interface{} + type PodSecurityPolicyExpansion interface{} type ReplicaSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go new file mode 100644 index 000000000..0607e2dd4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/extensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// NetworkPoliciesGetter has a method to return a NetworkPolicyInterface. 
+// A group's client should implement this interface. +type NetworkPoliciesGetter interface { + NetworkPolicies(namespace string) NetworkPolicyInterface +} + +// NetworkPolicyInterface has methods to work with NetworkPolicy resources. +type NetworkPolicyInterface interface { + Create(*v1beta1.NetworkPolicy) (*v1beta1.NetworkPolicy, error) + Update(*v1beta1.NetworkPolicy) (*v1beta1.NetworkPolicy, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.NetworkPolicy, error) + List(opts v1.ListOptions) (*v1beta1.NetworkPolicyList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.NetworkPolicy, err error) + NetworkPolicyExpansion +} + +// networkPolicies implements NetworkPolicyInterface +type networkPolicies struct { + client rest.Interface + ns string +} + +// newNetworkPolicies returns a NetworkPolicies +func newNetworkPolicies(c *ExtensionsV1beta1Client, namespace string) *networkPolicies { + return &networkPolicies{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. +func (c *networkPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.NetworkPolicy, err error) { + result = &v1beta1.NetworkPolicy{} + err = c.client.Get(). + Namespace(c.ns). + Resource("networkpolicies"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. +func (c *networkPolicies) List(opts v1.ListOptions) (result *v1beta1.NetworkPolicyList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.NetworkPolicyList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("networkpolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested networkPolicies. +func (c *networkPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("networkpolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. +func (c *networkPolicies) Create(networkPolicy *v1beta1.NetworkPolicy) (result *v1beta1.NetworkPolicy, err error) { + result = &v1beta1.NetworkPolicy{} + err = c.client.Post(). + Namespace(c.ns). + Resource("networkpolicies"). + Body(networkPolicy). + Do(). + Into(result) + return +} + +// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. +func (c *networkPolicies) Update(networkPolicy *v1beta1.NetworkPolicy) (result *v1beta1.NetworkPolicy, err error) { + result = &v1beta1.NetworkPolicy{} + err = c.client.Put(). + Namespace(c.ns). + Resource("networkpolicies"). 
+ Name(networkPolicy.Name). + Body(networkPolicy). + Do(). + Into(result) + return +} + +// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. +func (c *networkPolicies) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("networkpolicies"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *networkPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("networkpolicies"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched networkPolicy. +func (c *networkPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.NetworkPolicy, err error) { + result = &v1beta1.NetworkPolicy{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("networkpolicies"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go index 8684db456..5315d9b92 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/networking/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go index 541bf6a9a..ee523f8e7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/networking/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go index 863f2d4dc..e7acc27e4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go +++ 
b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go @@ -20,7 +20,6 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/node/v1alpha1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go index 52683e201..b38d9acac 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/node/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go index 020e185e6..8b8b22c6d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/policy/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -81,7 +80,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go index e3855bb9b..1bc0179c6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/rbac/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -86,7 +85,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go 
b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go index de83531ed..efbbc68be 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go @@ -20,7 +20,6 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/rbac/v1alpha1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -86,7 +85,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go index 46718d731..4db94cfad 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/rbac/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -86,7 +85,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go index bd7e1e54f..5028bac89 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/scheduling/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go index 375f41b8d..83bc0b8a9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go @@ -20,7 +20,6 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/scheduling/v1alpha1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = 
serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go index 6feec4aec..373f5cca8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/scheduling/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go index c2a03b960..8d3a8d8e1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go @@ -20,7 +20,6 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/settings/v1alpha1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go index 92378cf7f..1afbe93c9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/storage/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -76,7 +75,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go index c52f630ac..32d503060 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go @@ -20,7 +20,6 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/storage/v1alpha1" - serializer 
"k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go index e9916bc0a..5e12b025b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/storage/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -86,7 +85,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go index 3f6b9bc23..fb81fb7b1 100644 --- a/vendor/k8s.io/client-go/rest/config.go +++ b/vendor/k8s.io/client-go/rest/config.go @@ -94,6 +94,10 @@ type Config struct { // UserAgent is an optional field that specifies the caller of this request. UserAgent string + // DisableCompression bypasses automatic GZip compression requests to the + // server. + DisableCompression bool + // Transport may be used for custom HTTP behavior. This attribute may not // be specified with the TLS client certificate options. Use WrapTransport // to provide additional per-server middleware behavior. @@ -207,6 +211,12 @@ type TLSClientConfig struct { // CAData holds PEM-encoded bytes (typically read from a root certificates bundle). // CAData takes precedence over CAFile CAData []byte + + // NextProtos is a list of supported application level protocols, in order of preference. + // Used to populate tls.Config.NextProtos. + // To indicate to the server http/1.1 is preferred over http/2, set to ["http/1.1", "h2"] (though the server is free to ignore that preference). + // To use only http/1.1, set to ["http/1.1"]. + NextProtos []string } var _ fmt.Stringer = TLSClientConfig{} @@ -232,6 +242,7 @@ func (c TLSClientConfig) String() string { CertData: c.CertData, KeyData: c.KeyData, CAData: c.CAData, + NextProtos: c.NextProtos, } // Explicitly mark non-empty credential fields as redacted. 
if len(cc.CertData) != 0 { @@ -487,7 +498,7 @@ func AddUserAgent(config *Config, userAgent string) *Config { return config } -// AnonymousClientConfig returns a copy of the given config with all user credentials (cert/key, bearer token, and username/password) removed +// AnonymousClientConfig returns a copy of the given config with all user credentials (cert/key, bearer token, and username/password) and custom transports (WrapTransport, Transport) removed func AnonymousClientConfig(config *Config) *Config { // copy only known safe fields return &Config{ @@ -499,15 +510,15 @@ func AnonymousClientConfig(config *Config) *Config { ServerName: config.ServerName, CAFile: config.TLSClientConfig.CAFile, CAData: config.TLSClientConfig.CAData, + NextProtos: config.TLSClientConfig.NextProtos, }, - RateLimiter: config.RateLimiter, - UserAgent: config.UserAgent, - Transport: config.Transport, - WrapTransport: config.WrapTransport, - QPS: config.QPS, - Burst: config.Burst, - Timeout: config.Timeout, - Dial: config.Dial, + RateLimiter: config.RateLimiter, + UserAgent: config.UserAgent, + DisableCompression: config.DisableCompression, + QPS: config.QPS, + Burst: config.Burst, + Timeout: config.Timeout, + Dial: config.Dial, } } @@ -538,14 +549,16 @@ func CopyConfig(config *Config) *Config { CertData: config.TLSClientConfig.CertData, KeyData: config.TLSClientConfig.KeyData, CAData: config.TLSClientConfig.CAData, + NextProtos: config.TLSClientConfig.NextProtos, }, - UserAgent: config.UserAgent, - Transport: config.Transport, - WrapTransport: config.WrapTransport, - QPS: config.QPS, - Burst: config.Burst, - RateLimiter: config.RateLimiter, - Timeout: config.Timeout, - Dial: config.Dial, + UserAgent: config.UserAgent, + DisableCompression: config.DisableCompression, + Transport: config.Transport, + WrapTransport: config.WrapTransport, + QPS: config.QPS, + Burst: config.Burst, + RateLimiter: config.RateLimiter, + Timeout: config.Timeout, + Dial: config.Dial, } } diff --git a/vendor/k8s.io/client-go/rest/plugin.go b/vendor/k8s.io/client-go/rest/plugin.go index 83ef5ae32..0bc2d03f6 100644 --- a/vendor/k8s.io/client-go/rest/plugin.go +++ b/vendor/k8s.io/client-go/rest/plugin.go @@ -55,7 +55,7 @@ func RegisterAuthProviderPlugin(name string, plugin Factory) error { pluginsLock.Lock() defer pluginsLock.Unlock() if _, found := plugins[name]; found { - return fmt.Errorf("Auth Provider Plugin %q was registered twice", name) + return fmt.Errorf("auth Provider Plugin %q was registered twice", name) } klog.V(4).Infof("Registered Auth Provider Plugin %q", name) plugins[name] = plugin @@ -67,7 +67,7 @@ func GetAuthProvider(clusterAddress string, apc *clientcmdapi.AuthProviderConfig defer pluginsLock.Unlock() p, ok := plugins[apc.Name] if !ok { - return nil, fmt.Errorf("No Auth Provider found for name %q", apc.Name) + return nil, fmt.Errorf("no Auth Provider found for name %q", apc.Name) } return p(clusterAddress, apc.Config, persister) } diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go index dd0630387..491f8bbd1 100644 --- a/vendor/k8s.io/client-go/rest/request.go +++ b/vendor/k8s.io/client-go/rest/request.go @@ -521,14 +521,24 @@ func (r Request) finalURLTemplate() url.URL { return *url } -func (r *Request) tryThrottle() { +func (r *Request) tryThrottle() error { + if r.throttle == nil { + return nil + } + now := time.Now() - if r.throttle != nil { + var err error + if r.ctx != nil { + err = r.throttle.Wait(r.ctx) + } else { r.throttle.Accept() } + if latency := time.Since(now); 
latency > longThrottleLatency { klog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String()) } + + return err } // Watch attempts to begin watching the requested location. @@ -592,10 +602,15 @@ func (r *Request) WatchWithSpecificDecoders(wrapperDecoderFn func(io.ReadCloser) if result := r.transformResponse(resp, req); result.err != nil { return nil, result.err } - return nil, fmt.Errorf("for request '%+v', got status: %v", url, resp.StatusCode) + return nil, fmt.Errorf("for request %s, got status: %v", url, resp.StatusCode) } wrapperDecoder := wrapperDecoderFn(resp.Body) - return watch.NewStreamWatcher(restclientwatch.NewDecoder(wrapperDecoder, embeddedDecoder)), nil + return watch.NewStreamWatcher( + restclientwatch.NewDecoder(wrapperDecoder, embeddedDecoder), + // use 500 to indicate that the cause of the error is unknown - other error codes + // are more specific to HTTP interactions, and set a reason + errors.NewClientErrorReporter(http.StatusInternalServerError, r.verb, "ClientWatchDecoding"), + ), nil } // updateURLMetrics is a convenience function for pushing metrics. @@ -625,13 +640,18 @@ func (r *Request) Stream() (io.ReadCloser, error) { return nil, r.err } - r.tryThrottle() + if err := r.tryThrottle(); err != nil { + return nil, err + } url := r.URL().String() req, err := http.NewRequest(r.verb, url, nil) if err != nil { return nil, err } + if r.body != nil { + req.Body = ioutil.NopCloser(r.body) + } if r.ctx != nil { req = req.WithContext(r.ctx) } @@ -727,7 +747,9 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { // We are retrying the request that we already send to apiserver // at least once before. // This request should also be throttled with the client-internal throttler. - r.tryThrottle() + if err := r.tryThrottle(); err != nil { + return err + } } resp, err := client.Do(req) updateURLMetrics(r, resp, err) @@ -798,7 +820,9 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { // * If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError // * http.Client.Do errors are returned directly. func (r *Request) Do() Result { - r.tryThrottle() + if err := r.tryThrottle(); err != nil { + return Result{err: err} + } var result Result err := r.request(func(req *http.Request, resp *http.Response) { @@ -812,7 +836,9 @@ func (r *Request) Do() Result { // DoRaw executes the request but does not process the response body. func (r *Request) DoRaw() ([]byte, error) { - r.tryThrottle() + if err := r.tryThrottle(); err != nil { + return nil, err + } var result Result err := r.request(func(req *http.Request, resp *http.Response) { @@ -845,13 +871,13 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu // 3. Apiserver closes connection. // 4. client-go should catch this and return an error. klog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err) - streamErr := fmt.Errorf("Stream error %#v when reading response body, may be caused by closed connection. Please retry.", err) + streamErr := fmt.Errorf("stream error when reading response body, may be caused by closed connection. Please retry. Original error: %v", err) return Result{ err: streamErr, } default: - klog.Errorf("Unexpected error when reading response body: %#v", err) - unexpectedErr := fmt.Errorf("Unexpected error %#v when reading response body. 
Please retry.", err) + klog.Errorf("Unexpected error when reading response body: %v", err) + unexpectedErr := fmt.Errorf("unexpected error when reading response body. Please retry. Original error: %v", err) return Result{ err: unexpectedErr, } diff --git a/vendor/k8s.io/client-go/rest/transport.go b/vendor/k8s.io/client-go/rest/transport.go index bd5749dc6..0800e4ec7 100644 --- a/vendor/k8s.io/client-go/rest/transport.go +++ b/vendor/k8s.io/client-go/rest/transport.go @@ -61,9 +61,10 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip // TransportConfig converts a client config to an appropriate transport config. func (c *Config) TransportConfig() (*transport.Config, error) { conf := &transport.Config{ - UserAgent: c.UserAgent, - Transport: c.Transport, - WrapTransport: c.WrapTransport, + UserAgent: c.UserAgent, + Transport: c.Transport, + WrapTransport: c.WrapTransport, + DisableCompression: c.DisableCompression, TLS: transport.TLSConfig{ Insecure: c.Insecure, ServerName: c.ServerName, @@ -73,10 +74,12 @@ func (c *Config) TransportConfig() (*transport.Config, error) { CertData: c.CertData, KeyFile: c.KeyFile, KeyData: c.KeyData, + NextProtos: c.NextProtos, }, - Username: c.Username, - Password: c.Password, - BearerToken: c.BearerToken, + Username: c.Username, + Password: c.Password, + BearerToken: c.BearerToken, + BearerTokenFile: c.BearerTokenFile, Impersonate: transport.ImpersonationConfig{ UserName: c.Impersonate.UserName, Groups: c.Impersonate.Groups, diff --git a/vendor/k8s.io/client-go/rest/watch/decoder.go b/vendor/k8s.io/client-go/rest/watch/decoder.go index 73bb63add..e95c020b2 100644 --- a/vendor/k8s.io/client-go/rest/watch/decoder.go +++ b/vendor/k8s.io/client-go/rest/watch/decoder.go @@ -54,7 +54,7 @@ func (d *Decoder) Decode() (watch.EventType, runtime.Object, error) { return "", nil, fmt.Errorf("unable to decode to metav1.Event") } switch got.Type { - case string(watch.Added), string(watch.Modified), string(watch.Deleted), string(watch.Error): + case string(watch.Added), string(watch.Modified), string(watch.Deleted), string(watch.Error), string(watch.Bookmark): default: return "", nil, fmt.Errorf("got invalid watch event type: %v", got.Type) } diff --git a/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go index c1ab45f33..da4a1624e 100644 --- a/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go @@ -38,6 +38,11 @@ func (in *TLSClientConfig) DeepCopyInto(out *TLSClientConfig) { *out = make([]byte, len(*in)) copy(*out, *in) } + if in.NextProtos != nil { + in, out := &in.NextProtos, &out.NextProtos + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/k8s.io/client-go/tools/auth/clientauth.go b/vendor/k8s.io/client-go/tools/auth/clientauth.go index 20339ab9d..c34172677 100644 --- a/vendor/k8s.io/client-go/tools/auth/clientauth.go +++ b/vendor/k8s.io/client-go/tools/auth/clientauth.go @@ -105,7 +105,7 @@ func LoadFromFile(path string) (*Info, error) { // The fields of client.Config with a corresponding field in the Info are set // with the value from the Info. 
func (info Info) MergeWithConfig(c restclient.Config) (restclient.Config, error) { - var config restclient.Config = c + var config = c config.Username = info.User config.Password = info.Password config.CAFile = info.CAFile @@ -118,6 +118,7 @@ func (info Info) MergeWithConfig(c restclient.Config) (restclient.Config, error) return config, nil } +// Complete returns true if the Kubernetes API authorization info is complete. func (info Info) Complete() bool { return len(info.User) > 0 || len(info.CertFile) > 0 || diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go index b5d392520..27a1c52cd 100644 --- a/vendor/k8s.io/client-go/tools/cache/controller.go +++ b/vendor/k8s.io/client-go/tools/cache/controller.go @@ -79,6 +79,7 @@ type controller struct { clock clock.Clock } +// Controller is a generic controller framework. type Controller interface { Run(stopCh <-chan struct{}) HasSynced() bool @@ -130,6 +131,8 @@ func (c *controller) HasSynced() bool { } func (c *controller) LastSyncResourceVersion() string { + c.reflectorMutex.RLock() + defer c.reflectorMutex.RUnlock() if c.reflector == nil { return "" } @@ -149,7 +152,7 @@ func (c *controller) processLoop() { for { obj, err := c.config.Queue.Pop(PopProcessFunc(c.config.Process)) if err != nil { - if err == FIFOClosedError { + if err == ErrFIFOClosed { return } if c.config.RetryOnError { diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go index f24eec254..db4519f2e 100644 --- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -160,7 +160,7 @@ func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) { return f.keyFunc(obj) } -// Return true if an Add/Update/Delete/AddIfNotPresent are called first, +// HasSynced returns true if an Add/Update/Delete/AddIfNotPresent are called first, // or an Update called first but the first batch of items inserted by Replace() has been popped func (f *DeltaFIFO) HasSynced() bool { f.lock.Lock() @@ -389,7 +389,7 @@ func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err err return d, exists, nil } -// Checks if the queue is closed +// IsClosed checks if the queue is closed func (f *DeltaFIFO) IsClosed() bool { f.closedLock.Lock() defer f.closedLock.Unlock() @@ -417,7 +417,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { // When Close() is called, the f.closed is set and the condition is broadcasted. // Which causes this loop to continue and return from the Pop(). if f.IsClosed() { - return nil, FIFOClosedError + return nil, ErrFIFOClosed } f.cond.Wait() @@ -593,6 +593,7 @@ type KeyGetter interface { // DeltaType is the type of a change (addition, deletion, etc) type DeltaType string +// Change type definition const ( Added DeltaType = "Added" Updated DeltaType = "Updated" diff --git a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go index 68d41c8ec..14ad492ec 100644 --- a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go +++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go @@ -48,14 +48,14 @@ type ExpirationCache struct { // ExpirationPolicy dictates when an object expires. Currently only abstracted out // so unittests don't rely on the system clock. 
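Illustrative sketch (not part of the vendored change): with FIFOClosedError renamed to the exported cache.ErrFIFOClosed above, a pop loop in caller code would compare against the new name; queue and process are assumptions.

    package example

    import "k8s.io/client-go/tools/cache"

    func runProcessLoop(queue cache.Queue, process cache.PopProcessFunc) {
        for {
            if _, err := queue.Pop(process); err != nil {
                if err == cache.ErrFIFOClosed {
                    return // the queue was closed; stop processing
                }
                // Other errors: Pop has already handled or requeued the item.
            }
        }
    }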
type ExpirationPolicy interface { - IsExpired(obj *timestampedEntry) bool + IsExpired(obj *TimestampedEntry) bool } // TTLPolicy implements a ttl based ExpirationPolicy. type TTLPolicy struct { // >0: Expire entries with an age > ttl // <=0: Don't expire any entry - Ttl time.Duration + TTL time.Duration // Clock used to calculate ttl expiration Clock clock.Clock @@ -63,26 +63,30 @@ type TTLPolicy struct { // IsExpired returns true if the given object is older than the ttl, or it can't // determine its age. -func (p *TTLPolicy) IsExpired(obj *timestampedEntry) bool { - return p.Ttl > 0 && p.Clock.Since(obj.timestamp) > p.Ttl +func (p *TTLPolicy) IsExpired(obj *TimestampedEntry) bool { + return p.TTL > 0 && p.Clock.Since(obj.Timestamp) > p.TTL } -// timestampedEntry is the only type allowed in a ExpirationCache. -type timestampedEntry struct { - obj interface{} - timestamp time.Time +// TimestampedEntry is the only type allowed in a ExpirationCache. +// Keep in mind that it is not safe to share timestamps between computers. +// Behavior may be inconsistent if you get a timestamp from the API Server and +// use it on the client machine as part of your ExpirationCache. +type TimestampedEntry struct { + Obj interface{} + Timestamp time.Time + key string } -// getTimestampedEntry returns the timestampedEntry stored under the given key. -func (c *ExpirationCache) getTimestampedEntry(key string) (*timestampedEntry, bool) { +// getTimestampedEntry returns the TimestampedEntry stored under the given key. +func (c *ExpirationCache) getTimestampedEntry(key string) (*TimestampedEntry, bool) { item, _ := c.cacheStorage.Get(key) - if tsEntry, ok := item.(*timestampedEntry); ok { + if tsEntry, ok := item.(*TimestampedEntry); ok { return tsEntry, true } return nil, false } -// getOrExpire retrieves the object from the timestampedEntry if and only if it hasn't +// getOrExpire retrieves the object from the TimestampedEntry if and only if it hasn't // already expired. It holds a write lock across deletion. func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) { // Prevent all inserts from the time we deem an item as "expired" to when we @@ -95,11 +99,11 @@ func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) { return nil, false } if c.expirationPolicy.IsExpired(timestampedItem) { - klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj) + klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.Obj) c.cacheStorage.Delete(key) return nil, false } - return timestampedItem.obj, true + return timestampedItem.Obj, true } // GetByKey returns the item stored under the key, or sets exists=false. 
@@ -126,10 +130,8 @@ func (c *ExpirationCache) List() []interface{} { list := make([]interface{}, 0, len(items)) for _, item := range items { - obj := item.(*timestampedEntry).obj - if key, err := c.keyFunc(obj); err != nil { - list = append(list, obj) - } else if obj, exists := c.getOrExpire(key); exists { + key := item.(*TimestampedEntry).key + if obj, exists := c.getOrExpire(key); exists { list = append(list, obj) } } @@ -151,7 +153,7 @@ func (c *ExpirationCache) Add(obj interface{}) error { c.expirationLock.Lock() defer c.expirationLock.Unlock() - c.cacheStorage.Add(key, &timestampedEntry{obj, c.clock.Now()}) + c.cacheStorage.Add(key, &TimestampedEntry{obj, c.clock.Now(), key}) return nil } @@ -184,7 +186,7 @@ func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) er if err != nil { return KeyError{item, err} } - items[key] = &timestampedEntry{item, ts} + items[key] = &TimestampedEntry{item, ts, key} } c.expirationLock.Lock() defer c.expirationLock.Unlock() @@ -199,10 +201,15 @@ func (c *ExpirationCache) Resync() error { // NewTTLStore creates and returns a ExpirationCache with a TTLPolicy func NewTTLStore(keyFunc KeyFunc, ttl time.Duration) Store { + return NewExpirationStore(keyFunc, &TTLPolicy{ttl, clock.RealClock{}}) +} + +// NewExpirationStore creates and returns a ExpirationCache for a given policy +func NewExpirationStore(keyFunc KeyFunc, expirationPolicy ExpirationPolicy) Store { return &ExpirationCache{ cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}), keyFunc: keyFunc, clock: clock.RealClock{}, - expirationPolicy: &TTLPolicy{ttl, clock.RealClock{}}, + expirationPolicy: expirationPolicy, } } diff --git a/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go index a096765f6..33afd32c8 100644 --- a/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go +++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go @@ -33,16 +33,19 @@ func (c *fakeThreadSafeMap) Delete(key string) { } } +// FakeExpirationPolicy keeps the list for keys which never expires. type FakeExpirationPolicy struct { NeverExpire sets.String RetrieveKeyFunc KeyFunc } -func (p *FakeExpirationPolicy) IsExpired(obj *timestampedEntry) bool { +// IsExpired used to check if object is expired. +func (p *FakeExpirationPolicy) IsExpired(obj *TimestampedEntry) bool { key, _ := p.RetrieveKeyFunc(obj) return !p.NeverExpire.Has(key) } +// NewFakeExpirationStore creates a new instance for the ExpirationCache. func NewFakeExpirationStore(keyFunc KeyFunc, deletedKeys chan<- string, expirationPolicy ExpirationPolicy, cacheClock clock.Clock) Store { cacheStorage := NewThreadSafeStore(Indexers{}, Indices{}) return &ExpirationCache{ diff --git a/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go b/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go index b59e2eb27..462d22660 100644 --- a/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go +++ b/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go @@ -16,7 +16,7 @@ limitations under the License. package cache -// FakeStore lets you define custom functions for store operations +// FakeCustomStore lets you define custom functions for store operations.
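Illustrative sketch (not part of the vendored change): with NewExpirationStore exported above, a TTL-based store can be built with an explicit policy; the TTL value is an assumption, and the clock package is the apimachinery one already used in this vendor tree.

    package example

    import (
        "time"

        "k8s.io/apimachinery/pkg/util/clock"
        "k8s.io/client-go/tools/cache"
    )

    func newTTLCache() cache.Store {
        // Equivalent to cache.NewTTLStore(cache.MetaNamespaceKeyFunc, 10*time.Minute),
        // but with the expiration policy spelled out.
        return cache.NewExpirationStore(cache.MetaNamespaceKeyFunc, &cache.TTLPolicy{
            TTL:   10 * time.Minute,
            Clock: clock.RealClock{},
        })
    }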
type FakeCustomStore struct { AddFunc func(obj interface{}) error UpdateFunc func(obj interface{}) error @@ -25,7 +25,7 @@ type FakeCustomStore struct { ListKeysFunc func() []string GetFunc func(obj interface{}) (item interface{}, exists bool, err error) GetByKeyFunc func(key string) (item interface{}, exists bool, err error) - ReplaceFunc func(list []interface{}, resourceVerion string) error + ReplaceFunc func(list []interface{}, resourceVersion string) error ResyncFunc func() error } diff --git a/vendor/k8s.io/client-go/tools/cache/fifo.go b/vendor/k8s.io/client-go/tools/cache/fifo.go index 508c5530c..7a3bc3d30 100644 --- a/vendor/k8s.io/client-go/tools/cache/fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/fifo.go @@ -34,7 +34,8 @@ type ErrRequeue struct { Err error } -var FIFOClosedError error = errors.New("DeltaFIFO: manipulating with closed queue") +// ErrFIFOClosed used when FIFO is closed +var ErrFIFOClosed = errors.New("DeltaFIFO: manipulating with closed queue") func (e ErrRequeue) Error() string { if e.Err == nil { @@ -66,7 +67,7 @@ type Queue interface { Close() } -// Helper function for popping from Queue. +// Pop is helper function for popping from Queue. // WARNING: Do NOT use this function in non-test code to avoid races // unless you really really really really know what you are doing. func Pop(queue Queue) interface{} { @@ -126,7 +127,7 @@ func (f *FIFO) Close() { f.cond.Broadcast() } -// Return true if an Add/Update/Delete/AddIfNotPresent are called first, +// HasSynced returns true if an Add/Update/Delete/AddIfNotPresent are called first, // or an Update called first but the first batch of items inserted by Replace() has been popped func (f *FIFO) HasSynced() bool { f.lock.Lock() @@ -242,7 +243,7 @@ func (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) { return item, exists, nil } -// Checks if the queue is closed +// IsClosed checks if the queue is closed func (f *FIFO) IsClosed() bool { f.closedLock.Lock() defer f.closedLock.Unlock() @@ -267,7 +268,7 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) { // When Close() is called, the f.closed is set and the condition is broadcasted. // Which causes this loop to continue and return from the Pop(). if f.IsClosed() { - return nil, FIFOClosedError + return nil, ErrFIFOClosed } f.cond.Wait() diff --git a/vendor/k8s.io/client-go/tools/cache/heap.go b/vendor/k8s.io/client-go/tools/cache/heap.go index 7357ff97a..e503a45a4 100644 --- a/vendor/k8s.io/client-go/tools/cache/heap.go +++ b/vendor/k8s.io/client-go/tools/cache/heap.go @@ -28,7 +28,9 @@ const ( closedMsg = "heap is closed" ) +// LessFunc is used to compare two objects in the heap. type LessFunc func(interface{}, interface{}) bool + type heapItem struct { obj interface{} // The object which is stored in the heap. index int // The index of the object's key in the Heap.queue. @@ -158,7 +160,7 @@ func (h *Heap) Add(obj interface{}) error { return nil } -// Adds all the items in the list to the queue and then signals the condition +// BulkAdd adds all the items in the list to the queue and then signals the condition // variable. It is useful when the caller would like to add all of the items // to the queue before consumer starts processing them. 
func (h *Heap) BulkAdd(list []interface{}) error { @@ -249,11 +251,11 @@ func (h *Heap) Pop() (interface{}, error) { h.cond.Wait() } obj := heap.Pop(h.data) - if obj != nil { - return obj, nil - } else { + if obj == nil { return nil, fmt.Errorf("object was removed from heap data") } + + return obj, nil } // List returns a list of all the items. diff --git a/vendor/k8s.io/client-go/tools/cache/index.go b/vendor/k8s.io/client-go/tools/cache/index.go index 15acb168e..9fa406838 100644 --- a/vendor/k8s.io/client-go/tools/cache/index.go +++ b/vendor/k8s.io/client-go/tools/cache/index.go @@ -23,17 +23,27 @@ import ( "k8s.io/apimachinery/pkg/util/sets" ) -// Indexer is a storage interface that lets you list objects using multiple indexing functions +// Indexer is a storage interface that lets you list objects using multiple indexing functions. +// There are three kinds of strings here. +// One is a storage key, as defined in the Store interface. +// Another kind is a name of an index. +// The third kind of string is an "indexed value", which is produced by an +// IndexFunc and can be a field value or any other string computed from the object. type Indexer interface { Store - // Retrieve list of objects that match on the named indexing function + // Index returns the stored objects whose set of indexed values + // intersects the set of indexed values of the given object, for + // the named index Index(indexName string, obj interface{}) ([]interface{}, error) - // IndexKeys returns the set of keys that match on the named indexing function. - IndexKeys(indexName, indexKey string) ([]string, error) - // ListIndexFuncValues returns the list of generated values of an Index func + // IndexKeys returns the storage keys of the stored objects whose + // set of indexed values for the named index includes the given + // indexed value + IndexKeys(indexName, indexedValue string) ([]string, error) + // ListIndexFuncValues returns all the indexed values of the given index ListIndexFuncValues(indexName string) []string - // ByIndex lists object that match on the named indexing function with the exact key - ByIndex(indexName, indexKey string) ([]interface{}, error) + // ByIndex returns the stored objects whose set of indexed values + // for the named index includes the given indexed value + ByIndex(indexName, indexedValue string) ([]interface{}, error) // GetIndexer return the indexers GetIndexers() Indexers @@ -42,7 +52,7 @@ type Indexer interface { AddIndexers(newIndexers Indexers) error } -// IndexFunc knows how to provide an indexed value for an object. +// IndexFunc knows how to compute the set of indexed values for an object. type IndexFunc func(obj interface{}) ([]string, error) // IndexFuncToKeyFuncAdapter adapts an indexFunc to a keyFunc. This is only useful if your index function returns @@ -65,6 +75,7 @@ func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc { } const ( + // NamespaceIndex is the lookup name for the most comment index function, which is to index by the namespace field. NamespaceIndex string = "namespace" ) diff --git a/vendor/k8s.io/client-go/tools/cache/listers.go b/vendor/k8s.io/client-go/tools/cache/listers.go index 311ff8c49..d649cd735 100644 --- a/vendor/k8s.io/client-go/tools/cache/listers.go +++ b/vendor/k8s.io/client-go/tools/cache/listers.go @@ -30,6 +30,7 @@ import ( // AppendFunc is used to add a matching item to whatever list the caller is using type AppendFunc func(interface{}) +// ListAll calls appendFn with each value retrieved from store which matches the selector. 
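Illustrative sketch (not part of the vendored change): the clarified Indexer contract above distinguishes storage keys from indexed values; a brief hypothetical lookup by the namespace index, with the pod object invented for the example.

    package example

    import (
        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/tools/cache"
    )

    func listInDefault() ([]interface{}, error) {
        indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
            cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
        })
        pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}
        if err := indexer.Add(pod); err != nil {
            return nil, err
        }
        // "default" is an indexed value (the namespace), not a storage key.
        return indexer.ByIndex(cache.NamespaceIndex, "default")
    }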
func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error { selectAll := selector.Empty() for _, m := range store.List() { @@ -50,6 +51,7 @@ func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error { return nil } +// ListAllByNamespace used to list items belongs to namespace from Indexer. func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error { selectAll := selector.Empty() if namespace == metav1.NamespaceAll { @@ -124,6 +126,7 @@ type GenericNamespaceLister interface { Get(name string) (runtime.Object, error) } +// NewGenericLister creates a new instance for the genericLister. func NewGenericLister(indexer Indexer, resource schema.GroupResource) GenericLister { return &genericLister{indexer: indexer, resource: resource} } diff --git a/vendor/k8s.io/client-go/tools/cache/mutation_cache.go b/vendor/k8s.io/client-go/tools/cache/mutation_cache.go index 4c6686e91..5d3245a60 100644 --- a/vendor/k8s.io/client-go/tools/cache/mutation_cache.go +++ b/vendor/k8s.io/client-go/tools/cache/mutation_cache.go @@ -42,6 +42,7 @@ type MutationCache interface { Mutation(interface{}) } +// ResourceVersionComparator is able to compare object versions. type ResourceVersionComparator interface { CompareResourceVersion(lhs, rhs runtime.Object) int } diff --git a/vendor/k8s.io/client-go/tools/cache/mutation_detector.go b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go index adb5b8be8..9903b2928 100644 --- a/vendor/k8s.io/client-go/tools/cache/mutation_detector.go +++ b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go @@ -36,12 +36,14 @@ func init() { mutationDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_CACHE_MUTATION_DETECTOR")) } -type CacheMutationDetector interface { +// MutationDetector is able to monitor if the object be modified outside. +type MutationDetector interface { AddObject(obj interface{}) Run(stopCh <-chan struct{}) } -func NewCacheMutationDetector(name string) CacheMutationDetector { +// NewCacheMutationDetector creates a new instance for the defaultCacheMutationDetector. +func NewCacheMutationDetector(name string) MutationDetector { if !mutationDetectionEnabled { return dummyMutationDetector{} } @@ -114,7 +116,7 @@ func (d *defaultCacheMutationDetector) CompareObjects() { altered := false for i, obj := range d.cachedObjs { if !reflect.DeepEqual(obj.cached, obj.copied) { - fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectDiff(obj.cached, obj.copied)) + fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectGoPrintSideBySide(obj.cached, obj.copied)) altered = true } } diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index c43b7fc52..8abde7131 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -17,16 +17,13 @@ limitations under the License. 
package cache import ( + "context" "errors" "fmt" "io" "math/rand" - "net" - "net/url" "reflect" - "strings" "sync" - "syscall" "time" apierrs "k8s.io/apimachinery/pkg/api/errors" @@ -35,9 +32,11 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/naming" + utilnet "k8s.io/apimachinery/pkg/util/net" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/pager" "k8s.io/klog" "k8s.io/utils/trace" ) @@ -68,6 +67,9 @@ type Reflector struct { lastSyncResourceVersion string // lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion lastSyncResourceVersionMutex sync.RWMutex + // WatchListPageSize is the requested chunk size of initial and resync watch lists. + // Defaults to pager.PageSize. + WatchListPageSize int64 } var ( @@ -79,7 +81,7 @@ var ( // NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector // The indexer is configured to key on namespace func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interface{}, resyncPeriod time.Duration) (indexer Indexer, reflector *Reflector) { - indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{"namespace": MetaNamespaceIndexFunc}) + indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{NamespaceIndex: MetaNamespaceIndexFunc}) reflector = NewReflector(lw, expectedType, indexer, resyncPeriod) return indexer, reflector } @@ -108,11 +110,6 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, return r } -func makeValidPrometheusMetricLabel(in string) string { - // this isn't perfect, but it removes our common characters - return strings.NewReplacer("/", "_", ".", "_", "-", "_", ":", "_").Replace(in) -} - // internalPackages are packages that ignored when creating a default reflector name. These packages are in the common // call chains to NewReflector, so they'd be low entropy names for reflectors var internalPackages = []string{"client-go/tools/cache/"} @@ -167,7 +164,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { options := metav1.ListOptions{ResourceVersion: "0"} if err := func() error { - initTrace := trace.New("Reflector " + r.name + " ListAndWatch") + initTrace := trace.New("Reflector ListAndWatch", trace.Field{"name", r.name}) defer initTrace.LogIfLong(10 * time.Second) var list runtime.Object var err error @@ -179,7 +176,16 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { panicCh <- r } }() - list, err = r.listerWatcher.List(options) + // Attempt to gather list in chunks, if supported by listerWatcher, if not, the first + // list request will return the full response. + pager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) { + return r.listerWatcher.List(opts) + })) + if r.WatchListPageSize != 0 { + pager.PageSize = r.WatchListPageSize + } + // Pager falls back to full list if paginated list calls fail due to an "Expired" error. + list, err = pager.List(context.Background(), options) close(listCh) }() select { @@ -257,6 +263,10 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { // We want to avoid situations of hanging watchers. Stop any wachers that do not // receive any events within the timeout window. TimeoutSeconds: &timeoutSeconds, + // To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks. 
+ // Reflector doesn't assume bookmarks are returned at all (if the server do not support + // watch bookmarks, it will ignore this field). + AllowWatchBookmarks: true, } w, err := r.listerWatcher.Watch(options) @@ -273,20 +283,21 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { // It doesn't make sense to re-list all objects because most likely we will be able to restart // watch where we ended. // If that's the case wait and resend watch request. - if urlError, ok := err.(*url.Error); ok { - if opError, ok := urlError.Err.(*net.OpError); ok { - if errno, ok := opError.Err.(syscall.Errno); ok && errno == syscall.ECONNREFUSED { - time.Sleep(time.Second) - continue - } - } + if utilnet.IsConnectionRefused(err) { + time.Sleep(time.Second) + continue } return nil } if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil { if err != errorStopRequested { - klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err) + switch { + case apierrs.IsResourceExpired(err): + klog.V(4).Infof("%s: watch of %v ended with: %v", r.name, r.expectedType, err) + default: + klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err) + } } return nil } @@ -354,6 +365,8 @@ loop: if err != nil { utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", r.name, event.Object, err)) } + case watch.Bookmark: + // A `Bookmark` means watch has synced here, just update the resourceVersion default: utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event)) } @@ -363,7 +376,7 @@ loop: } } - watchDuration := r.clock.Now().Sub(start) + watchDuration := r.clock.Since(start) if watchDuration < 1*time.Second && eventCount == 0 { return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name) } diff --git a/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go b/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go index 0945e5c3a..dd849c8fa 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go @@ -94,23 +94,6 @@ var metricsFactory = struct { metricsProvider: noopMetricsProvider{}, } -func newReflectorMetrics(name string) *reflectorMetrics { - var ret *reflectorMetrics - if len(name) == 0 { - return ret - } - return &reflectorMetrics{ - numberOfLists: metricsFactory.metricsProvider.NewListsMetric(name), - listDuration: metricsFactory.metricsProvider.NewListDurationMetric(name), - numberOfItemsInList: metricsFactory.metricsProvider.NewItemsInListMetric(name), - numberOfWatches: metricsFactory.metricsProvider.NewWatchesMetric(name), - numberOfShortWatches: metricsFactory.metricsProvider.NewShortWatchesMetric(name), - watchDuration: metricsFactory.metricsProvider.NewWatchDurationMetric(name), - numberOfItemsInWatch: metricsFactory.metricsProvider.NewItemsInWatchMetric(name), - lastResourceVersion: metricsFactory.metricsProvider.NewLastResourceVersionMetric(name), - } -} - // SetReflectorMetricsProvider sets the metrics provider func SetReflectorMetricsProvider(metricsProvider MetricsProvider) { metricsFactory.setProviders.Do(func() { diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go index b2f3dba07..c37423b66 100644 --- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -31,31 +31,118 @@ 
import ( "k8s.io/klog" ) -// SharedInformer has a shared data cache and is capable of distributing notifications for changes -// to the cache to multiple listeners who registered via AddEventHandler. If you use this, there is -// one behavior change compared to a standard Informer. When you receive a notification, the cache -// will be AT LEAST as fresh as the notification, but it MAY be more fresh. You should NOT depend -// on the contents of the cache exactly matching the notification you've received in handler -// functions. If there was a create, followed by a delete, the cache may NOT have your item. This -// has advantages over the broadcaster since it allows us to share a common cache across many -// controllers. Extending the broadcaster would have required us keep duplicate caches for each -// watch. +// SharedInformer provides eventually consistent linkage of its +// clients to the authoritative state of a given collection of +// objects. An object is identified by its API group, kind/resource, +// namespace, and name; the `ObjectMeta.UID` is not part of an +// object's ID as far as this contract is concerned. One +// SharedInformer provides linkage to objects of a particular API +// group and kind/resource. The linked object collection of a +// SharedInformer may be further restricted to one namespace and/or by +// label selector and/or field selector. +// +// The authoritative state of an object is what apiservers provide +// access to, and an object goes through a strict sequence of states. +// An object state is either "absent" or present with a +// ResourceVersion and other appropriate content. +// +// A SharedInformer gets object states from apiservers using a +// sequence of LIST and WATCH operations. Through this sequence the +// apiservers provide a sequence of "collection states" to the +// informer, where each collection state defines the state of every +// object of the collection. No promise --- beyond what is implied by +// other remarks here --- is made about how one informer's sequence of +// collection states relates to a different informer's sequence of +// collection states. +// +// A SharedInformer maintains a local cache, exposed by GetStore() and +// by GetIndexer() in the case of an indexed informer, of the state of +// each relevant object. This cache is eventually consistent with the +// authoritative state. This means that, unless prevented by +// persistent communication problems, if ever a particular object ID X +// is authoritatively associated with a state S then for every +// SharedInformer I whose collection includes (X, S) eventually either +// (1) I's cache associates X with S or a later state of X, (2) I is +// stopped, or (3) the authoritative state service for X terminates. +// To be formally complete, we say that the absent state meets any +// restriction by label selector or field selector. +// +// The local cache starts out empty, and gets populated and updated +// during `Run()`. +// +// As a simple example, if a collection of objects is henceforeth +// unchanging, a SharedInformer is created that links to that +// collection, and that SharedInformer is `Run()` then that +// SharedInformer's cache eventually holds an exact copy of that +// collection (unless it is stopped too soon, the authoritative state +// service ends, or communication problems between the two +// persistently thwart achievement). 
+// +// As another simple example, if the local cache ever holds a +// non-absent state for some object ID and the object is eventually +// removed from the authoritative state then eventually the object is +// removed from the local cache (unless the SharedInformer is stopped +// too soon, the authoritative state service ends, or communication +// problems persistently thwart the desired result). +// +// The keys in the Store are of the form namespace/name for namespaced +// objects, and are simply the name for non-namespaced objects. +// Clients can use `MetaNamespaceKeyFunc(obj)` to extract the key for +// a given object, and `SplitMetaNamespaceKey(key)` to split a key +// into its constituent parts. +// +// A client is identified here by a ResourceEventHandler. For every +// update to the SharedInformer's local cache and for every client +// added before `Run()`, eventually either the SharedInformer is +// stopped or the client is notified of the update. A client added +// after `Run()` starts gets a startup batch of notifications of +// additions of the object existing in the cache at the time that +// client was added; also, for every update to the SharedInformer's +// local cache after that client was added, eventually either the +// SharedInformer is stopped or that client is notified of that +// update. Client notifications happen after the corresponding cache +// update and, in the case of a SharedIndexInformer, after the +// corresponding index updates. It is possible that additional cache +// and index updates happen before such a prescribed notification. +// For a given SharedInformer and client, the notifications are +// delivered sequentially. For a given SharedInformer, client, and +// object ID, the notifications are delivered in order. +// +// A client must process each notification promptly; a SharedInformer +// is not engineered to deal well with a large backlog of +// notifications to deliver. Lengthy processing should be passed off +// to something else, for example through a +// `client-go/util/workqueue`. +// +// Each query to an informer's local cache --- whether a single-object +// lookup, a list operation, or a use of one of its indices --- is +// answered entirely from one of the collection states received by +// that informer. +// +// A delete notification exposes the last locally known non-absent +// state, except that its ResourceVersion is replaced with a +// ResourceVersion in which the object is actually absent. type SharedInformer interface { // AddEventHandler adds an event handler to the shared informer using the shared informer's resync // period. Events to a single handler are delivered sequentially, but there is no coordination // between different handlers. AddEventHandler(handler ResourceEventHandler) - // AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the - // specified resync period. Events to a single handler are delivered sequentially, but there is - // no coordination between different handlers. + // AddEventHandlerWithResyncPeriod adds an event handler to the + // shared informer using the specified resync period. The resync + // operation consists of delivering to the handler a create + // notification for every object in the informer's local cache; it + // does not add any interactions with the authoritative storage. AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) - // GetStore returns the Store. + // GetStore returns the informer's local cache as a Store. 
GetStore() Store // GetController gives back a synthetic interface that "votes" to start the informer GetController() Controller - // Run starts the shared informer, which will be stopped when stopCh is closed. + // Run starts and runs the shared informer, returning after it stops. + // The informer will be stopped when stopCh is closed. Run(stopCh <-chan struct{}) - // HasSynced returns true if the shared informer's store has synced. + // HasSynced returns true if the shared informer's store has been + // informed by at least one full LIST of the authoritative state + // of the informer's object collection. This is unrelated to "resync". HasSynced() bool // LastSyncResourceVersion is the resource version observed when last synced with the underlying // store. The value returned is not synchronized with access to the underlying store and is not @@ -63,6 +150,7 @@ type SharedInformer interface { LastSyncResourceVersion() string } +// SharedIndexInformer provides add and get Indexers ability based on SharedInformer. type SharedIndexInformer interface { SharedInformer // AddIndexers add indexers to the informer before it starts. @@ -102,8 +190,24 @@ const ( initialBufferSize = 1024 ) +// WaitForNamedCacheSync is a wrapper around WaitForCacheSync that generates log messages +// indicating that the caller identified by name is waiting for syncs, followed by +// either a successful or failed sync. +func WaitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool { + klog.Infof("Waiting for caches to sync for %s", controllerName) + + if !WaitForCacheSync(stopCh, cacheSyncs...) { + utilruntime.HandleError(fmt.Errorf("unable to sync caches for %s", controllerName)) + return false + } + + klog.Infof("Caches are synced for %s ", controllerName) + return true +} + // WaitForCacheSync waits for caches to populate. 
It returns true if it was successful, false // if the controller should shutdown +// callers should prefer WaitForNamedCacheSync() func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool { err := wait.PollUntil(syncedPollPeriod, func() (bool, error) { @@ -129,7 +233,7 @@ type sharedIndexInformer struct { controller Controller processor *sharedProcessor - cacheMutationDetector CacheMutationDetector + cacheMutationDetector MutationDetector // This block is tracked to handle late initialization of the controller listerWatcher ListerWatcher @@ -169,7 +273,7 @@ func (v *dummyController) HasSynced() bool { return v.informer.HasSynced() } -func (c *dummyController) LastSyncResourceVersion() string { +func (v *dummyController) LastSyncResourceVersion() string { return "" } @@ -555,7 +659,7 @@ func (p *processorListener) run() { case deleteNotification: p.handler.OnDelete(notification.oldObj) default: - utilruntime.HandleError(fmt.Errorf("unrecognized notification: %#v", next)) + utilruntime.HandleError(fmt.Errorf("unrecognized notification: %T", next)) } } // the only way to get here is if the p.nextCh is empty and closed diff --git a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go index 5cbdd17ed..33e6239a6 100644 --- a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go +++ b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go @@ -185,7 +185,7 @@ func (c *threadSafeMap) ByIndex(indexName, indexKey string) ([]interface{}, erro set := index[indexKey] list := make([]interface{}, 0, set.Len()) - for _, key := range set.List() { + for key := range set { list = append(list, c.items[key]) } @@ -302,6 +302,7 @@ func (c *threadSafeMap) Resync() error { return nil } +// NewThreadSafeStore creates a new instance of ThreadSafeStore. func NewThreadSafeStore(indexers Indexers, indices Indices) ThreadSafeStore { return &threadSafeMap{ items: map[string]interface{}{}, diff --git a/vendor/k8s.io/client-go/tools/cache/undelta_store.go b/vendor/k8s.io/client-go/tools/cache/undelta_store.go index 117df46c4..220845dd3 100644 --- a/vendor/k8s.io/client-go/tools/cache/undelta_store.go +++ b/vendor/k8s.io/client-go/tools/cache/undelta_store.go @@ -31,6 +31,7 @@ type UndeltaStore struct { // Assert that it implements the Store interface. var _ Store = &UndeltaStore{} +// Add inserts an object into the store and sends complete state by calling PushFunc. // Note about thread safety. The Store implementation (cache.cache) uses a lock for all methods. // In the functions below, the lock gets released and reacquired betweend the {Add,Delete,etc} // and the List. So, the following can happen, resulting in two identical calls to PushFunc. @@ -41,7 +42,6 @@ var _ Store = &UndeltaStore{} // 3 Store.Add(b) // 4 Store.List() -> [a,b] // 5 Store.List() -> [a,b] - func (u *UndeltaStore) Add(obj interface{}) error { if err := u.Store.Add(obj); err != nil { return err @@ -50,6 +50,7 @@ func (u *UndeltaStore) Add(obj interface{}) error { return nil } +// Update sets an item in the cache to its updated state and sends complete state by calling PushFunc. func (u *UndeltaStore) Update(obj interface{}) error { if err := u.Store.Update(obj); err != nil { return err @@ -58,6 +59,7 @@ func (u *UndeltaStore) Update(obj interface{}) error { return nil } +// Delete removes an item from the cache and sends complete state by calling PushFunc. 
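The expanded SharedInformer contract and the new WaitForNamedCacheSync helper are easiest to read next to a usage sketch. The following is an illustrative, self-contained example (not part of the vendored diff); the controller name and resync period are arbitrary.

package main

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	stopCh := make(chan struct{})
	defer close(stopCh)

	// One shared informer (and one local cache) per resource type, shared by all handlers.
	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	podInformer := factory.Core().V1().Pods().Informer()
	podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			pod := obj.(*corev1.Pod)
			fmt.Println("add", pod.Namespace+"/"+pod.Name)
		},
	})

	factory.Start(stopCh)
	// Block until the local cache has been populated by at least one full LIST,
	// logging the waiting/synced messages under the given controller name.
	if !cache.WaitForNamedCacheSync("example-controller", stopCh, podInformer.HasSynced) {
		return
	}
	<-stopCh
}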
func (u *UndeltaStore) Delete(obj interface{}) error { if err := u.Store.Delete(obj); err != nil { return err @@ -66,6 +68,10 @@ func (u *UndeltaStore) Delete(obj interface{}) error { return nil } +// Replace will delete the contents of current store, using instead the given list. +// 'u' takes ownership of the list, you should not reference the list again +// after calling this function. +// The new contents complete state will be sent by calling PushFunc after replacement. func (u *UndeltaStore) Replace(list []interface{}, resourceVersion string) error { if err := u.Store.Replace(list, resourceVersion); err != nil { return err diff --git a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go index a7b8c1c6e..9c6ac3b5d 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go @@ -228,6 +228,7 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI // blindly overwrite existing values based on precedence if len(configAuthInfo.Token) > 0 { mergedConfig.BearerToken = configAuthInfo.Token + mergedConfig.BearerTokenFile = configAuthInfo.TokenFile } else if len(configAuthInfo.TokenFile) > 0 { tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile) if err != nil { @@ -296,16 +297,6 @@ func makeUserIdentificationConfig(info clientauth.Info) *restclient.Config { return config } -// makeUserIdentificationFieldsConfig returns a client.Config capable of being merged using mergo for only server identification information -func makeServerIdentificationConfig(info clientauth.Info) restclient.Config { - config := restclient.Config{} - config.CAFile = info.CAFile - if info.Insecure != nil { - config.Insecure = *info.Insecure - } - return config -} - func canIdentifyUser(config restclient.Config) bool { return len(config.Username) > 0 || (len(config.CertFile) > 0 || len(config.CertData) > 0) || @@ -499,8 +490,9 @@ func (config *inClusterClientConfig) ClientConfig() (*restclient.Config, error) if server := config.overrides.ClusterInfo.Server; len(server) > 0 { icc.Host = server } - if token := config.overrides.AuthInfo.Token; len(token) > 0 { - icc.BearerToken = token + if len(config.overrides.AuthInfo.Token) > 0 || len(config.overrides.AuthInfo.TokenFile) > 0 { + icc.BearerToken = config.overrides.AuthInfo.Token + icc.BearerTokenFile = config.overrides.AuthInfo.TokenFile } if certificateAuthorityFile := config.overrides.ClusterInfo.CertificateAuthority; len(certificateAuthorityFile) > 0 { icc.TLSClientConfig.CAFile = certificateAuthorityFile diff --git a/vendor/k8s.io/client-go/tools/clientcmd/loader.go b/vendor/k8s.io/client-go/tools/clientcmd/loader.go index 7e928a918..4e37e7928 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/loader.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/loader.go @@ -127,6 +127,10 @@ type ClientConfigLoadingRules struct { // DefaultClientConfig is an optional field indicating what rules to use to calculate a default configuration. // This should match the overrides passed in to ClientConfig loader. DefaultClientConfig ClientConfig + + // WarnIfAllMissing indicates whether the configuration files pointed by KUBECONFIG environment variable are present or not. + // In case of missing files, it warns the user about the missing files. + WarnIfAllMissing bool } // ClientConfigLoadingRules implements the ClientConfigLoader interface. 
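The new WarnIfAllMissing behavior only kicks in when the loading rules come from the KUBECONFIG environment variable. A short sketch of the usual way those rules are consumed is below; it is illustrative only and not part of the vendored diff.

package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Honors $KUBECONFIG (possibly a list of files) and falls back to ~/.kube/config;
	// with WarnIfAllMissing set by the default rules, missing $KUBECONFIG entries are
	// only warned about instead of silently skipped.
	rules := clientcmd.NewDefaultClientConfigLoadingRules()
	cfg, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, &clientcmd.ConfigOverrides{}).ClientConfig()
	if err != nil {
		panic(err)
	}
	fmt.Println("API server:", cfg.Host)
}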
@@ -136,20 +140,23 @@ var _ ClientConfigLoader = &ClientConfigLoadingRules{} // use this constructor func NewDefaultClientConfigLoadingRules() *ClientConfigLoadingRules { chain := []string{} + warnIfAllMissing := false envVarFiles := os.Getenv(RecommendedConfigPathEnvVar) if len(envVarFiles) != 0 { fileList := filepath.SplitList(envVarFiles) // prevent the same path load multiple times chain = append(chain, deduplicate(fileList)...) + warnIfAllMissing = true } else { chain = append(chain, RecommendedHomeFile) } return &ClientConfigLoadingRules{ - Precedence: chain, - MigrationRules: currentMigrationRules(), + Precedence: chain, + MigrationRules: currentMigrationRules(), + WarnIfAllMissing: warnIfAllMissing, } } @@ -172,6 +179,7 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) { } errlist := []error{} + missingList := []string{} kubeConfigFiles := []string{} @@ -195,18 +203,26 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) { } config, err := LoadFromFile(filename) + if os.IsNotExist(err) { // skip missing files + // Add to the missing list to produce a warning + missingList = append(missingList, filename) continue } + if err != nil { - errlist = append(errlist, fmt.Errorf("Error loading config file \"%s\": %v", filename, err)) + errlist = append(errlist, fmt.Errorf("error loading config file \"%s\": %v", filename, err)) continue } kubeconfigs = append(kubeconfigs, config) } + if rules.WarnIfAllMissing && len(missingList) > 0 && len(kubeconfigs) == 0 { + klog.Warningf("Config not found: %s", strings.Join(missingList, ", ")) + } + // first merge all of our maps mapConfig := clientcmdapi.NewConfig() @@ -356,7 +372,7 @@ func LoadFromFile(filename string) (*clientcmdapi.Config, error) { if err != nil { return nil, err } - klog.V(6).Infoln("Config loaded from file", filename) + klog.V(6).Infoln("Config loaded from file: ", filename) // set LocationOfOrigin on every Cluster, User, and Context for key, obj := range config.AuthInfos { @@ -467,7 +483,7 @@ func ResolveLocalPaths(config *clientcmdapi.Config) error { } base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin)) if err != nil { - return fmt.Errorf("Could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err) + return fmt.Errorf("could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err) } if err := ResolvePaths(GetClusterFileReferences(cluster), base); err != nil { @@ -480,7 +496,7 @@ func ResolveLocalPaths(config *clientcmdapi.Config) error { } base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin)) if err != nil { - return fmt.Errorf("Could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err) + return fmt.Errorf("could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err) } if err := ResolvePaths(GetAuthInfoFileReferences(authInfo), base); err != nil { diff --git a/vendor/k8s.io/client-go/tools/clientcmd/validation.go b/vendor/k8s.io/client-go/tools/clientcmd/validation.go index 629c0b30a..2f927072b 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/validation.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/validation.go @@ -185,9 +185,10 @@ func validateClusterInfo(clusterName string, clusterInfo clientcmdapi.Cluster) [ } if len(clusterInfo.CertificateAuthority) != 0 { clientCertCA, err := os.Open(clusterInfo.CertificateAuthority) - defer clientCertCA.Close() if err != nil { validationErrors = append(validationErrors, 
fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err)) + } else { + defer clientCertCA.Close() } } @@ -223,16 +224,18 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []err if len(authInfo.ClientCertificate) != 0 { clientCertFile, err := os.Open(authInfo.ClientCertificate) - defer clientCertFile.Close() if err != nil { validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err)) + } else { + defer clientCertFile.Close() } } if len(authInfo.ClientKey) != 0 { clientKeyFile, err := os.Open(authInfo.ClientKey) - defer clientKeyFile.Close() if err != nil { validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err)) + } else { + defer clientKeyFile.Close() } } } @@ -250,8 +253,6 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []err for _, v := range authInfo.Exec.Env { if len(v.Name) == 0 { validationErrors = append(validationErrors, fmt.Errorf("env variable name must be specified for %v to use exec authentication plugin", authInfoName)) - } else if len(v.Value) == 0 { - validationErrors = append(validationErrors, fmt.Errorf("env variable %s value must be specified for %v to use exec authentication plugin", v.Name, authInfoName)) } } } diff --git a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go index 02bdebd1d..4be650c0c 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go @@ -16,12 +16,16 @@ limitations under the License. // Package leaderelection implements leader election of a set of endpoints. // It uses an annotation in the endpoints object to store the record of the -// election state. +// election state. This implementation does not guarantee that only one +// client is acting as a leader (a.k.a. fencing). // -// This implementation does not guarantee that only one client is acting as a -// leader (a.k.a. fencing). A client observes timestamps captured locally to -// infer the state of the leader election. Thus the implementation is tolerant -// to arbitrary clock skew, but is not tolerant to arbitrary clock skew rate. +// A client only acts on timestamps captured locally to infer the state of the +// leader election. The client does not consider timestamps in the leader +// election record to be accurate because these timestamps may not have been +// produced by a local clock. The implemention does not depend on their +// accuracy and only uses their change to indicate that another client has +// renewed the leader lease. Thus the implementation is tolerant to arbitrary +// clock skew, but is not tolerant to arbitrary clock skew rate. // // However the level of tolerance to skew rate can be configured by setting // RenewDeadline and LeaseDuration appropriately. 
The tolerance expressed as a @@ -85,6 +89,12 @@ func NewLeaderElector(lec LeaderElectionConfig) (*LeaderElector, error) { if lec.RetryPeriod < 1 { return nil, fmt.Errorf("retryPeriod must be greater than zero") } + if lec.Callbacks.OnStartedLeading == nil { + return nil, fmt.Errorf("OnStartedLeading callback must not be nil") + } + if lec.Callbacks.OnStoppedLeading == nil { + return nil, fmt.Errorf("OnStoppedLeading callback must not be nil") + } if lec.Lock == nil { return nil, fmt.Errorf("Lock must not be nil.") @@ -105,12 +115,26 @@ type LeaderElectionConfig struct { // LeaseDuration is the duration that non-leader candidates will // wait to force acquire leadership. This is measured against time of // last observed ack. + // + // A client needs to wait a full LeaseDuration without observing a change to + // the record before it can attempt to take over. When all clients are + // shutdown and a new set of clients are started with different names against + // the same leader record, they must wait the full LeaseDuration before + // attempting to acquire the lease. Thus LeaseDuration should be as short as + // possible (within your tolerance for clock skew rate) to avoid a possible + // long waits in the scenario. + // + // Core clients default this value to 15 seconds. LeaseDuration time.Duration // RenewDeadline is the duration that the acting master will retry // refreshing leadership before giving up. + // + // Core clients default this value to 10 seconds. RenewDeadline time.Duration // RetryPeriod is the duration the LeaderElector clients should wait // between tries of actions. + // + // Core clients default this value to 2 seconds. RetryPeriod time.Duration // Callbacks are callbacks that are triggered during certain lifecycle diff --git a/vendor/k8s.io/client-go/tools/pager/pager.go b/vendor/k8s.io/client-go/tools/pager/pager.go index 74ea3586a..d265db786 100644 --- a/vendor/k8s.io/client-go/tools/pager/pager.go +++ b/vendor/k8s.io/client-go/tools/pager/pager.go @@ -25,9 +25,11 @@ import ( metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) const defaultPageSize = 500 +const defaultPageBufferSize = 10 // ListPageFunc returns a list object for the given list options. type ListPageFunc func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) @@ -48,6 +50,9 @@ type ListPager struct { PageFn ListPageFunc FullListIfExpired bool + + // Number of pages to buffer + PageBufferSize int32 } // New creates a new pager from the provided pager function using the default @@ -58,6 +63,7 @@ func New(fn ListPageFunc) *ListPager { PageSize: defaultPageSize, PageFn: fn, FullListIfExpired: true, + PageBufferSize: defaultPageBufferSize, } } @@ -73,6 +79,12 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti } var list *metainternalversion.List for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + obj, err := p.PageFn(ctx, options) if err != nil { if !errors.IsResourceExpired(err) || !p.FullListIfExpired { @@ -115,3 +127,105 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti options.Continue = m.GetContinue() } } + +// EachListItem fetches runtime.Object items using this ListPager and invokes fn on each item. If +// fn returns an error, processing stops and that error is returned. 
If fn does not return an error, +// any error encountered while retrieving the list from the server is returned. If the context +// cancels or times out, the context error is returned. Since the list is retrieved in paginated +// chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if the pagination list +// requests exceed the expiration limit of the apiserver being called. +// +// Items are retrieved in chunks from the server to reduce the impact on the server with up to +// ListPager.PageBufferSize chunks buffered concurrently in the background. +func (p *ListPager) EachListItem(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error { + return p.eachListChunkBuffered(ctx, options, func(obj runtime.Object) error { + return meta.EachListItem(obj, fn) + }) +} + +// eachListChunkBuffered fetches runtimeObject list chunks using this ListPager and invokes fn on +// each list chunk. If fn returns an error, processing stops and that error is returned. If fn does +// not return an error, any error encountered while retrieving the list from the server is +// returned. If the context cancels or times out, the context error is returned. Since the list is +// retrieved in paginated chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if +// the pagination list requests exceed the expiration limit of the apiserver being called. +// +// Up to ListPager.PageBufferSize chunks are buffered concurrently in the background. +func (p *ListPager) eachListChunkBuffered(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error { + if p.PageBufferSize < 0 { + return fmt.Errorf("ListPager.PageBufferSize must be >= 0, got %d", p.PageBufferSize) + } + + // Ensure background goroutine is stopped if this call exits before all list items are + // processed. Cancelation error from this deferred cancel call is never returned to caller; + // either the list result has already been sent to bgResultC or the fn error is returned and + // the cancelation error is discarded. + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + chunkC := make(chan runtime.Object, p.PageBufferSize) + bgResultC := make(chan error, 1) + go func() { + defer utilruntime.HandleCrash() + + var err error + defer func() { + close(chunkC) + bgResultC <- err + }() + err = p.eachListChunk(ctx, options, func(chunk runtime.Object) error { + select { + case chunkC <- chunk: // buffer the chunk, this can block + case <-ctx.Done(): + return ctx.Err() + } + return nil + }) + }() + + for o := range chunkC { + err := fn(o) + if err != nil { + return err // any fn error should be returned immediately + } + } + // promote the results of our background goroutine to the foreground + return <-bgResultC +} + +// eachListChunk fetches runtimeObject list chunks using this ListPager and invokes fn on each list +// chunk. If fn returns an error, processing stops and that error is returned. If fn does not return +// an error, any error encountered while retrieving the list from the server is returned. If the +// context cancels or times out, the context error is returned. Since the list is retrieved in +// paginated chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if the +// pagination list requests exceed the expiration limit of the apiserver being called. 
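The paging behavior described above is easiest to see from the caller's side. Below is a minimal sketch of EachListItem driving a paginated pod list; it is illustrative only (the page size is arbitrary, the client setup is assumed to be in-cluster) and not part of the vendored diff.

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/pager"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Each page is fetched with the configured limit; items are streamed to the
	// callback while up to PageBufferSize further pages are fetched in the background.
	p := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
		return client.CoreV1().Pods(metav1.NamespaceAll).List(opts)
	}))
	p.PageSize = 100

	err = p.EachListItem(context.Background(), metav1.ListOptions{}, func(obj runtime.Object) error {
		pod := obj.(*corev1.Pod)
		fmt.Println(pod.Namespace + "/" + pod.Name)
		return nil
	})
	if err != nil {
		panic(err)
	}
}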
+func (p *ListPager) eachListChunk(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error { + if options.Limit == 0 { + options.Limit = p.PageSize + } + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + obj, err := p.PageFn(ctx, options) + if err != nil { + return err + } + m, err := meta.ListAccessor(obj) + if err != nil { + return fmt.Errorf("returned object must be a list: %v", err) + } + if err := fn(obj); err != nil { + return err + } + // if we have no more items, return. + if len(m.GetContinue()) == 0 { + return nil + } + // set the next loop up + options.Continue = m.GetContinue() + } +} diff --git a/vendor/k8s.io/client-go/tools/record/event.go b/vendor/k8s.io/client-go/tools/record/event.go index 565e72802..7bd1ef5ed 100644 --- a/vendor/k8s.io/client-go/tools/record/event.go +++ b/vendor/k8s.io/client-go/tools/record/event.go @@ -50,6 +50,40 @@ type EventSink interface { Patch(oldEvent *v1.Event, data []byte) (*v1.Event, error) } +// CorrelatorOptions allows you to change the default of the EventSourceObjectSpamFilter +// and EventAggregator in EventCorrelator +type CorrelatorOptions struct { + // The lru cache size used for both EventSourceObjectSpamFilter and the EventAggregator + // If not specified (zero value), the default specified in events_cache.go will be picked + // This means that the LRUCacheSize has to be greater than 0. + LRUCacheSize int + // The burst size used by the token bucket rate filtering in EventSourceObjectSpamFilter + // If not specified (zero value), the default specified in events_cache.go will be picked + // This means that the BurstSize has to be greater than 0. + BurstSize int + // The fill rate of the token bucket in queries per second in EventSourceObjectSpamFilter + // If not specified (zero value), the default specified in events_cache.go will be picked + // This means that the QPS has to be greater than 0. + QPS float32 + // The func used by the EventAggregator to group event keys for aggregation + // If not specified (zero value), EventAggregatorByReasonFunc will be used + KeyFunc EventAggregatorKeyFunc + // The func used by the EventAggregator to produced aggregated message + // If not specified (zero value), EventAggregatorByReasonMessageFunc will be used + MessageFunc EventAggregatorMessageFunc + // The number of events in an interval before aggregation happens by the EventAggregator + // If not specified (zero value), the default specified in events_cache.go will be picked + // This means that the MaxEvents has to be greater than 0 + MaxEvents int + // The amount of time in seconds that must transpire since the last occurrence of a similar event before it is considered new by the EventAggregator + // If not specified (zero value), the default specified in events_cache.go will be picked + // This means that the MaxIntervalInSeconds has to be greater than 0 + MaxIntervalInSeconds int + // The clock used by the EventAggregator to allow for testing + // If not specified (zero value), clock.RealClock{} will be used + Clock clock.Clock +} + // EventRecorder knows how to record events on behalf of an EventSource. type EventRecorder interface { // Event constructs an event from the given information and puts it in the queue for sending. @@ -97,33 +131,45 @@ type EventBroadcaster interface { // Creates a new event broadcaster. 
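CorrelatorOptions lets a caller tune event spam filtering and aggregation instead of taking the events_cache.go defaults. A hedged usage sketch follows (not part of the vendored diff); the QPS/burst values, component name, and pod object are hypothetical.

package main

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/record"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Tighter spam filtering than the defaults: on average one event per object
	// every five minutes, with a burst of 10. Unset fields keep their defaults.
	broadcaster := record.NewBroadcasterWithCorrelatorOptions(record.CorrelatorOptions{
		QPS:       1.0 / 300.0,
		BurstSize: 10,
	})
	sinkWatch := broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: client.CoreV1().Events("")})
	defer sinkWatch.Stop()

	recorder := broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "example-controller"})
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "etcd-0", Namespace: "demo"}}
	recorder.Event(pod, corev1.EventTypeNormal, "Synced", "object processed")
}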
func NewBroadcaster() EventBroadcaster { - return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration} + return &eventBroadcasterImpl{ + Broadcaster: watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), + sleepDuration: defaultSleepDuration, + } } func NewBroadcasterForTests(sleepDuration time.Duration) EventBroadcaster { - return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), sleepDuration} + return &eventBroadcasterImpl{ + Broadcaster: watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), + sleepDuration: sleepDuration, + } +} + +func NewBroadcasterWithCorrelatorOptions(options CorrelatorOptions) EventBroadcaster { + return &eventBroadcasterImpl{ + Broadcaster: watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), + sleepDuration: defaultSleepDuration, + options: options, + } } type eventBroadcasterImpl struct { *watch.Broadcaster sleepDuration time.Duration + options CorrelatorOptions } // StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink. // The return value can be ignored or used to stop recording, if desired. // TODO: make me an object with parameterizable queue length and retry interval func (eventBroadcaster *eventBroadcasterImpl) StartRecordingToSink(sink EventSink) watch.Interface { - // The default math/rand package functions aren't thread safe, so create a - // new Rand object for each StartRecording call. - randGen := rand.New(rand.NewSource(time.Now().UnixNano())) - eventCorrelator := NewEventCorrelator(clock.RealClock{}) + eventCorrelator := NewEventCorrelatorWithOptions(eventBroadcaster.options) return eventBroadcaster.StartEventWatcher( func(event *v1.Event) { - recordToSink(sink, event, eventCorrelator, randGen, eventBroadcaster.sleepDuration) + recordToSink(sink, event, eventCorrelator, eventBroadcaster.sleepDuration) }) } -func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrelator, randGen *rand.Rand, sleepDuration time.Duration) { +func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrelator, sleepDuration time.Duration) { // Make a copy before modification, because there could be multiple listeners. // Events are safe to copy like this. eventCopy := *event @@ -148,7 +194,7 @@ func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrela // Randomize the first sleep so that various clients won't all be // synced up if the master goes down. 
if tries == 1 { - time.Sleep(time.Duration(float64(sleepDuration) * randGen.Float64())) + time.Sleep(time.Duration(float64(sleepDuration) * rand.Float64())) } else { time.Sleep(sleepDuration) } diff --git a/vendor/k8s.io/client-go/tools/record/events_cache.go b/vendor/k8s.io/client-go/tools/record/events_cache.go index a42084f3a..1b499efd3 100644 --- a/vendor/k8s.io/client-go/tools/record/events_cache.go +++ b/vendor/k8s.io/client-go/tools/record/events_cache.go @@ -443,6 +443,52 @@ func NewEventCorrelator(clock clock.Clock) *EventCorrelator { } } +func NewEventCorrelatorWithOptions(options CorrelatorOptions) *EventCorrelator { + optionsWithDefaults := populateDefaults(options) + spamFilter := NewEventSourceObjectSpamFilter(optionsWithDefaults.LRUCacheSize, + optionsWithDefaults.BurstSize, optionsWithDefaults.QPS, optionsWithDefaults.Clock) + return &EventCorrelator{ + filterFunc: spamFilter.Filter, + aggregator: NewEventAggregator( + optionsWithDefaults.LRUCacheSize, + optionsWithDefaults.KeyFunc, + optionsWithDefaults.MessageFunc, + optionsWithDefaults.MaxEvents, + optionsWithDefaults.MaxIntervalInSeconds, + optionsWithDefaults.Clock), + logger: newEventLogger(optionsWithDefaults.LRUCacheSize, optionsWithDefaults.Clock), + } +} + +// populateDefaults populates the zero value options with defaults +func populateDefaults(options CorrelatorOptions) CorrelatorOptions { + if options.LRUCacheSize == 0 { + options.LRUCacheSize = maxLruCacheEntries + } + if options.BurstSize == 0 { + options.BurstSize = defaultSpamBurst + } + if options.QPS == 0 { + options.QPS = defaultSpamQPS + } + if options.KeyFunc == nil { + options.KeyFunc = EventAggregatorByReasonFunc + } + if options.MessageFunc == nil { + options.MessageFunc = EventAggregatorByReasonMessageFunc + } + if options.MaxEvents == 0 { + options.MaxEvents = defaultAggregateMaxEvents + } + if options.MaxIntervalInSeconds == 0 { + options.MaxIntervalInSeconds = defaultAggregateIntervalInSeconds + } + if options.Clock == nil { + options.Clock = clock.RealClock{} + } + return options +} + // EventCorrelate filters, aggregates, counts, and de-duplicates all incoming events func (c *EventCorrelator) EventCorrelate(newEvent *v1.Event) (*EventCorrelateResult, error) { if newEvent == nil { diff --git a/vendor/k8s.io/client-go/tools/reference/ref.go b/vendor/k8s.io/client-go/tools/reference/ref.go index 573d948a9..442a991cc 100644 --- a/vendor/k8s.io/client-go/tools/reference/ref.go +++ b/vendor/k8s.io/client-go/tools/reference/ref.go @@ -19,8 +19,6 @@ package reference import ( "errors" "fmt" - "net/url" - "strings" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" @@ -30,8 +28,7 @@ import ( var ( // Errors that could be returned by GetReference. 
- ErrNilObject = errors.New("can't reference a nil object") - ErrNoSelfLink = errors.New("selfLink was empty, can't make reference") + ErrNilObject = errors.New("can't reference a nil object") ) // GetReference returns an ObjectReference which refers to the given @@ -47,20 +44,6 @@ func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*v1.ObjectReferen return ref, nil } - gvk := obj.GetObjectKind().GroupVersionKind() - - // if the object referenced is actually persisted, we can just get kind from meta - // if we are building an object reference to something not yet persisted, we should fallback to scheme - kind := gvk.Kind - if len(kind) == 0 { - // TODO: this is wrong - gvks, _, err := scheme.ObjectKinds(obj) - if err != nil { - return nil, err - } - kind = gvks[0].Kind - } - // An object that implements only List has enough metadata to build a reference var listMeta metav1.Common objectMeta, err := meta.Accessor(obj) @@ -73,29 +56,29 @@ func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*v1.ObjectReferen listMeta = objectMeta } - // if the object referenced is actually persisted, we can also get version from meta - version := gvk.GroupVersion().String() - if len(version) == 0 { - selfLink := listMeta.GetSelfLink() - if len(selfLink) == 0 { - return nil, ErrNoSelfLink - } - selfLinkUrl, err := url.Parse(selfLink) + gvk := obj.GetObjectKind().GroupVersionKind() + + // If object meta doesn't contain data about kind and/or version, + // we are falling back to scheme. + // + // TODO: This doesn't work for CRDs, which are not registered in scheme. + if gvk.Empty() { + gvks, _, err := scheme.ObjectKinds(obj) if err != nil { return nil, err } - // example paths: ///* - parts := strings.Split(selfLinkUrl.Path, "/") - if len(parts) < 4 { - return nil, fmt.Errorf("unexpected self link format: '%v'; got version '%v'", selfLink, version) - } - if parts[1] == "api" { - version = parts[2] - } else { - version = parts[2] + "/" + parts[3] + if len(gvks) == 0 || gvks[0].Empty() { + return nil, fmt.Errorf("unexpected gvks registered for object %T: %v", obj, gvks) } + // TODO: The same object can be registered for multiple group versions + // (although in practise this doesn't seem to be used). + // In such case, the version set may not be correct. 
+ gvk = gvks[0] } + kind := gvk.Kind + version := gvk.GroupVersion().String() + // only has list metadata if objectMeta == nil { return &v1.ObjectReference{ diff --git a/vendor/k8s.io/client-go/transport/cache.go b/vendor/k8s.io/client-go/transport/cache.go index 7cffe2a5f..980d36ae1 100644 --- a/vendor/k8s.io/client-go/transport/cache.go +++ b/vendor/k8s.io/client-go/transport/cache.go @@ -20,6 +20,7 @@ import ( "fmt" "net" "net/http" + "strings" "sync" "time" @@ -39,13 +40,15 @@ const idleConnsPerHost = 25 var tlsCache = &tlsTransportCache{transports: make(map[tlsCacheKey]*http.Transport)} type tlsCacheKey struct { - insecure bool - caData string - certData string - keyData string - getCert string - serverName string - dial string + insecure bool + caData string + certData string + keyData string + getCert string + serverName string + nextProtos string + dial string + disableCompression bool } func (t tlsCacheKey) String() string { @@ -53,7 +56,7 @@ func (t tlsCacheKey) String() string { if len(t.keyData) > 0 { keyText = "" } - return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, getCert: %s, serverName:%s, dial:%s", t.insecure, t.caData, t.certData, keyText, t.getCert, t.serverName, t.dial) + return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, getCert: %s, serverName:%s, dial:%s disableCompression:%t", t.insecure, t.caData, t.certData, keyText, t.getCert, t.serverName, t.dial, t.disableCompression) } func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) { @@ -95,6 +98,7 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) { TLSClientConfig: tlsConfig, MaxIdleConnsPerHost: idleConnsPerHost, DialContext: dial, + DisableCompression: config.DisableCompression, }) return c.transports[key], nil } @@ -106,12 +110,14 @@ func tlsConfigKey(c *Config) (tlsCacheKey, error) { return tlsCacheKey{}, err } return tlsCacheKey{ - insecure: c.TLS.Insecure, - caData: string(c.TLS.CAData), - certData: string(c.TLS.CertData), - keyData: string(c.TLS.KeyData), - getCert: fmt.Sprintf("%p", c.TLS.GetCert), - serverName: c.TLS.ServerName, - dial: fmt.Sprintf("%p", c.Dial), + insecure: c.TLS.Insecure, + caData: string(c.TLS.CAData), + certData: string(c.TLS.CertData), + keyData: string(c.TLS.KeyData), + getCert: fmt.Sprintf("%p", c.TLS.GetCert), + serverName: c.TLS.ServerName, + nextProtos: strings.Join(c.TLS.NextProtos, ","), + dial: fmt.Sprintf("%p", c.Dial), + disableCompression: c.DisableCompression, }, nil } diff --git a/vendor/k8s.io/client-go/transport/config.go b/vendor/k8s.io/client-go/transport/config.go index 5de0a2cb1..9e18d11d3 100644 --- a/vendor/k8s.io/client-go/transport/config.go +++ b/vendor/k8s.io/client-go/transport/config.go @@ -47,6 +47,10 @@ type Config struct { // Impersonate is the config that this Config will impersonate using Impersonate ImpersonationConfig + // DisableCompression bypasses automatic GZip compression requests to the + // server. + DisableCompression bool + // Transport may be used for custom HTTP behavior. This attribute may // not be specified with the TLS client certificate options. Use // WrapTransport for most client level operations. @@ -122,5 +126,11 @@ type TLSConfig struct { CertData []byte // Bytes of the PEM-encoded client certificate. Supercedes CertFile. KeyData []byte // Bytes of the PEM-encoded client key. Supercedes KeyFile. + // NextProtos is a list of supported application level protocols, in order of preference. + // Used to populate tls.Config.NextProtos. 
+ // To indicate to the server http/1.1 is preferred over http/2, set to ["http/1.1", "h2"] (though the server is free to ignore that preference). + // To use only http/1.1, set to ["http/1.1"]. + NextProtos []string + GetCert func() (*tls.Certificate, error) // Callback that returns a TLS client certificate. CertData, CertFile, KeyData and KeyFile supercede this field. } diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go index 117a9c8c4..a272753ae 100644 --- a/vendor/k8s.io/client-go/transport/round_trippers.go +++ b/vendor/k8s.io/client-go/transport/round_trippers.go @@ -80,10 +80,6 @@ func DebugWrappers(rt http.RoundTripper) http.RoundTripper { return rt } -type requestCanceler interface { - CancelRequest(*http.Request) -} - type authProxyRoundTripper struct { username string groups []string @@ -140,11 +136,7 @@ func SetAuthProxyHeaders(req *http.Request, username string, groups []string, ex } func (rt *authProxyRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.rt.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - klog.Errorf("CancelRequest not implemented by %T", rt.rt) - } + tryCancelRequest(rt.WrappedRoundTripper(), req) } func (rt *authProxyRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } @@ -168,11 +160,7 @@ func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, e } func (rt *userAgentRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.rt.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - klog.Errorf("CancelRequest not implemented by %T", rt.rt) - } + tryCancelRequest(rt.WrappedRoundTripper(), req) } func (rt *userAgentRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } @@ -199,11 +187,7 @@ func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, e } func (rt *basicAuthRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.rt.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - klog.Errorf("CancelRequest not implemented by %T", rt.rt) - } + tryCancelRequest(rt.WrappedRoundTripper(), req) } func (rt *basicAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } @@ -259,11 +243,7 @@ func (rt *impersonatingRoundTripper) RoundTrip(req *http.Request) (*http.Respons } func (rt *impersonatingRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.delegate.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - klog.Errorf("CancelRequest not implemented by %T", rt.delegate) - } + tryCancelRequest(rt.WrappedRoundTripper(), req) } func (rt *impersonatingRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.delegate } @@ -318,11 +298,7 @@ func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, } func (rt *bearerAuthRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.rt.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - klog.Errorf("CancelRequest not implemented by %T", rt.rt) - } + tryCancelRequest(rt.WrappedRoundTripper(), req) } func (rt *bearerAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } @@ -402,11 +378,39 @@ func newDebuggingRoundTripper(rt http.RoundTripper, levels ...debugLevel) *debug } func (rt *debuggingRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.delegatedRoundTripper.(requestCanceler); ok { - canceler.CancelRequest(req) + 
tryCancelRequest(rt.WrappedRoundTripper(), req) +} + +var knownAuthTypes = map[string]bool{ + "bearer": true, + "basic": true, + "negotiate": true, +} + +// maskValue masks credential content from authorization headers +// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Authorization +func maskValue(key string, value string) string { + if !strings.EqualFold(key, "Authorization") { + return value + } + if len(value) == 0 { + return "" + } + var authType string + if i := strings.Index(value, " "); i > 0 { + authType = value[0:i] + } else { + authType = value + } + if !knownAuthTypes[strings.ToLower(authType)] { + return "" + } + if len(value) > len(authType)+1 { + value = authType + " " } else { - klog.Errorf("CancelRequest not implemented by %T", rt.delegatedRoundTripper) + value = authType } + return value } func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { @@ -423,6 +427,7 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e klog.Infof("Request Headers:") for key, values := range reqInfo.RequestHeaders { for _, value := range values { + value = maskValue(key, value) klog.Infof(" %s: %s", key, value) } } diff --git a/vendor/k8s.io/client-go/transport/token_source.go b/vendor/k8s.io/client-go/transport/token_source.go index 8595df271..bb32c3b4d 100644 --- a/vendor/k8s.io/client-go/transport/token_source.go +++ b/vendor/k8s.io/client-go/transport/token_source.go @@ -25,6 +25,7 @@ import ( "time" "golang.org/x/oauth2" + "k8s.io/klog" ) @@ -59,6 +60,15 @@ func NewCachedFileTokenSource(path string) oauth2.TokenSource { } } +// NewCachedTokenSource returns a oauth2.TokenSource reads a token from a +// designed TokenSource. The ts would provide the source of token. +func NewCachedTokenSource(ts oauth2.TokenSource) oauth2.TokenSource { + return &cachingTokenSource{ + now: time.Now, + base: ts, + } +} + type tokenSourceTransport struct { base http.RoundTripper ort http.RoundTripper @@ -72,6 +82,14 @@ func (tst *tokenSourceTransport) RoundTrip(req *http.Request) (*http.Response, e return tst.ort.RoundTrip(req) } +func (tst *tokenSourceTransport) CancelRequest(req *http.Request) { + if req.Header.Get("Authorization") != "" { + tryCancelRequest(tst.base, req) + return + } + tryCancelRequest(tst.ort, req) +} + type fileTokenSource struct { path string period time.Duration diff --git a/vendor/k8s.io/client-go/transport/transport.go b/vendor/k8s.io/client-go/transport/transport.go index 2a145c971..cd8de9828 100644 --- a/vendor/k8s.io/client-go/transport/transport.go +++ b/vendor/k8s.io/client-go/transport/transport.go @@ -23,6 +23,9 @@ import ( "fmt" "io/ioutil" "net/http" + + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/klog" ) // New returns an http.RoundTripper that will provide the authentication @@ -53,7 +56,7 @@ func New(config *Config) (http.RoundTripper, error) { // TLSConfigFor returns a tls.Config that will provide the transport level security defined // by the provided Config. Will return nil if no transport level security is requested. 
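NextProtos and DisableCompression are the two new transport.Config knobs in this bump. A small sketch of how a caller would set them is below; it is illustrative only, and the CA path and target URL are assumptions, not values from this repository.

package main

import (
	"fmt"
	"net/http"

	"k8s.io/client-go/transport"
)

func main() {
	// Prefer HTTP/1.1 via NextProtos and skip transparent response compression.
	rt, err := transport.New(&transport.Config{
		TLS: transport.TLSConfig{
			CAFile:     "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
			NextProtos: []string{"http/1.1"},
		},
		DisableCompression: true,
	})
	if err != nil {
		panic(err)
	}
	req, _ := http.NewRequest("GET", "https://kubernetes.default.svc/healthz", nil)
	resp, err := rt.RoundTrip(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Status)
}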
func TLSConfigFor(c *Config) (*tls.Config, error) { - if !(c.HasCA() || c.HasCertAuth() || c.HasCertCallback() || c.TLS.Insecure || len(c.TLS.ServerName) > 0) { + if !(c.HasCA() || c.HasCertAuth() || c.HasCertCallback() || c.TLS.Insecure || len(c.TLS.ServerName) > 0 || len(c.TLS.NextProtos) > 0) { return nil, nil } if c.HasCA() && c.TLS.Insecure { @@ -70,6 +73,7 @@ func TLSConfigFor(c *Config) (*tls.Config, error) { MinVersion: tls.VersionTLS12, InsecureSkipVerify: c.TLS.Insecure, ServerName: c.TLS.ServerName, + NextProtos: c.TLS.NextProtos, } if c.HasCA() { @@ -225,3 +229,17 @@ func (b *contextCanceller) RoundTrip(req *http.Request) (*http.Response, error) return b.rt.RoundTrip(req) } } + +func tryCancelRequest(rt http.RoundTripper, req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + switch rt := rt.(type) { + case canceler: + rt.CancelRequest(req) + case utilnet.RoundTripperWrapper: + tryCancelRequest(rt.WrappedRoundTripper(), req) + default: + klog.Warningf("Unable to cancel request for %T", rt) + } +} diff --git a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go index b7cb70ea7..c48ba03e8 100644 --- a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go +++ b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go @@ -30,7 +30,7 @@ type backoffEntry struct { } type Backoff struct { - sync.Mutex + sync.RWMutex Clock clock.Clock defaultDuration time.Duration maxDuration time.Duration @@ -57,8 +57,8 @@ func NewBackOff(initial, max time.Duration) *Backoff { // Get the current backoff Duration func (p *Backoff) Get(id string) time.Duration { - p.Lock() - defer p.Unlock() + p.RLock() + defer p.RUnlock() var delay time.Duration entry, ok := p.perItemBackoff[id] if ok { @@ -90,8 +90,8 @@ func (p *Backoff) Reset(id string) { // Returns True if the elapsed time since eventTime is smaller than the current backoff window func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool { - p.Lock() - defer p.Unlock() + p.RLock() + defer p.RUnlock() entry, ok := p.perItemBackoff[id] if !ok { return false @@ -99,13 +99,13 @@ func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool { if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { return false } - return p.Clock.Now().Sub(eventTime) < entry.backoff + return p.Clock.Since(eventTime) < entry.backoff } // Returns True if time since lastupdate is less than the current backoff window. func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool { - p.Lock() - defer p.Unlock() + p.RLock() + defer p.RUnlock() entry, ok := p.perItemBackoff[id] if !ok { return false diff --git a/vendor/k8s.io/client-go/util/flowcontrol/throttle.go b/vendor/k8s.io/client-go/util/flowcontrol/throttle.go index e671c044d..ffd912c56 100644 --- a/vendor/k8s.io/client-go/util/flowcontrol/throttle.go +++ b/vendor/k8s.io/client-go/util/flowcontrol/throttle.go @@ -17,6 +17,8 @@ limitations under the License. package flowcontrol import ( + "context" + "errors" "sync" "time" @@ -33,6 +35,8 @@ type RateLimiter interface { Stop() // QPS returns QPS of this rate limiter QPS() float32 + // Wait returns nil if a token is taken before the Context is done. 
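The tryCancelRequest helper introduced in transport.go replaces the per-wrapper requestCanceler assertions: it walks WrappedRoundTripper() until it reaches a transport that knows how to cancel. A self-contained sketch of that unwrap-and-cancel pattern, with hypothetical wrapper types standing in for the real ones:

package main

import (
	"fmt"
	"net/http"
)

// roundTripperWrapper plays the role of utilnet.RoundTripperWrapper.
type roundTripperWrapper interface {
	http.RoundTripper
	WrappedRoundTripper() http.RoundTripper
}

// cancelableTransport is the innermost transport that actually supports cancellation.
type cancelableTransport struct{ http.RoundTripper }

func (c *cancelableTransport) CancelRequest(req *http.Request) {
	fmt.Println("canceled request to", req.URL)
}

// wrapper is a decorator that only forwards; it cannot cancel on its own.
type wrapper struct{ rt http.RoundTripper }

func (w *wrapper) RoundTrip(req *http.Request) (*http.Response, error) { return w.rt.RoundTrip(req) }
func (w *wrapper) WrappedRoundTripper() http.RoundTripper              { return w.rt }

// tryCancel mirrors tryCancelRequest: cancel if possible, otherwise unwrap and retry.
func tryCancel(rt http.RoundTripper, req *http.Request) {
	switch rt := rt.(type) {
	case interface{ CancelRequest(*http.Request) }:
		rt.CancelRequest(req)
	case roundTripperWrapper:
		tryCancel(rt.WrappedRoundTripper(), req)
	default:
		fmt.Printf("unable to cancel request for %T\n", rt)
	}
}

func main() {
	req, _ := http.NewRequest("GET", "https://example.invalid", nil)
	base := &cancelableTransport{http.DefaultTransport}
	tryCancel(&wrapper{rt: &wrapper{rt: base}}, req) // unwraps twice, then cancels
}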
+ Wait(ctx context.Context) error } type tokenBucketRateLimiter struct { @@ -98,6 +102,10 @@ func (t *tokenBucketRateLimiter) QPS() float32 { return t.qps } +func (t *tokenBucketRateLimiter) Wait(ctx context.Context) error { + return t.limiter.Wait(ctx) +} + type fakeAlwaysRateLimiter struct{} func NewFakeAlwaysRateLimiter() RateLimiter { @@ -116,6 +124,10 @@ func (t *fakeAlwaysRateLimiter) QPS() float32 { return 1 } +func (t *fakeAlwaysRateLimiter) Wait(ctx context.Context) error { + return nil +} + type fakeNeverRateLimiter struct { wg sync.WaitGroup } @@ -141,3 +153,7 @@ func (t *fakeNeverRateLimiter) Accept() { func (t *fakeNeverRateLimiter) QPS() float32 { return 1 } + +func (t *fakeNeverRateLimiter) Wait(ctx context.Context) error { + return errors.New("can not be accept") +} diff --git a/vendor/k8s.io/client-go/util/homedir/homedir.go b/vendor/k8s.io/client-go/util/homedir/homedir.go index 816db57f5..3fdbeb8cf 100644 --- a/vendor/k8s.io/client-go/util/homedir/homedir.go +++ b/vendor/k8s.io/client-go/util/homedir/homedir.go @@ -18,30 +18,75 @@ package homedir import ( "os" + "path/filepath" "runtime" ) -// HomeDir returns the home directory for the current user +// HomeDir returns the home directory for the current user. +// On Windows: +// 1. the first of %HOME%, %HOMEDRIVE%%HOMEPATH%, %USERPROFILE% containing a `.kube\config` file is returned. +// 2. if none of those locations contain a `.kube\config` file, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists and is writeable is returned. +// 3. if none of those locations are writeable, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists is returned. +// 4. if none of those locations exists, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that is set is returned. func HomeDir() string { if runtime.GOOS == "windows" { + home := os.Getenv("HOME") + homeDriveHomePath := "" + if homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 { + homeDriveHomePath = homeDrive + homePath + } + userProfile := os.Getenv("USERPROFILE") - // First prefer the HOME environmental variable - if home := os.Getenv("HOME"); len(home) > 0 { - if _, err := os.Stat(home); err == nil { - return home + // Return first of %HOME%, %HOMEDRIVE%/%HOMEPATH%, %USERPROFILE% that contains a `.kube\config` file. + // %HOMEDRIVE%/%HOMEPATH% is preferred over %USERPROFILE% for backwards-compatibility. 
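The Wait method added to the flowcontrol RateLimiter interface lets callers bound how long they are willing to block on throttling by passing a context. A minimal usage sketch, assuming the package's NewTokenBucketRateLimiter constructor:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// 2 requests per second with a burst of 1: the first token is free,
	// the second would take ~500ms, which exceeds the 200ms budget below.
	limiter := flowcontrol.NewTokenBucketRateLimiter(2, 1)

	ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel()

	for i := 0; i < 3; i++ {
		if err := limiter.Wait(ctx); err != nil {
			fmt.Println("gave up waiting for a token:", err)
			return
		}
		fmt.Println("acquired token", i)
	}
}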
+ for _, p := range []string{home, homeDriveHomePath, userProfile} { + if len(p) == 0 { + continue } - } - if homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 { - homeDir := homeDrive + homePath - if _, err := os.Stat(homeDir); err == nil { - return homeDir + if _, err := os.Stat(filepath.Join(p, ".kube", "config")); err != nil { + continue } + return p } - if userProfile := os.Getenv("USERPROFILE"); len(userProfile) > 0 { - if _, err := os.Stat(userProfile); err == nil { - return userProfile + + firstSetPath := "" + firstExistingPath := "" + + // Prefer %USERPROFILE% over %HOMEDRIVE%/%HOMEPATH% for compatibility with other auth-writing tools + for _, p := range []string{home, userProfile, homeDriveHomePath} { + if len(p) == 0 { + continue + } + if len(firstSetPath) == 0 { + // remember the first path that is set + firstSetPath = p + } + info, err := os.Stat(p) + if err != nil { + continue } + if len(firstExistingPath) == 0 { + // remember the first path that exists + firstExistingPath = p + } + if info.IsDir() && info.Mode().Perm()&(1<<(uint(7))) != 0 { + // return first path that is writeable + return p + } + } + + // If none are writeable, return first location that exists + if len(firstExistingPath) > 0 { + return firstExistingPath } + + // If none exist, return first location that is set + if len(firstSetPath) > 0 { + return firstSetPath + } + + // We've got nothing + return "" } return os.Getenv("HOME") } diff --git a/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go b/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go index a5a8bbf7a..78b6b678f 100644 --- a/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go +++ b/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go @@ -93,17 +93,17 @@ func (j *JSONPath) FindResults(data interface{}) ([][]reflect.Value, error) { // encounter an end node, break the current block if j.endRange > 0 && j.endRange <= j.inRange { - j.endRange -= 1 + j.endRange-- break } // encounter a range node, start a range loop if j.beginRange > 0 { - j.beginRange -= 1 - j.inRange += 1 + j.beginRange-- + j.inRange++ for k, value := range results { j.parser.Root.Nodes = nodes[i+1:] if k == len(results)-1 { - j.inRange -= 1 + j.inRange-- } nextResults, err := j.FindResults(value.Interface()) if err != nil { @@ -213,11 +213,11 @@ func (j *JSONPath) evalIdentifier(input []reflect.Value, node *IdentifierNode) ( switch node.Name { case "range": j.stack = append(j.stack, j.cur) - j.beginRange += 1 + j.beginRange++ results = input case "end": if j.endRange < j.inRange { // inside a loop, break the current block - j.endRange += 1 + j.endRange++ break } // the loop is about to end, pop value and continue the following execution diff --git a/vendor/k8s.io/client-go/util/jsonpath/parser.go b/vendor/k8s.io/client-go/util/jsonpath/parser.go index 1af8f269f..e1aab6804 100644 --- a/vendor/k8s.io/client-go/util/jsonpath/parser.go +++ b/vendor/k8s.io/client-go/util/jsonpath/parser.go @@ -37,7 +37,6 @@ type Parser struct { Name string Root *ListNode input string - cur *ListNode pos int start int width int @@ -186,8 +185,7 @@ func (p *Parser) parseInsideAction(cur *ListNode) error { func (p *Parser) parseRightDelim(cur *ListNode) error { p.pos += len(rightDelim) p.consumeText() - cur = p.Root - return p.parseText(cur) + return p.parseText(p.Root) } // parseIdentifier scans build-in keywords, like "range" "end" @@ -231,7 +229,7 @@ func (p *Parser) parseRecursive(cur *ListNode) error { func (p *Parser) parseNumber(cur *ListNode) error 
{ r := p.peek() if r == '+' || r == '-' { - r = p.next() + p.next() } for { r = p.next() diff --git a/vendor/k8s.io/client-go/util/retry/util.go b/vendor/k8s.io/client-go/util/retry/util.go index 3ac0840ad..c80ff0877 100644 --- a/vendor/k8s.io/client-go/util/retry/util.go +++ b/vendor/k8s.io/client-go/util/retry/util.go @@ -42,12 +42,12 @@ var DefaultBackoff = wait.Backoff{ Jitter: 0.1, } -// RetryConflict executes the provided function repeatedly, retrying if the server returns a conflicting -// write. Callers should preserve previous executions if they wish to retry changes. It performs an +// OnError executes the provided function repeatedly, retrying if the server returns a specified +// error. Callers should preserve previous executions if they wish to retry changes. It performs an // exponential backoff. // // var pod *api.Pod -// err := RetryOnConflict(DefaultBackoff, func() (err error) { +// err := retry.OnError(DefaultBackoff, errors.IsConflict, func() (err error) { // pod, err = c.Pods("mynamespace").UpdateStatus(podStatus) // return // }) @@ -58,14 +58,14 @@ var DefaultBackoff = wait.Backoff{ // ... // // TODO: Make Backoff an interface? -func RetryOnConflict(backoff wait.Backoff, fn func() error) error { +func OnError(backoff wait.Backoff, errorFunc func(error) bool, fn func() error) error { var lastConflictErr error err := wait.ExponentialBackoff(backoff, func() (bool, error) { err := fn() switch { case err == nil: return true, nil - case errors.IsConflict(err): + case errorFunc(err): lastConflictErr = err return false, nil default: @@ -77,3 +77,8 @@ func RetryOnConflict(backoff wait.Backoff, fn func() error) error { } return err } + +// RetryOnConflict executes the function function repeatedly, retrying if the server returns a conflicting +func RetryOnConflict(backoff wait.Backoff, fn func() error) error { + return OnError(backoff, errors.IsConflict, fn) +} diff --git a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go index bd654bf31..073279886 100644 --- a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go @@ -18,6 +18,7 @@ package workqueue import ( "container/heap" + "sync" "time" "k8s.io/apimachinery/pkg/util/clock" @@ -43,13 +44,12 @@ func NewNamedDelayingQueue(name string) DelayingInterface { func newDelayingQueue(clock clock.Clock, name string) DelayingInterface { ret := &delayingType{ - Interface: NewNamed(name), - clock: clock, - heartbeat: clock.NewTicker(maxWait), - stopCh: make(chan struct{}), - waitingForAddCh: make(chan *waitFor, 1000), - metrics: newRetryMetrics(name), - deprecatedMetrics: newDeprecatedRetryMetrics(name), + Interface: NewNamed(name), + clock: clock, + heartbeat: clock.NewTicker(maxWait), + stopCh: make(chan struct{}), + waitingForAddCh: make(chan *waitFor, 1000), + metrics: newRetryMetrics(name), } go ret.waitingLoop() @@ -66,6 +66,8 @@ type delayingType struct { // stopCh lets us signal a shutdown to the waiting loop stopCh chan struct{} + // stopOnce guarantees we only signal shutdown a single time + stopOnce sync.Once // heartbeat ensures we wait no more than maxWait before firing heartbeat clock.Ticker @@ -74,8 +76,7 @@ type delayingType struct { waitingForAddCh chan *waitFor // metrics counts the number of retries - metrics retryMetrics - deprecatedMetrics retryMetrics + metrics retryMetrics } // waitFor holds the data to add and the time it should be added @@ -133,11 +134,14 @@ func (pq 
waitForPriorityQueue) Peek() interface{} { return pq[0] } -// ShutDown gives a way to shut off this queue +// ShutDown stops the queue. After the queue drains, the returned shutdown bool +// on Get() will be true. This method may be invoked more than once. func (q *delayingType) ShutDown() { - q.Interface.ShutDown() - close(q.stopCh) - q.heartbeat.Stop() + q.stopOnce.Do(func() { + q.Interface.ShutDown() + close(q.stopCh) + q.heartbeat.Stop() + }) } // AddAfter adds the given item to the work queue after the given delay @@ -148,7 +152,6 @@ func (q *delayingType) AddAfter(item interface{}, duration time.Duration) { } q.metrics.retry() - q.deprecatedMetrics.retry() // immediately add things with no delay if duration <= 0 { diff --git a/vendor/k8s.io/client-go/util/workqueue/doc.go b/vendor/k8s.io/client-go/util/workqueue/doc.go index 2a00c74ac..a5c976e0f 100644 --- a/vendor/k8s.io/client-go/util/workqueue/doc.go +++ b/vendor/k8s.io/client-go/util/workqueue/doc.go @@ -23,4 +23,4 @@ limitations under the License. // * Multiple consumers and producers. In particular, it is allowed for an // item to be reenqueued while it is being processed. // * Shutdown notifications. -package workqueue +package workqueue // import "k8s.io/client-go/util/workqueue" diff --git a/vendor/k8s.io/client-go/util/workqueue/metrics.go b/vendor/k8s.io/client-go/util/workqueue/metrics.go index be23ddd05..a3911bf2d 100644 --- a/vendor/k8s.io/client-go/util/workqueue/metrics.go +++ b/vendor/k8s.io/client-go/util/workqueue/metrics.go @@ -87,14 +87,6 @@ type defaultQueueMetrics struct { // how long have current threads been working? unfinishedWorkSeconds SettableGaugeMetric longestRunningProcessor SettableGaugeMetric - - // TODO(danielqsj): Remove the following metrics, they are deprecated - deprecatedDepth GaugeMetric - deprecatedAdds CounterMetric - deprecatedLatency SummaryMetric - deprecatedWorkDuration SummaryMetric - deprecatedUnfinishedWorkSeconds SettableGaugeMetric - deprecatedLongestRunningProcessor SettableGaugeMetric } func (m *defaultQueueMetrics) add(item t) { @@ -103,9 +95,7 @@ func (m *defaultQueueMetrics) add(item t) { } m.adds.Inc() - m.deprecatedAdds.Inc() m.depth.Inc() - m.deprecatedDepth.Inc() if _, exists := m.addTimes[item]; !exists { m.addTimes[item] = m.clock.Now() } @@ -117,11 +107,9 @@ func (m *defaultQueueMetrics) get(item t) { } m.depth.Dec() - m.deprecatedDepth.Dec() m.processingStartTimes[item] = m.clock.Now() if startTime, exists := m.addTimes[item]; exists { m.latency.Observe(m.sinceInSeconds(startTime)) - m.deprecatedLatency.Observe(m.sinceInMicroseconds(startTime)) delete(m.addTimes, item) } } @@ -133,7 +121,6 @@ func (m *defaultQueueMetrics) done(item t) { if startTime, exists := m.processingStartTimes[item]; exists { m.workDuration.Observe(m.sinceInSeconds(startTime)) - m.deprecatedWorkDuration.Observe(m.sinceInMicroseconds(startTime)) delete(m.processingStartTimes, item) } } @@ -153,9 +140,7 @@ func (m *defaultQueueMetrics) updateUnfinishedWork() { // Convert to seconds; microseconds is unhelpfully granular for this. total /= 1000000 m.unfinishedWorkSeconds.Set(total) - m.deprecatedUnfinishedWorkSeconds.Set(total) m.longestRunningProcessor.Set(oldest / 1000000) - m.deprecatedLongestRunningProcessor.Set(oldest) // in microseconds. 
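The stopOnce guard added to the delaying queue makes ShutDown idempotent, so shutting the queue down from more than one code path no longer risks a double close of stopCh. A small sketch (the queue name is arbitrary):

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.NewNamedDelayingQueue("example")

	q.ShutDown()
	q.ShutDown() // now a no-op thanks to stopOnce, instead of panicking on a closed channel

	_, shutdown := q.Get()
	fmt.Println("shutdown:", shutdown) // true
}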
} type noMetrics struct{} @@ -200,13 +185,6 @@ type MetricsProvider interface { NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric NewRetriesMetric(name string) CounterMetric - NewDeprecatedDepthMetric(name string) GaugeMetric - NewDeprecatedAddsMetric(name string) CounterMetric - NewDeprecatedLatencyMetric(name string) SummaryMetric - NewDeprecatedWorkDurationMetric(name string) SummaryMetric - NewDeprecatedUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric - NewDeprecatedLongestRunningProcessorMicrosecondsMetric(name string) SettableGaugeMetric - NewDeprecatedRetriesMetric(name string) CounterMetric } type noopMetricsProvider struct{} @@ -239,34 +217,6 @@ func (_ noopMetricsProvider) NewRetriesMetric(name string) CounterMetric { return noopMetric{} } -func (_ noopMetricsProvider) NewDeprecatedDepthMetric(name string) GaugeMetric { - return noopMetric{} -} - -func (_ noopMetricsProvider) NewDeprecatedAddsMetric(name string) CounterMetric { - return noopMetric{} -} - -func (_ noopMetricsProvider) NewDeprecatedLatencyMetric(name string) SummaryMetric { - return noopMetric{} -} - -func (_ noopMetricsProvider) NewDeprecatedWorkDurationMetric(name string) SummaryMetric { - return noopMetric{} -} - -func (_ noopMetricsProvider) NewDeprecatedUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric { - return noopMetric{} -} - -func (_ noopMetricsProvider) NewDeprecatedLongestRunningProcessorMicrosecondsMetric(name string) SettableGaugeMetric { - return noopMetric{} -} - -func (_ noopMetricsProvider) NewDeprecatedRetriesMetric(name string) CounterMetric { - return noopMetric{} -} - var globalMetricsFactory = queueMetricsFactory{ metricsProvider: noopMetricsProvider{}, } @@ -289,21 +239,15 @@ func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) qu return noMetrics{} } return &defaultQueueMetrics{ - clock: clock, - depth: mp.NewDepthMetric(name), - adds: mp.NewAddsMetric(name), - latency: mp.NewLatencyMetric(name), - workDuration: mp.NewWorkDurationMetric(name), - unfinishedWorkSeconds: mp.NewUnfinishedWorkSecondsMetric(name), - longestRunningProcessor: mp.NewLongestRunningProcessorSecondsMetric(name), - deprecatedDepth: mp.NewDeprecatedDepthMetric(name), - deprecatedAdds: mp.NewDeprecatedAddsMetric(name), - deprecatedLatency: mp.NewDeprecatedLatencyMetric(name), - deprecatedWorkDuration: mp.NewDeprecatedWorkDurationMetric(name), - deprecatedUnfinishedWorkSeconds: mp.NewDeprecatedUnfinishedWorkSecondsMetric(name), - deprecatedLongestRunningProcessor: mp.NewDeprecatedLongestRunningProcessorMicrosecondsMetric(name), - addTimes: map[t]time.Time{}, - processingStartTimes: map[t]time.Time{}, + clock: clock, + depth: mp.NewDepthMetric(name), + adds: mp.NewAddsMetric(name), + latency: mp.NewLatencyMetric(name), + workDuration: mp.NewWorkDurationMetric(name), + unfinishedWorkSeconds: mp.NewUnfinishedWorkSecondsMetric(name), + longestRunningProcessor: mp.NewLongestRunningProcessorSecondsMetric(name), + addTimes: map[t]time.Time{}, + processingStartTimes: map[t]time.Time{}, } } @@ -317,16 +261,6 @@ func newRetryMetrics(name string) retryMetrics { } } -func newDeprecatedRetryMetrics(name string) retryMetrics { - var ret *defaultRetryMetrics - if len(name) == 0 { - return ret - } - return &defaultRetryMetrics{ - retries: globalMetricsFactory.metricsProvider.NewDeprecatedRetriesMetric(name), - } -} - // SetProvider sets the metrics provider for all subsequently created work // queues. 
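The retry.OnError generalization shown a little earlier lets the caller decide which errors are worth retrying; RetryOnConflict is now just OnError with errors.IsConflict as the predicate. A minimal sketch of the new signature (the update closure is a stand-in for a real API call):

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/client-go/util/retry"
)

func main() {
	attempts := 0
	// Any func(error) bool can be used as the predicate; IsConflict reproduces
	// the old RetryOnConflict behaviour.
	err := retry.OnError(retry.DefaultBackoff, apierrors.IsConflict, func() error {
		attempts++
		// e.g. client.Update(ctx, obj) in real code
		return nil
	})
	fmt.Println("attempts:", attempts, "err:", err)
}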
Only the first call has an effect. func SetProvider(metricsProvider MetricsProvider) { diff --git a/vendor/k8s.io/client-go/util/workqueue/parallelizer.go b/vendor/k8s.io/client-go/util/workqueue/parallelizer.go index ad2535018..5928a0c5b 100644 --- a/vendor/k8s.io/client-go/util/workqueue/parallelizer.go +++ b/vendor/k8s.io/client-go/util/workqueue/parallelizer.go @@ -25,14 +25,6 @@ import ( type DoWorkPieceFunc func(piece int) -// Parallelize is a very simple framework that allows for parallelizing -// N independent pieces of work. -// -// Deprecated: Use ParallelizeUntil instead. -func Parallelize(workers, pieces int, doWorkPiece DoWorkPieceFunc) { - ParallelizeUntil(nil, workers, pieces, doWorkPiece) -} - // ParallelizeUntil is a framework that allows for parallelizing N // independent pieces of work until done or the context is canceled. func ParallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece DoWorkPieceFunc) { diff --git a/vendor/k8s.io/klog/.travis.yml b/vendor/k8s.io/klog/.travis.yml index fc0d2caf3..5677664c2 100644 --- a/vendor/k8s.io/klog/.travis.yml +++ b/vendor/k8s.io/klog/.travis.yml @@ -1,14 +1,16 @@ language: go +go_import_path: k8s.io/klog dist: xenial go: - 1.9.x - 1.10.x - 1.11.x + - 1.12.x script: - go get -t -v ./... - diff -u <(echo -n) <(gofmt -d .) - diff -u <(echo -n) <(golint $(go list -e ./...)) - - go tool vet . + - go tool vet . || go vet . - go test -v -race ./... install: - go get golang.org/x/lint/golint diff --git a/vendor/k8s.io/klog/CONTRIBUTING.md b/vendor/k8s.io/klog/CONTRIBUTING.md index de4711513..574a56abb 100644 --- a/vendor/k8s.io/klog/CONTRIBUTING.md +++ b/vendor/k8s.io/klog/CONTRIBUTING.md @@ -8,10 +8,6 @@ _As contributors and maintainers of this project, and in the interest of fosteri We have full documentation on how to get started contributing here: - - - [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests - [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing) - [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers @@ -20,12 +16,7 @@ If your repo has certain guidelines for contribution, put them here ahead of the - [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! 
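With the deprecated Parallelize wrapper removed from the vendored copy, ParallelizeUntil is the remaining entry point for fanning out independent pieces of work. A small usage sketch:

package main

import (
	"context"
	"fmt"
	"sync/atomic"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	pieces := []int64{1, 2, 3, 4, 5, 6, 7, 8}
	var sum int64

	// 4 workers process len(pieces) independent items; the context allows early cancellation.
	workqueue.ParallelizeUntil(context.Background(), 4, len(pieces), func(i int) {
		atomic.AddInt64(&sum, pieces[i])
	})

	fmt.Println("sum:", sum) // 36
}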
- +- [Slack](https://kubernetes.slack.com/messages/sig-architecture) +- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture) diff --git a/vendor/k8s.io/klog/OWNERS b/vendor/k8s.io/klog/OWNERS index 56b0eb044..380e514f2 100644 --- a/vendor/k8s.io/klog/OWNERS +++ b/vendor/k8s.io/klog/OWNERS @@ -1,5 +1,13 @@ -# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md - +# See the OWNERS docs at https://go.k8s.io/owners +reviewers: + - jayunit100 + - hoegaarden + - andyxning + - neolit123 + - pohly + - yagonobre + - vincepri + - detiber approvers: - dims - thockin diff --git a/vendor/k8s.io/klog/README.md b/vendor/k8s.io/klog/README.md index a747f538a..841468b4b 100644 --- a/vendor/k8s.io/klog/README.md +++ b/vendor/k8s.io/klog/README.md @@ -1,7 +1,53 @@ klog ==== -klog is a permanant fork of https://github.com/golang/glog. original README from glog is below +klog is a permanent fork of https://github.com/golang/glog. + +## Why was klog created? + +The decision to create klog was one that wasn't made lightly, but it was necessary due to some +drawbacks that are present in [glog](https://github.com/golang/glog). Ultimately, the fork was created due to glog not being under active development; this can be seen in the glog README: + +> The code in this repo [...] is not itself under development + +This makes us unable to solve many use cases without a fork. The factors that contributed to needing feature development are listed below: + + * `glog` [presents a lot "gotchas"](https://github.com/kubernetes/kubernetes/issues/61006) and introduces challenges in containerized environments, all of which aren't well documented. + * `glog` doesn't provide an easy way to test logs, which detracts from the stability of software using it + * A long term goal is to implement a logging interface that allows us to add context, change output format, etc. + +Historical context is available here: + + * https://github.com/kubernetes/kubernetes/issues/61006 + * https://github.com/kubernetes/kubernetes/issues/70264 + * https://groups.google.com/forum/#!msg/kubernetes-sig-architecture/wCWiWf3Juzs/hXRVBH90CgAJ + * https://groups.google.com/forum/#!msg/kubernetes-dev/7vnijOMhLS0/1oRiNtigBgAJ + +---- + +How to use klog +=============== +- Replace imports for `github.com/golang/glog` with `k8s.io/klog` +- Use `klog.InitFlags(nil)` explicitly for initializing global flags as we no longer use `init()` method to register the flags +- You can now use `log-file` instead of `log-dir` for logging to a single file (See `examples/log_file/usage_log_file.go`) +- If you want to redirect everything logged using klog somewhere else (say syslog!), you can use `klog.SetOutput()` method and supply a `io.Writer`. (See `examples/set_output/usage_set_output.go`) +- For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)) + +### Coexisting with glog +This package can be used side by side with glog. [This example](examples/coexist_glog/coexist_glog.go) shows how to initialize and syncronize flags from the global `flag.CommandLine` FlagSet. In addition, the example makes use of stderr as combined output by setting `alsologtostderr` (or `logtostderr`) to `true`. + +## Community, discussion, contribution, and support + +Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/). 
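To make the README's usage notes concrete, here is a minimal sketch of explicit flag initialization and output redirection; the in-memory buffer is chosen only for illustration (a syslog writer or test buffer would work the same way):

package main

import (
	"bytes"
	"flag"
	"fmt"

	"k8s.io/klog"
)

func main() {
	// Flags are no longer registered in init(), so InitFlags must be called explicitly.
	klog.InitFlags(nil)
	flag.Set("logtostderr", "false") // let SetOutput decide where log lines go
	flag.Parse()

	var buf bytes.Buffer
	klog.SetOutput(&buf)

	klog.Info("Prepare to repel boarders")
	klog.Flush()

	fmt.Print(buf.String())
}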
+ +You can reach the maintainers of this project at: + +- [Slack](https://kubernetes.slack.com/messages/sig-architecture) +- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture) + +### Code of conduct + +Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). ---- @@ -26,20 +72,20 @@ The comment from glog.go introduces the ideas: Error, Fatal, plus formatting variants such as Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. - + Basic examples: - + glog.Info("Prepare to repel boarders") - + glog.Fatalf("Initialization failed: %s", err) - + See the documentation for the V function for an explanation of these examples: - + if glog.V(2) { glog.Info("Starting transaction...") } - + glog.V(2).Infoln("Processed", nItems, "elements") diff --git a/vendor/k8s.io/klog/SECURITY_CONTACTS b/vendor/k8s.io/klog/SECURITY_CONTACTS index 520ddb525..6128a5869 100644 --- a/vendor/k8s.io/klog/SECURITY_CONTACTS +++ b/vendor/k8s.io/klog/SECURITY_CONTACTS @@ -1,10 +1,10 @@ # Defined below are the security contacts for this repo. # -# They are the contact point for the Product Security Team to reach out +# They are the contact point for the Product Security Committee to reach out # to for triaging and handling of incoming issues. # # The below names agree to abide by the -# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy) +# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) # and will be removed and replaced if they violate that agreement. # # DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE diff --git a/vendor/k8s.io/klog/code-of-conduct.md b/vendor/k8s.io/klog/code-of-conduct.md new file mode 100644 index 000000000..0d15c00cf --- /dev/null +++ b/vendor/k8s.io/klog/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/k8s.io/klog/go.mod b/vendor/k8s.io/klog/go.mod new file mode 100644 index 000000000..3877d8546 --- /dev/null +++ b/vendor/k8s.io/klog/go.mod @@ -0,0 +1,5 @@ +module k8s.io/klog + +go 1.12 + +require github.com/go-logr/logr v0.1.0 diff --git a/vendor/k8s.io/klog/go.sum b/vendor/k8s.io/klog/go.sum new file mode 100644 index 000000000..fb64d277a --- /dev/null +++ b/vendor/k8s.io/klog/go.sum @@ -0,0 +1,2 @@ +github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= diff --git a/vendor/k8s.io/klog/klog.go b/vendor/k8s.io/klog/klog.go index 13bcc81a7..2712ce0af 100644 --- a/vendor/k8s.io/klog/klog.go +++ b/vendor/k8s.io/klog/klog.go @@ -20,26 +20,26 @@ // // Basic examples: // -// glog.Info("Prepare to repel boarders") +// klog.Info("Prepare to repel boarders") // -// glog.Fatalf("Initialization failed: %s", err) +// klog.Fatalf("Initialization failed: %s", err) // // See the documentation for the V function for an explanation of these examples: // -// if glog.V(2) { -// glog.Info("Starting transaction...") +// if klog.V(2) { +// klog.Info("Starting transaction...") // } // -// glog.V(2).Infoln("Processed", nItems, "elements") +// klog.V(2).Infoln("Processed", nItems, "elements") // // Log output is buffered and written periodically using Flush. 
Programs // should call Flush before exiting to guarantee all log output is written. // -// By default, all log statements write to files in a temporary directory. +// By default, all log statements write to standard error. // This package provides several flags that modify this behavior. // As a result, flag.Parse must be called before any logging is done. // -// -logtostderr=false +// -logtostderr=true // Logs are written to standard error instead of to files. // -alsologtostderr=false // Logs are written to standard error as well as to files. @@ -78,6 +78,7 @@ import ( "fmt" "io" stdLog "log" + "math" "os" "path/filepath" "runtime" @@ -141,7 +142,7 @@ func (s *severity) Set(value string) error { if v, ok := severityByName(value); ok { threshold = v } else { - v, err := strconv.Atoi(value) + v, err := strconv.ParseInt(value, 10, 32) if err != nil { return err } @@ -225,7 +226,7 @@ func (l *Level) Get() interface{} { // Set is part of the flag.Value interface. func (l *Level) Set(value string) error { - v, err := strconv.Atoi(value) + v, err := strconv.ParseInt(value, 10, 32) if err != nil { return err } @@ -293,7 +294,7 @@ func (m *moduleSpec) Set(value string) error { return errVmoduleSyntax } pattern := patLev[0] - v, err := strconv.Atoi(patLev[1]) + v, err := strconv.ParseInt(patLev[1], 10, 32) if err != nil { return errors.New("syntax error: expect comma-separated list of filename=N") } @@ -395,25 +396,38 @@ type flushSyncWriter interface { io.Writer } +// init sets up the defaults and runs flushDaemon. func init() { - // Default stderrThreshold is ERROR. - logging.stderrThreshold = errorLog - + logging.stderrThreshold = errorLog // Default stderrThreshold is ERROR. logging.setVState(0, nil, false) + logging.logDir = "" + logging.logFile = "" + logging.logFileMaxSizeMB = 1800 + logging.toStderr = true + logging.alsoToStderr = false + logging.skipHeaders = false + logging.addDirHeader = false + logging.skipLogHeaders = false go logging.flushDaemon() } -// InitFlags is for explicitly initializing the flags +// InitFlags is for explicitly initializing the flags. func InitFlags(flagset *flag.FlagSet) { if flagset == nil { flagset = flag.CommandLine } - flagset.StringVar(&logging.logDir, "log_dir", "", "If non-empty, write log files in this directory") - flagset.StringVar(&logging.logFile, "log_file", "", "If non-empty, use this log file") - flagset.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") - flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") - flagset.Var(&logging.verbosity, "v", "log level for V logs") - flagset.BoolVar(&logging.skipHeaders, "skip_headers", false, "If true, avoid header prefixes in the log messages") + + flagset.StringVar(&logging.logDir, "log_dir", logging.logDir, "If non-empty, write log files in this directory") + flagset.StringVar(&logging.logFile, "log_file", logging.logFile, "If non-empty, use this log file") + flagset.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", logging.logFileMaxSizeMB, + "Defines the maximum size a log file can grow to. Unit is megabytes. 
"+ + "If the value is 0, the maximum file size is unlimited.") + flagset.BoolVar(&logging.toStderr, "logtostderr", logging.toStderr, "log to standard error instead of files") + flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files") + flagset.Var(&logging.verbosity, "v", "number for the log level verbosity") + flagset.BoolVar(&logging.skipHeaders, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header") + flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages") + flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files") flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") @@ -471,8 +485,18 @@ type loggingT struct { // with the log-dir option. logFile string + // When logFile is specified, this limiter makes sure the logFile won't exceeds a certain size. When exceeds, the + // logFile will be cleaned up. If this value is 0, no size limitation will be applied to logFile. + logFileMaxSizeMB uint64 + // If true, do not add the prefix headers, useful when used with SetOutput skipHeaders bool + + // If true, do not add the headers to log files + skipLogHeaders bool + + // If true, add the file directory to the header + addDirHeader bool } // buffer holds a byte Buffer for reuse. The zero value is ready for use. @@ -558,9 +582,14 @@ func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { file = "???" line = 1 } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] + if slash := strings.LastIndex(file, "/"); slash >= 0 { + path := file + file = path[slash+1:] + if l.addDirHeader { + if dirsep := strings.LastIndex(path[:slash], "/"); dirsep >= 0 { + file = path[dirsep+1:] + } + } } } return l.formatHeader(s, file, line), file, line @@ -709,6 +738,8 @@ func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { // SetOutput sets the output destination for all severities func SetOutput(w io.Writer) { + logging.mu.Lock() + defer logging.mu.Unlock() for s := fatalLog; s >= infoLog; s-- { rb := &redirectBuffer{ w: w, @@ -719,6 +750,8 @@ func SetOutput(w io.Writer) { // SetOutputBySeverity sets the output destination for specific severity func SetOutputBySeverity(name string, w io.Writer) { + logging.mu.Lock() + defer logging.mu.Unlock() sev, ok := severityByName(name) if !ok { panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name)) @@ -744,24 +777,38 @@ func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoTo if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { os.Stderr.Write(data) } - if l.file[s] == nil { - if err := l.createFiles(s); err != nil { - os.Stderr.Write(data) // Make sure the message appears somewhere. - l.exit(err) + + if logging.logFile != "" { + // Since we are using a single log file, all of the items in l.file array + // will point to the same file, so just use one of them to write data. + if l.file[infoLog] == nil { + if err := l.createFiles(infoLog); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. 
+ l.exit(err) + } } - } - switch s { - case fatalLog: - l.file[fatalLog].Write(data) - fallthrough - case errorLog: - l.file[errorLog].Write(data) - fallthrough - case warningLog: - l.file[warningLog].Write(data) - fallthrough - case infoLog: l.file[infoLog].Write(data) + } else { + if l.file[s] == nil { + if err := l.createFiles(s); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. + l.exit(err) + } + } + + switch s { + case fatalLog: + l.file[fatalLog].Write(data) + fallthrough + case errorLog: + l.file[errorLog].Write(data) + fallthrough + case warningLog: + l.file[warningLog].Write(data) + fallthrough + case infoLog: + l.file[infoLog].Write(data) + } } } if s == fatalLog { @@ -800,7 +847,7 @@ func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoTo // timeoutFlush calls Flush and returns when it completes or after timeout // elapses, whichever happens first. This is needed because the hooks invoked -// by Flush may deadlock when glog.Fatal is called from a hook that holds +// by Flush may deadlock when klog.Fatal is called from a hook that holds // a lock. func timeoutFlush(timeout time.Duration) { done := make(chan bool, 1) @@ -811,7 +858,7 @@ func timeoutFlush(timeout time.Duration) { select { case <-done: case <-time.After(timeout): - fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) + fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout) } } @@ -861,18 +908,33 @@ func (l *loggingT) exit(err error) { type syncBuffer struct { logger *loggingT *bufio.Writer - file *os.File - sev severity - nbytes uint64 // The number of bytes written to this file + file *os.File + sev severity + nbytes uint64 // The number of bytes written to this file + maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up. } func (sb *syncBuffer) Sync() error { return sb.file.Sync() } +// CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options. +func CalculateMaxSize() uint64 { + if logging.logFile != "" { + if logging.logFileMaxSizeMB == 0 { + // If logFileMaxSizeMB is zero, we don't have limitations on the log size. + return math.MaxUint64 + } + // Flag logFileMaxSizeMB is in MB for user convenience. + return logging.logFileMaxSizeMB * 1024 * 1024 + } + // If "log_file" flag is not specified, the target file (sb.file) will be cleaned up when reaches a fixed size. + return MaxSize +} + func (sb *syncBuffer) Write(p []byte) (n int, err error) { - if sb.nbytes+uint64(len(p)) >= MaxSize { - if err := sb.rotateFile(time.Now()); err != nil { + if sb.nbytes+uint64(len(p)) >= sb.maxbytes { + if err := sb.rotateFile(time.Now(), false); err != nil { sb.logger.exit(err) } } @@ -885,13 +947,15 @@ func (sb *syncBuffer) Write(p []byte) (n int, err error) { } // rotateFile closes the syncBuffer's file and starts a new one. -func (sb *syncBuffer) rotateFile(now time.Time) error { +// The startup argument indicates whether this is the initial startup of klog. +// If startup is true, existing files are opened for appending instead of truncated. 
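The single-log-file mode wired in above (log_file, log_file_max_size, and append-on-startup via openOrCreate) can be driven entirely through flags. A hypothetical invocation, with the file path and size limit chosen only for illustration:

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil)
	flag.Set("logtostderr", "false")       // write to the file instead of stderr
	flag.Set("log_file", "/tmp/druid.log") // single file for all severities
	flag.Set("log_file_max_size", "100")   // megabytes; 0 means unlimited
	flag.Parse()

	klog.Info("writing to a single, size-limited log file")
	klog.Flush()
}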
+func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error { if sb.file != nil { sb.Flush() sb.file.Close() } var err error - sb.file, _, err = create(severityName[sb.sev], now) + sb.file, _, err = create(severityName[sb.sev], now, startup) sb.nbytes = 0 if err != nil { return err @@ -899,6 +963,10 @@ func (sb *syncBuffer) rotateFile(now time.Time) error { sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) + if sb.logger.skipLogHeaders { + return nil + } + // Write header. var buf bytes.Buffer fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) @@ -923,10 +991,11 @@ func (l *loggingT) createFiles(sev severity) error { // has already been created, we can stop. for s := sev; s >= infoLog && l.file[s] == nil; s-- { sb := &syncBuffer{ - logger: l, - sev: s, + logger: l, + sev: s, + maxbytes: CalculateMaxSize(), } - if err := sb.rotateFile(now); err != nil { + if err := sb.rotateFile(now, true); err != nil { return err } l.file[s] = sb @@ -934,7 +1003,7 @@ func (l *loggingT) createFiles(sev severity) error { return nil } -const flushInterval = 30 * time.Second +const flushInterval = 5 * time.Second // flushDaemon periodically flushes the log file buffers. func (l *loggingT) flushDaemon() { @@ -1045,9 +1114,9 @@ type Verbose bool // The returned value is a boolean of type Verbose, which implements Info, Infoln // and Infof. These methods will write to the Info log if called. // Thus, one may write either -// if glog.V(2) { glog.Info("log this") } +// if klog.V(2) { klog.Info("log this") } // or -// glog.V(2).Info("log this") +// klog.V(2).Info("log this") // The second form is shorter but the first is cheaper if logging is off because it does // not evaluate its arguments. // @@ -1121,7 +1190,7 @@ func InfoDepth(depth int, args ...interface{}) { } // Infoln logs to the INFO log. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. func Infoln(args ...interface{}) { logging.println(infoLog, args...) } @@ -1145,7 +1214,7 @@ func WarningDepth(depth int, args ...interface{}) { } // Warningln logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. func Warningln(args ...interface{}) { logging.println(warningLog, args...) } @@ -1169,7 +1238,7 @@ func ErrorDepth(depth int, args ...interface{}) { } // Errorln logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. func Errorln(args ...interface{}) { logging.println(errorLog, args...) } @@ -1195,7 +1264,7 @@ func FatalDepth(depth int, args ...interface{}) { // Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. func Fatalln(args ...interface{}) { logging.println(fatalLog, args...) 
} diff --git a/vendor/k8s.io/klog/klog_file.go b/vendor/k8s.io/klog/klog_file.go index b76a4e10b..e4010ad4d 100644 --- a/vendor/k8s.io/klog/klog_file.go +++ b/vendor/k8s.io/klog/klog_file.go @@ -97,9 +97,11 @@ var onceLogDirs sync.Once // contains tag ("INFO", "FATAL", etc.) and t. If the file is created // successfully, create also attempts to update the symlink for that tag, ignoring // errors. -func create(tag string, t time.Time) (f *os.File, filename string, err error) { +// The startup argument indicates whether this is the initial startup of klog. +// If startup is true, existing files are opened for appending instead of truncated. +func create(tag string, t time.Time, startup bool) (f *os.File, filename string, err error) { if logging.logFile != "" { - f, err := os.Create(logging.logFile) + f, err := openOrCreate(logging.logFile, startup) if err == nil { return f, logging.logFile, nil } @@ -113,7 +115,7 @@ func create(tag string, t time.Time) (f *os.File, filename string, err error) { var lastErr error for _, dir := range logDirs { fname := filepath.Join(dir, name) - f, err := os.Create(fname) + f, err := openOrCreate(fname, startup) if err == nil { symlink := filepath.Join(dir, link) os.Remove(symlink) // ignore err @@ -124,3 +126,14 @@ func create(tag string, t time.Time) (f *os.File, filename string, err error) { } return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) } + +// The startup argument indicates whether this is the initial startup of klog. +// If startup is true, existing files are opened for appending instead of truncated. +func openOrCreate(name string, startup bool) (*os.File, error) { + if startup { + f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) + return f, err + } + f, err := os.Create(name) + return f, err +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go index 890a39399..5eb957aff 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -92,13 +92,16 @@ func NewOpenAPIData(doc *openapi_v2.Document) (Models, error) { // We believe the schema is a reference, verify that and returns a new // Schema func (d *Definitions) parseReference(s *openapi_v2.Schema, path *Path) (Schema, error) { + // TODO(wrong): a schema with a $ref can have properties. We can ignore them (would be incomplete), but we cannot return an error. if len(s.GetProperties().GetAdditionalProperties()) > 0 { return nil, newSchemaError(path, "unallowed embedded type definition") } + // TODO(wrong): a schema with a $ref can have a type. We can ignore it (would be incomplete), but we cannot return an error. if len(s.GetType().GetValue()) > 0 { return nil, newSchemaError(path, "definition reference can't have a type") } + // TODO(wrong): $refs outside of the definitions are completely valid. We can ignore them (would be incomplete), but we cannot return an error. if !strings.HasPrefix(s.GetXRef(), "#/definitions/") { return nil, newSchemaError(path, "unallowed reference to non-definition %q", s.GetXRef()) } @@ -127,6 +130,7 @@ func (d *Definitions) parseMap(s *openapi_v2.Schema, path *Path) (Schema, error) return nil, newSchemaError(path, "invalid object type") } var sub Schema + // TODO(incomplete): this misses the boolean case as AdditionalProperties is a bool+schema sum type. 
if s.GetAdditionalProperties().GetSchema() == nil { sub = &Arbitrary{ BaseSchema: d.parseBaseSchema(s, path), @@ -157,6 +161,7 @@ func (d *Definitions) parsePrimitive(s *openapi_v2.Schema, path *Path) (Schema, case Number: // do nothing case Integer: // do nothing case Boolean: // do nothing + // TODO(wrong): this misses "null". Would skip the null case (would be incomplete), but we cannot return an error. default: return nil, newSchemaError(path, "Unknown primitive type: %q", t) } @@ -175,6 +180,8 @@ func (d *Definitions) parseArray(s *openapi_v2.Schema, path *Path) (Schema, erro return nil, newSchemaError(path, `array should have type "array"`) } if len(s.GetItems().GetSchema()) != 1 { + // TODO(wrong): Items can have multiple elements. We can ignore Items then (would be incomplete), but we cannot return an error. + // TODO(wrong): "type: array" witohut any items at all is completely valid. return nil, newSchemaError(path, "array should have exactly one sub-item") } sub, err := d.ParseSchema(s.GetItems().GetSchema()[0], path) @@ -227,6 +234,8 @@ func (d *Definitions) parseArbitrary(s *openapi_v2.Schema, path *Path) (Schema, // this function is public, it doesn't leak through the interface. func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, error) { if s.GetXRef() != "" { + // TODO(incomplete): ignoring the rest of s is wrong. As long as there are no conflict, everything from s must be considered + // Reference: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#path-item-object return d.parseReference(s, path) } objectTypes := s.GetType().GetValue() @@ -234,11 +243,15 @@ func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, err case 0: // in the OpenAPI schema served by older k8s versions, object definitions created from structs did not include // the type:object property (they only included the "properties" property), so we need to handle this case + // TODO: validate that we ever published empty, non-nil properties. JSON roundtripping nils them. if s.GetProperties() != nil { + // TODO(wrong): when verifying a non-object later against this, it will be rejected as invalid type. + // TODO(CRD validation schema publishing): we have to filter properties (empty or not) if type=object is not given return d.parseKind(s, path) } else { // Definition has no type and no properties. Treat it as an arbitrary value - // TODO: what if it has additionalProperties or patternProperties? + // TODO(incomplete): what if it has additionalProperties=false or patternProperties? + // ANSWER: parseArbitrary is less strict than it has to be with patternProperties (which is ignored). So this is correct (of course not complete). 
return d.parseArbitrary(s, path) } case 1: @@ -256,6 +269,8 @@ func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, err return d.parsePrimitive(s, path) default: // the OpenAPI generator never generates (nor it ever did in the past) OpenAPI type definitions with multiple types + // TODO(wrong): this is rejecting a completely valid OpenAPI spec + // TODO(CRD validation schema publishing): filter these out return nil, newSchemaError(path, "definitions with multiple types aren't supported") } } diff --git a/vendor/k8s.io/utils/pointer/OWNERS b/vendor/k8s.io/utils/pointer/OWNERS new file mode 100644 index 000000000..0d6392752 --- /dev/null +++ b/vendor/k8s.io/utils/pointer/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- apelisse +- stewart-yu +- thockin +reviewers: +- apelisse +- stewart-yu +- thockin diff --git a/vendor/k8s.io/utils/pointer/pointer.go b/vendor/k8s.io/utils/pointer/pointer.go new file mode 100644 index 000000000..5365a1136 --- /dev/null +++ b/vendor/k8s.io/utils/pointer/pointer.go @@ -0,0 +1,86 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pointer + +import ( + "fmt" + "reflect" +) + +// AllPtrFieldsNil tests whether all pointer fields in a struct are nil. This is useful when, +// for example, an API struct is handled by plugins which need to distinguish +// "no plugin accepted this spec" from "this spec is empty". +// +// This function is only valid for structs and pointers to structs. Any other +// type will cause a panic. Passing a typed nil pointer will return true. +func AllPtrFieldsNil(obj interface{}) bool { + v := reflect.ValueOf(obj) + if !v.IsValid() { + panic(fmt.Sprintf("reflect.ValueOf() produced a non-valid Value for %#v", obj)) + } + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return true + } + v = v.Elem() + } + for i := 0; i < v.NumField(); i++ { + if v.Field(i).Kind() == reflect.Ptr && !v.Field(i).IsNil() { + return false + } + } + return true +} + +// Int32Ptr returns a pointer to an int32 +func Int32Ptr(i int32) *int32 { + return &i +} + +// Int64Ptr returns a pointer to an int64 +func Int64Ptr(i int64) *int64 { + return &i +} + +// Int32PtrDerefOr dereference the int32 ptr and returns it if not nil, +// else returns def. +func Int32PtrDerefOr(ptr *int32, def int32) int32 { + if ptr != nil { + return *ptr + } + return def +} + +// BoolPtr returns a pointer to a bool +func BoolPtr(b bool) *bool { + return &b +} + +// StringPtr returns a pointer to the passed string. +func StringPtr(s string) *string { + return &s +} + +// Float32Ptr returns a pointer to the passed float32. +func Float32Ptr(i float32) *float32 { + return &i +} + +// Float64Ptr returns a pointer to the passed float64. 
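The newly vendored k8s.io/utils/pointer helpers make it easy to populate optional, pointer-typed API fields inline. A small sketch with a hypothetical spec struct:

package main

import (
	"fmt"

	"k8s.io/utils/pointer"
)

// spec is a stand-in for an API type with optional, pointer-typed fields.
type spec struct {
	Replicas *int32
	Paused   *bool
}

func main() {
	s := spec{
		Replicas: pointer.Int32Ptr(3),
		Paused:   pointer.BoolPtr(false),
	}

	// Int32PtrDerefOr supplies a default when the field was left nil.
	fmt.Println(*s.Replicas, *s.Paused, pointer.Int32PtrDerefOr(nil, 1)) // 3 false 1
}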
+func Float64Ptr(i float64) *float64 { + return &i +} diff --git a/vendor/k8s.io/utils/trace/trace.go b/vendor/k8s.io/utils/trace/trace.go index f672d88f2..3b424104a 100644 --- a/vendor/k8s.io/utils/trace/trace.go +++ b/vendor/k8s.io/utils/trace/trace.go @@ -25,32 +25,55 @@ import ( "k8s.io/klog" ) +// Field is a key value pair that provides additional details about the trace. +type Field struct { + Key string + Value interface{} +} + +func (f Field) format() string { + return fmt.Sprintf("%s:%v", f.Key, f.Value) +} + +func writeFields(b *bytes.Buffer, l []Field) { + for i, f := range l { + b.WriteString(f.format()) + if i < len(l)-1 { + b.WriteString(",") + } + } +} + type traceStep struct { stepTime time.Time msg string + fields []Field } // Trace keeps track of a set of "steps" and allows us to log a specific // step if it took longer than its share of the total allowed time type Trace struct { name string + fields []Field startTime time.Time steps []traceStep } -// New creates a Trace with the specified name -func New(name string) *Trace { - return &Trace{name, time.Now(), nil} +// New creates a Trace with the specified name. The name identifies the operation to be traced. The +// Fields add key value pairs to provide additional details about the trace, such as operation inputs. +func New(name string, fields ...Field) *Trace { + return &Trace{name: name, startTime: time.Now(), fields: fields} } -// Step adds a new step with a specific message. Call this at the end of an -// execution step to record how long it took. -func (t *Trace) Step(msg string) { +// Step adds a new step with a specific message. Call this at the end of an execution step to record +// how long it took. The Fields add key value pairs to provide additional details about the trace +// step. 
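The Field support added to utils/trace attaches key/value context to a trace and to its individual steps, and the values end up in the dumped log lines. A minimal sketch (operation name, keys and values are illustrative):

package main

import (
	"time"

	utiltrace "k8s.io/utils/trace"
)

func main() {
	t := utiltrace.New("reconcile etcd", utiltrace.Field{Key: "namespace", Value: "demo"})

	time.Sleep(50 * time.Millisecond) // stand-in for real work
	t.Step("fetched statefulset", utiltrace.Field{Key: "ready", Value: true})

	// Log dumps every step with its fields via klog.
	t.Log()
}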
+func (t *Trace) Step(msg string, fields ...Field) { if t.steps == nil { // traces almost always have less than 6 steps, do this to avoid more than a single allocation t.steps = make([]traceStep, 0, 6) } - t.steps = append(t.steps, traceStep{time.Now(), msg}) + t.steps = append(t.steps, traceStep{stepTime: time.Now(), msg: msg, fields: fields}) } // Log is used to dump all the steps in the Trace @@ -65,12 +88,23 @@ func (t *Trace) logWithStepThreshold(stepThreshold time.Duration) { endTime := time.Now() totalTime := endTime.Sub(t.startTime) - buffer.WriteString(fmt.Sprintf("Trace[%d]: %q (started: %v) (total time: %v):\n", tracenum, t.name, t.startTime, totalTime)) + buffer.WriteString(fmt.Sprintf("Trace[%d]: %q ", tracenum, t.name)) + if len(t.fields) > 0 { + writeFields(&buffer, t.fields) + buffer.WriteString(" ") + } + buffer.WriteString(fmt.Sprintf("(started: %v) (total time: %v):\n", t.startTime, totalTime)) lastStepTime := t.startTime for _, step := range t.steps { stepDuration := step.stepTime.Sub(lastStepTime) if stepThreshold == 0 || stepDuration > stepThreshold || klog.V(4) { - buffer.WriteString(fmt.Sprintf("Trace[%d]: [%v] [%v] %v\n", tracenum, step.stepTime.Sub(t.startTime), stepDuration, step.msg)) + buffer.WriteString(fmt.Sprintf("Trace[%d]: [%v] [%v] ", tracenum, step.stepTime.Sub(t.startTime), stepDuration)) + buffer.WriteString(step.msg) + if len(step.fields) > 0 { + buffer.WriteString(" ") + writeFields(&buffer, step.fields) + } + buffer.WriteString("\n") } lastStepTime = step.stepTime } diff --git a/vendor/modules.txt b/vendor/modules.txt index 8543fc08e..a23f265dc 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,26 +1,26 @@ -# cloud.google.com/go v0.43.0 +# cloud.google.com/go v0.50.0 cloud.google.com/go/compute/metadata # github.com/BurntSushi/toml v0.3.1 github.com/BurntSushi/toml # github.com/Masterminds/goutils v1.1.0 github.com/Masterminds/goutils -# github.com/Masterminds/semver v1.4.2 +# github.com/Masterminds/semver v1.5.0 github.com/Masterminds/semver -# github.com/Masterminds/sprig v2.20.0+incompatible +# github.com/Masterminds/sprig v2.22.0+incompatible github.com/Masterminds/sprig -# github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 -github.com/appscode/jsonpatch # github.com/beorn7/perks v1.0.1 github.com/beorn7/perks/quantile +# github.com/cespare/xxhash/v2 v2.1.1 +github.com/cespare/xxhash/v2 # github.com/cyphar/filepath-securejoin v0.2.2 github.com/cyphar/filepath-securejoin # github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew/spew # github.com/evanphx/json-patch v4.5.0+incompatible github.com/evanphx/json-patch -# github.com/gardener/gardener v0.0.0-20191001105106-32539124646f -github.com/gardener/gardener/pkg/utils/imagevector +# github.com/gardener/gardener v0.33.6 github.com/gardener/gardener/pkg/utils +github.com/gardener/gardener/pkg/utils/imagevector # github.com/ghodss/yaml v1.0.0 github.com/ghodss/yaml # github.com/go-logr/logr v0.1.0 @@ -30,28 +30,34 @@ github.com/go-logr/zapr # github.com/gobwas/glob v0.2.3 github.com/gobwas/glob github.com/gobwas/glob/compiler -github.com/gobwas/glob/syntax github.com/gobwas/glob/match +github.com/gobwas/glob/syntax github.com/gobwas/glob/syntax/ast -github.com/gobwas/glob/util/runes github.com/gobwas/glob/syntax/lexer +github.com/gobwas/glob/util/runes github.com/gobwas/glob/util/strings -# github.com/gogo/protobuf v1.2.1 +# github.com/gogo/protobuf v1.3.1 github.com/gogo/protobuf/proto github.com/gogo/protobuf/sortkeys -# github.com/golang/groupcache 
v0.0.0-20190702054246-869f871628b6 +# github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 github.com/golang/groupcache/lru # github.com/golang/protobuf v1.3.2 github.com/golang/protobuf/proto -github.com/golang/protobuf/ptypes/any -github.com/golang/protobuf/ptypes/timestamp github.com/golang/protobuf/ptypes +github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration +github.com/golang/protobuf/ptypes/timestamp +# github.com/google/go-cmp v0.3.1 +github.com/google/go-cmp/cmp +github.com/google/go-cmp/cmp/internal/diff +github.com/google/go-cmp/cmp/internal/flags +github.com/google/go-cmp/cmp/internal/function +github.com/google/go-cmp/cmp/internal/value # github.com/google/gofuzz v1.0.0 github.com/google/gofuzz # github.com/google/uuid v1.1.1 github.com/google/uuid -# github.com/googleapis/gnostic v0.3.0 +# github.com/googleapis/gnostic v0.3.1 github.com/googleapis/gnostic/OpenAPIv2 github.com/googleapis/gnostic/compiler github.com/googleapis/gnostic/extensions @@ -68,155 +74,210 @@ github.com/hpcloud/tail/ratelimiter github.com/hpcloud/tail/util github.com/hpcloud/tail/watch github.com/hpcloud/tail/winfile -# github.com/huandu/xstrings v1.2.0 +# github.com/huandu/xstrings v1.2.1 github.com/huandu/xstrings -# github.com/imdario/mergo v0.3.7 +# github.com/imdario/mergo v0.3.8 github.com/imdario/mergo -# github.com/json-iterator/go v1.1.7 +# github.com/json-iterator/go v1.1.9 github.com/json-iterator/go # github.com/konsorten/go-windows-terminal-sequences v1.0.2 github.com/konsorten/go-windows-terminal-sequences # github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/matttproud/golang_protobuf_extensions/pbutil +# github.com/mitchellh/copystructure v1.0.0 +github.com/mitchellh/copystructure +# github.com/mitchellh/reflectwalk v1.0.1 +github.com/mitchellh/reflectwalk # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd github.com/modern-go/concurrent # github.com/modern-go/reflect2 v1.0.1 github.com/modern-go/reflect2 -# github.com/onsi/ginkgo v1.10.3 +# github.com/onsi/ginkgo v1.8.0 github.com/onsi/ginkgo github.com/onsi/ginkgo/config github.com/onsi/ginkgo/internal/codelocation +github.com/onsi/ginkgo/internal/containernode github.com/onsi/ginkgo/internal/failer +github.com/onsi/ginkgo/internal/leafnodes github.com/onsi/ginkgo/internal/remote +github.com/onsi/ginkgo/internal/spec +github.com/onsi/ginkgo/internal/spec_iterator +github.com/onsi/ginkgo/internal/specrunner github.com/onsi/ginkgo/internal/suite github.com/onsi/ginkgo/internal/testingtproxy github.com/onsi/ginkgo/internal/writer github.com/onsi/ginkgo/reporters github.com/onsi/ginkgo/reporters/stenographer github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable -github.com/onsi/ginkgo/types -github.com/onsi/ginkgo/internal/spec_iterator -github.com/onsi/ginkgo/internal/containernode -github.com/onsi/ginkgo/internal/leafnodes -github.com/onsi/ginkgo/internal/spec -github.com/onsi/ginkgo/internal/specrunner github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty +github.com/onsi/ginkgo/types # github.com/onsi/gomega v1.5.0 github.com/onsi/gomega +github.com/onsi/gomega/format +github.com/onsi/gomega/gbytes +github.com/onsi/gomega/gexec github.com/onsi/gomega/internal/assertion github.com/onsi/gomega/internal/asyncassertion +github.com/onsi/gomega/internal/oraclematcher github.com/onsi/gomega/internal/testingtsupport github.com/onsi/gomega/matchers -github.com/onsi/gomega/types -github.com/onsi/gomega/internal/oraclematcher 
-github.com/onsi/gomega/format github.com/onsi/gomega/matchers/support/goraph/bipartitegraph github.com/onsi/gomega/matchers/support/goraph/edge github.com/onsi/gomega/matchers/support/goraph/node github.com/onsi/gomega/matchers/support/goraph/util -github.com/onsi/gomega/gbytes -github.com/onsi/gomega/gexec -# github.com/pborman/uuid v1.2.0 -github.com/pborman/uuid +github.com/onsi/gomega/types # github.com/pkg/errors v0.8.1 github.com/pkg/errors -# github.com/prometheus/client_golang v1.1.0 -github.com/prometheus/client_golang/prometheus/promhttp +# github.com/prometheus/client_golang v1.3.0 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal -# github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 +github.com/prometheus/client_golang/prometheus/promhttp +# github.com/prometheus/client_model v0.1.0 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.6.0 +# github.com/prometheus/common v0.7.0 github.com/prometheus/common/expfmt -github.com/prometheus/common/model github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg -# github.com/prometheus/procfs v0.0.3 +github.com/prometheus/common/model +# github.com/prometheus/procfs v0.0.8 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs +github.com/prometheus/procfs/internal/util # github.com/sirupsen/logrus v1.4.2 github.com/sirupsen/logrus -# github.com/spf13/pflag v1.0.3 +# github.com/spf13/pflag v1.0.5 github.com/spf13/pflag -# go.uber.org/atomic v1.4.0 +# go.uber.org/atomic v1.5.1 go.uber.org/atomic -# go.uber.org/multierr v1.1.0 +# go.uber.org/multierr v1.4.0 go.uber.org/multierr -# go.uber.org/zap v1.10.0 +# go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee +go.uber.org/tools/update-license +# go.uber.org/zap v1.13.0 go.uber.org/zap go.uber.org/zap/buffer -go.uber.org/zap/zapcore go.uber.org/zap/internal/bufferpool go.uber.org/zap/internal/color go.uber.org/zap/internal/exit -# golang.org/x/crypto v0.0.0-20191128160524-b544559bb6d1 -golang.org/x/crypto/ssh/terminal -golang.org/x/crypto/scrypt +go.uber.org/zap/zapcore +# golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876 golang.org/x/crypto/pbkdf2 -# golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933 +golang.org/x/crypto/scrypt +golang.org/x/crypto/ssh/terminal +# golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f +golang.org/x/lint +golang.org/x/lint/golint +# golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 golang.org/x/net/context -golang.org/x/net/http2 -golang.org/x/net/http/httpguts -golang.org/x/net/http2/hpack -golang.org/x/net/idna golang.org/x/net/context/ctxhttp -golang.org/x/net/html/charset golang.org/x/net/html golang.org/x/net/html/atom -# golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 +golang.org/x/net/html/charset +golang.org/x/net/http/httpguts +golang.org/x/net/http2 +golang.org/x/net/http2/hpack +golang.org/x/net/idna +# golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 golang.org/x/oauth2 golang.org/x/oauth2/google golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9 +# golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8 golang.org/x/sys/unix golang.org/x/sys/windows # golang.org/x/text v0.3.2 -golang.org/x/text/secure/bidirule -golang.org/x/text/unicode/bidi -golang.org/x/text/unicode/norm golang.org/x/text/encoding golang.org/x/text/encoding/charmap golang.org/x/text/encoding/htmlindex -golang.org/x/text/transform 
-golang.org/x/text/encoding/internal/identifier golang.org/x/text/encoding/internal +golang.org/x/text/encoding/internal/identifier golang.org/x/text/encoding/japanese golang.org/x/text/encoding/korean golang.org/x/text/encoding/simplifiedchinese golang.org/x/text/encoding/traditionalchinese golang.org/x/text/encoding/unicode -golang.org/x/text/language -golang.org/x/text/internal/utf8internal -golang.org/x/text/runes golang.org/x/text/internal/language golang.org/x/text/internal/language/compact golang.org/x/text/internal/tag -# golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 +golang.org/x/text/internal/utf8internal +golang.org/x/text/language +golang.org/x/text/runes +golang.org/x/text/secure/bidirule +golang.org/x/text/transform +golang.org/x/text/unicode/bidi +golang.org/x/text/unicode/norm +# golang.org/x/time v0.0.0-20191024005414-555d28b269f0 golang.org/x/time/rate -# google.golang.org/appengine v1.6.1 +# golang.org/x/tools v0.0.0-20191230220329-2aa90c603ae3 +golang.org/x/tools/go/analysis +golang.org/x/tools/go/analysis/passes/inspect +golang.org/x/tools/go/ast/astutil +golang.org/x/tools/go/ast/inspector +golang.org/x/tools/go/buildutil +golang.org/x/tools/go/gcexportdata +golang.org/x/tools/go/internal/gcimporter +golang.org/x/tools/go/internal/packagesdriver +golang.org/x/tools/go/packages +golang.org/x/tools/go/types/objectpath +golang.org/x/tools/go/types/typeutil +golang.org/x/tools/internal/fastwalk +golang.org/x/tools/internal/gopathwalk +golang.org/x/tools/internal/semver +# golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 +golang.org/x/xerrors +golang.org/x/xerrors/internal +# gomodules.xyz/jsonpatch/v2 v2.0.1 +gomodules.xyz/jsonpatch/v2 +# google.golang.org/appengine v1.6.5 google.golang.org/appengine -google.golang.org/appengine/urlfetch google.golang.org/appengine/internal google.golang.org/appengine/internal/app_identity -google.golang.org/appengine/internal/modules -google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/internal/base google.golang.org/appengine/internal/datastore google.golang.org/appengine/internal/log +google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api +google.golang.org/appengine/internal/urlfetch +google.golang.org/appengine/urlfetch # gopkg.in/fsnotify.v1 v1.4.7 gopkg.in/fsnotify.v1 # gopkg.in/inf.v0 v0.9.1 gopkg.in/inf.v0 # gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 gopkg.in/tomb.v1 -# gopkg.in/yaml.v2 v2.2.2 +# gopkg.in/yaml.v2 v2.2.7 gopkg.in/yaml.v2 -# k8s.io/api v0.0.0-20190826194732-9f642ccb7a30 => k8s.io/api v0.0.0-20190313235455-40a48860b5ab -k8s.io/api/core/v1 -k8s.io/api/apps/v1 +# honnef.co/go/tools v0.0.1-2019.2.3 +honnef.co/go/tools/arg +honnef.co/go/tools/cmd/staticcheck +honnef.co/go/tools/config +honnef.co/go/tools/deprecated +honnef.co/go/tools/facts +honnef.co/go/tools/functions +honnef.co/go/tools/go/types/typeutil +honnef.co/go/tools/internal/cache +honnef.co/go/tools/internal/passes/buildssa +honnef.co/go/tools/internal/renameio +honnef.co/go/tools/internal/sharedcheck +honnef.co/go/tools/lint +honnef.co/go/tools/lint/lintdsl +honnef.co/go/tools/lint/lintutil +honnef.co/go/tools/lint/lintutil/format +honnef.co/go/tools/loader +honnef.co/go/tools/printf +honnef.co/go/tools/simple +honnef.co/go/tools/ssa +honnef.co/go/tools/ssautil +honnef.co/go/tools/staticcheck +honnef.co/go/tools/staticcheck/vrp +honnef.co/go/tools/stylecheck +honnef.co/go/tools/unused +honnef.co/go/tools/version +# k8s.io/api v0.17.0 => k8s.io/api 
v0.0.0-20190918155943-95b840bb6a1f +k8s.io/api/admission/v1beta1 +k8s.io/api/admissionregistration/v1 k8s.io/api/admissionregistration/v1beta1 +k8s.io/api/apps/v1 k8s.io/api/apps/v1beta1 k8s.io/api/apps/v1beta2 k8s.io/api/auditregistration/v1alpha1 @@ -233,6 +294,8 @@ k8s.io/api/batch/v2alpha1 k8s.io/api/certificates/v1beta1 k8s.io/api/coordination/v1 k8s.io/api/coordination/v1beta1 +k8s.io/api/core/v1 +k8s.io/api/discovery/v1alpha1 k8s.io/api/events/v1beta1 k8s.io/api/extensions/v1beta1 k8s.io/api/networking/v1 @@ -250,81 +313,67 @@ k8s.io/api/settings/v1alpha1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -k8s.io/api/admission/v1beta1 -# k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8 => k8s.io/apiextensions-apiserver v0.0.0-20190315093550-53c4693659ed +# k8s.io/apiextensions-apiserver v0.17.0 => k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783 +k8s.io/apiextensions-apiserver/pkg/apis/apiextensions +k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset -k8s.io/apiextensions-apiserver/pkg/apis/apiextensions -k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme -# k8s.io/apimachinery v0.0.0-20190827074644-f378a67c6af3 => k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1 -k8s.io/apimachinery/pkg/runtime -k8s.io/apimachinery/pkg/api/resource -k8s.io/apimachinery/pkg/apis/meta/v1 -k8s.io/apimachinery/pkg/runtime/schema +k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 +k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 +# k8s.io/apimachinery v0.17.0 => k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655 +k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors -k8s.io/apimachinery/pkg/labels -k8s.io/apimachinery/pkg/types -k8s.io/apimachinery/pkg/util/errors -k8s.io/apimachinery/pkg/util/sets -k8s.io/apimachinery/pkg/util/yaml k8s.io/apimachinery/pkg/api/meta +k8s.io/apimachinery/pkg/api/resource +k8s.io/apimachinery/pkg/apis/meta/internalversion +k8s.io/apimachinery/pkg/apis/meta/v1 k8s.io/apimachinery/pkg/apis/meta/v1/unstructured +k8s.io/apimachinery/pkg/apis/meta/v1beta1 k8s.io/apimachinery/pkg/conversion k8s.io/apimachinery/pkg/conversion/queryparams -k8s.io/apimachinery/pkg/util/json -k8s.io/apimachinery/pkg/util/naming -k8s.io/apimachinery/pkg/util/runtime -k8s.io/apimachinery/pkg/runtime/serializer -k8s.io/apimachinery/pkg/util/net -k8s.io/apimachinery/pkg/util/intstr k8s.io/apimachinery/pkg/fields -k8s.io/apimachinery/pkg/selection -k8s.io/apimachinery/pkg/watch -k8s.io/apimachinery/pkg/util/validation/field -k8s.io/apimachinery/pkg/util/validation -k8s.io/apimachinery/pkg/runtime/serializer/streaming -k8s.io/apimachinery/pkg/util/wait -k8s.io/apimachinery/pkg/version -k8s.io/apimachinery/pkg/apis/meta/v1beta1 -k8s.io/apimachinery/third_party/forked/golang/reflect +k8s.io/apimachinery/pkg/labels +k8s.io/apimachinery/pkg/runtime +k8s.io/apimachinery/pkg/runtime/schema +k8s.io/apimachinery/pkg/runtime/serializer k8s.io/apimachinery/pkg/runtime/serializer/json k8s.io/apimachinery/pkg/runtime/serializer/protobuf k8s.io/apimachinery/pkg/runtime/serializer/recognizer +k8s.io/apimachinery/pkg/runtime/serializer/streaming k8s.io/apimachinery/pkg/runtime/serializer/versioning 
-k8s.io/apimachinery/pkg/util/rand +k8s.io/apimachinery/pkg/selection +k8s.io/apimachinery/pkg/types +k8s.io/apimachinery/pkg/util/cache k8s.io/apimachinery/pkg/util/clock -k8s.io/apimachinery/pkg/util/strategicpatch -k8s.io/apimachinery/pkg/util/uuid +k8s.io/apimachinery/pkg/util/diff +k8s.io/apimachinery/pkg/util/errors k8s.io/apimachinery/pkg/util/framer +k8s.io/apimachinery/pkg/util/intstr +k8s.io/apimachinery/pkg/util/json k8s.io/apimachinery/pkg/util/mergepatch +k8s.io/apimachinery/pkg/util/naming +k8s.io/apimachinery/pkg/util/net +k8s.io/apimachinery/pkg/util/rand +k8s.io/apimachinery/pkg/util/runtime +k8s.io/apimachinery/pkg/util/sets +k8s.io/apimachinery/pkg/util/strategicpatch +k8s.io/apimachinery/pkg/util/uuid +k8s.io/apimachinery/pkg/util/validation +k8s.io/apimachinery/pkg/util/validation/field +k8s.io/apimachinery/pkg/util/wait +k8s.io/apimachinery/pkg/util/yaml +k8s.io/apimachinery/pkg/version +k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json -k8s.io/apimachinery/pkg/util/cache -k8s.io/apimachinery/pkg/util/diff -k8s.io/apimachinery/pkg/apis/meta/internalversion -# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => k8s.io/client-go v11.0.0+incompatible -k8s.io/client-go/kubernetes/scheme -k8s.io/client-go/plugin/pkg/client/auth/gcp -k8s.io/client-go/rest -k8s.io/client-go/util/retry +k8s.io/apimachinery/third_party/forked/golang/reflect +# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90 k8s.io/client-go/discovery k8s.io/client-go/discovery/cached/memory -k8s.io/client-go/kubernetes -k8s.io/client-go/restmapper -k8s.io/client-go/tools/clientcmd -k8s.io/client-go/util/jsonpath -k8s.io/client-go/pkg/version -k8s.io/client-go/plugin/pkg/client/auth/exec -k8s.io/client-go/rest/watch -k8s.io/client-go/tools/clientcmd/api -k8s.io/client-go/tools/metrics -k8s.io/client-go/transport -k8s.io/client-go/util/cert -k8s.io/client-go/util/flowcontrol k8s.io/client-go/dynamic -k8s.io/client-go/tools/leaderelection -k8s.io/client-go/tools/leaderelection/resourcelock -k8s.io/client-go/tools/record +k8s.io/client-go/kubernetes +k8s.io/client-go/kubernetes/scheme +k8s.io/client-go/kubernetes/typed/admissionregistration/v1 k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1 k8s.io/client-go/kubernetes/typed/apps/v1 k8s.io/client-go/kubernetes/typed/apps/v1beta1 @@ -344,6 +393,7 @@ k8s.io/client-go/kubernetes/typed/certificates/v1beta1 k8s.io/client-go/kubernetes/typed/coordination/v1 k8s.io/client-go/kubernetes/typed/coordination/v1beta1 k8s.io/client-go/kubernetes/typed/core/v1 +k8s.io/client-go/kubernetes/typed/discovery/v1alpha1 k8s.io/client-go/kubernetes/typed/events/v1beta1 k8s.io/client-go/kubernetes/typed/extensions/v1beta1 k8s.io/client-go/kubernetes/typed/networking/v1 @@ -361,80 +411,98 @@ k8s.io/client-go/kubernetes/typed/settings/v1alpha1 k8s.io/client-go/kubernetes/typed/storage/v1 k8s.io/client-go/kubernetes/typed/storage/v1alpha1 k8s.io/client-go/kubernetes/typed/storage/v1beta1 -k8s.io/client-go/tools/auth -k8s.io/client-go/tools/clientcmd/api/latest -k8s.io/client-go/util/homedir -k8s.io/client-go/third_party/forked/golang/template k8s.io/client-go/pkg/apis/clientauthentication k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1 k8s.io/client-go/pkg/apis/clientauthentication/v1beta1 -k8s.io/client-go/util/connrotation -k8s.io/client-go/util/keyutil -k8s.io/client-go/tools/record/util -k8s.io/client-go/tools/reference 
+k8s.io/client-go/pkg/version +k8s.io/client-go/plugin/pkg/client/auth/exec +k8s.io/client-go/plugin/pkg/client/auth/gcp +k8s.io/client-go/rest +k8s.io/client-go/rest/watch +k8s.io/client-go/restmapper +k8s.io/client-go/third_party/forked/golang/template +k8s.io/client-go/tools/auth k8s.io/client-go/tools/cache -k8s.io/client-go/util/workqueue +k8s.io/client-go/tools/clientcmd +k8s.io/client-go/tools/clientcmd/api +k8s.io/client-go/tools/clientcmd/api/latest k8s.io/client-go/tools/clientcmd/api/v1 +k8s.io/client-go/tools/leaderelection +k8s.io/client-go/tools/leaderelection/resourcelock +k8s.io/client-go/tools/metrics k8s.io/client-go/tools/pager -# k8s.io/helm v2.14.3+incompatible => k8s.io/helm v2.13.1+incompatible +k8s.io/client-go/tools/record +k8s.io/client-go/tools/record/util +k8s.io/client-go/tools/reference +k8s.io/client-go/transport +k8s.io/client-go/util/cert +k8s.io/client-go/util/connrotation +k8s.io/client-go/util/flowcontrol +k8s.io/client-go/util/homedir +k8s.io/client-go/util/jsonpath +k8s.io/client-go/util/keyutil +k8s.io/client-go/util/retry +k8s.io/client-go/util/workqueue +# k8s.io/helm v2.16.1+incompatible => k8s.io/helm v2.13.1+incompatible k8s.io/helm/pkg/chartutil k8s.io/helm/pkg/engine +k8s.io/helm/pkg/ignore k8s.io/helm/pkg/manifest k8s.io/helm/pkg/proto/hapi/chart -k8s.io/helm/pkg/timeconv -k8s.io/helm/pkg/ignore +k8s.io/helm/pkg/proto/hapi/release k8s.io/helm/pkg/proto/hapi/version +k8s.io/helm/pkg/releaseutil k8s.io/helm/pkg/sympath +k8s.io/helm/pkg/timeconv k8s.io/helm/pkg/version -k8s.io/helm/pkg/releaseutil -k8s.io/helm/pkg/proto/hapi/release -# k8s.io/klog v0.3.3 => k8s.io/klog v0.1.0 +# k8s.io/klog v1.0.0 k8s.io/klog -# k8s.io/kube-openapi v0.0.0-20190722073852-5e22f3d471e6 => k8s.io/kube-openapi v0.0.0-20190320154901-5e45bb682580 +# k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a => k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf k8s.io/kube-openapi/pkg/util/proto -# k8s.io/utils v0.0.0-20190712204705-3dccf664f023 -k8s.io/utils/integer +# k8s.io/utils v0.0.0-20191218082557-f07c713de883 k8s.io/utils/buffer +k8s.io/utils/integer +k8s.io/utils/pointer k8s.io/utils/trace -# sigs.k8s.io/controller-runtime v0.2.0-beta.4 => sigs.k8s.io/controller-runtime v0.2.0-beta.2 +# sigs.k8s.io/controller-runtime v0.4.0 sigs.k8s.io/controller-runtime -sigs.k8s.io/controller-runtime/pkg/log/zap -sigs.k8s.io/controller-runtime/pkg/scheme -sigs.k8s.io/controller-runtime/pkg/client -sigs.k8s.io/controller-runtime/pkg/controller/controllerutil -sigs.k8s.io/controller-runtime/pkg/manager sigs.k8s.io/controller-runtime/pkg/builder +sigs.k8s.io/controller-runtime/pkg/cache +sigs.k8s.io/controller-runtime/pkg/cache/internal +sigs.k8s.io/controller-runtime/pkg/client +sigs.k8s.io/controller-runtime/pkg/client/apiutil sigs.k8s.io/controller-runtime/pkg/client/config -sigs.k8s.io/controller-runtime/pkg/log -sigs.k8s.io/controller-runtime/pkg/manager/signals -sigs.k8s.io/controller-runtime/pkg/reconcile +sigs.k8s.io/controller-runtime/pkg/controller +sigs.k8s.io/controller-runtime/pkg/controller/controllerutil +sigs.k8s.io/controller-runtime/pkg/conversion sigs.k8s.io/controller-runtime/pkg/envtest -sigs.k8s.io/controller-runtime/pkg/client/apiutil -sigs.k8s.io/controller-runtime/pkg/cache +sigs.k8s.io/controller-runtime/pkg/envtest/printer +sigs.k8s.io/controller-runtime/pkg/event +sigs.k8s.io/controller-runtime/pkg/handler +sigs.k8s.io/controller-runtime/pkg/healthz +sigs.k8s.io/controller-runtime/pkg/internal/controller 
+sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics sigs.k8s.io/controller-runtime/pkg/internal/log sigs.k8s.io/controller-runtime/pkg/internal/recorder sigs.k8s.io/controller-runtime/pkg/leaderelection +sigs.k8s.io/controller-runtime/pkg/log +sigs.k8s.io/controller-runtime/pkg/log/zap +sigs.k8s.io/controller-runtime/pkg/manager +sigs.k8s.io/controller-runtime/pkg/manager/signals sigs.k8s.io/controller-runtime/pkg/metrics +sigs.k8s.io/controller-runtime/pkg/predicate +sigs.k8s.io/controller-runtime/pkg/reconcile sigs.k8s.io/controller-runtime/pkg/recorder sigs.k8s.io/controller-runtime/pkg/runtime/inject -sigs.k8s.io/controller-runtime/pkg/webhook -sigs.k8s.io/controller-runtime/pkg/webhook/conversion -sigs.k8s.io/controller-runtime/pkg/controller -sigs.k8s.io/controller-runtime/pkg/handler -sigs.k8s.io/controller-runtime/pkg/predicate +sigs.k8s.io/controller-runtime/pkg/scheme sigs.k8s.io/controller-runtime/pkg/source +sigs.k8s.io/controller-runtime/pkg/source/internal +sigs.k8s.io/controller-runtime/pkg/webhook sigs.k8s.io/controller-runtime/pkg/webhook/admission -sigs.k8s.io/controller-runtime/pkg/envtest/printer -sigs.k8s.io/controller-runtime/pkg/cache/internal +sigs.k8s.io/controller-runtime/pkg/webhook/conversion sigs.k8s.io/controller-runtime/pkg/webhook/internal/certwatcher sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics -sigs.k8s.io/controller-runtime/pkg/conversion -sigs.k8s.io/controller-runtime/pkg/internal/controller -sigs.k8s.io/controller-runtime/pkg/event -sigs.k8s.io/controller-runtime/pkg/source/internal -sigs.k8s.io/controller-runtime/pkg/internal/objectutil -sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics -# sigs.k8s.io/testing_frameworks v0.1.1 +# sigs.k8s.io/testing_frameworks v0.1.2 sigs.k8s.io/testing_frameworks/integration sigs.k8s.io/testing_frameworks/integration/addr sigs.k8s.io/testing_frameworks/integration/internal diff --git a/vendor/sigs.k8s.io/controller-runtime/.gitignore b/vendor/sigs.k8s.io/controller-runtime/.gitignore index c25057f68..f749874ce 100644 --- a/vendor/sigs.k8s.io/controller-runtime/.gitignore +++ b/vendor/sigs.k8s.io/controller-runtime/.gitignore @@ -16,3 +16,6 @@ *.swp *.swo *~ + +# Vscode files +.vscode diff --git a/vendor/sigs.k8s.io/controller-runtime/.travis.yml b/vendor/sigs.k8s.io/controller-runtime/.travis.yml deleted file mode 100644 index 1562e222f..000000000 --- a/vendor/sigs.k8s.io/controller-runtime/.travis.yml +++ /dev/null @@ -1,31 +0,0 @@ -language: go - -os: - - linux - - osx - -cache: - directories: - - $HOME/.cache/go-build - - $GOPATH/pkg/mod - -go: -- "1.12" - -git: - depth: 3 - -go_import_path: sigs.k8s.io/controller-runtime - -install: -- go get -u github.com/golang/dep/cmd/dep -#- go get -u golang.org/x/lint/golint -- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.15.0 - -script: -- GO111MODULE=on TRACE=1 ./hack/check-everything.sh - - -# TBD. Suppressing for now. -notifications: - email: false diff --git a/vendor/sigs.k8s.io/controller-runtime/CONTRIBUTING.md b/vendor/sigs.k8s.io/controller-runtime/CONTRIBUTING.md index 063016c2b..98617fa75 100644 --- a/vendor/sigs.k8s.io/controller-runtime/CONTRIBUTING.md +++ b/vendor/sigs.k8s.io/controller-runtime/CONTRIBUTING.md @@ -18,8 +18,7 @@ Please see https://git.k8s.io/community/CLA.md for more info 1. 
Setup tools ```bash - $ go get -u github.com/golang/dep/cmd/dep - $ curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.15.0 + $ curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.17.1 ``` 1. Test ```bash diff --git a/vendor/sigs.k8s.io/controller-runtime/FAQ.md b/vendor/sigs.k8s.io/controller-runtime/FAQ.md index ab926e7e3..cfc299792 100644 --- a/vendor/sigs.k8s.io/controller-runtime/FAQ.md +++ b/vendor/sigs.k8s.io/controller-runtime/FAQ.md @@ -5,7 +5,7 @@ **A**: Each controller should only reconcile one object type. Other affected objects should be mapped to a single type of root object, using the `EnqueueRequestForOwner` or `EnqueueRequestsFromMapFunc` event -handlers, and potentially indicies. Then, your Reconcile method should +handlers, and potentially indices. Then, your Reconcile method should attempt to reconcile *all* state for that given root objects. ### Q: How do I have different logic in my reconciler for different types of events (e.g. create, update, delete)? diff --git a/vendor/sigs.k8s.io/controller-runtime/Gopkg.lock b/vendor/sigs.k8s.io/controller-runtime/Gopkg.lock deleted file mode 100644 index dbd73bbbc..000000000 --- a/vendor/sigs.k8s.io/controller-runtime/Gopkg.lock +++ /dev/null @@ -1,985 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - digest = "1:5c3894b2aa4d6bead0ceeea6831b305d62879c871780e7b76296ded1b004bc57" - name = "cloud.google.com/go" - packages = ["compute/metadata"] - pruneopts = "UT" - revision = "64a2037ec6be8a4b0c1d1f706ed35b428b989239" - version = "v0.26.0" - -[[projects]] - digest = "1:327b9226c8ea5f1cd9952ba859bb7c335cab40fd8781c4a790ef259b0c5fbc40" - name = "github.com/Azure/go-autorest" - packages = [ - "autorest", - "autorest/adal", - "autorest/azure", - "autorest/date", - "logger", - "version", - ] - pruneopts = "UT" - revision = "39013ecb48eaf6ced3f4e3e1d95515140ce6b3cf" - version = "v10.15.2" - -[[projects]] - digest = "1:d1665c44bd5db19aaee18d1b6233c99b0b9a986e8bccb24ef54747547a48027f" - name = "github.com/PuerkitoBio/purell" - packages = ["."] - pruneopts = "UT" - revision = "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4" - version = "v1.1.0" - -[[projects]] - branch = "master" - digest = "1:c739832d67eb1e9cc478a19cc1a1ccd78df0397bf8a32978b759152e205f644b" - name = "github.com/PuerkitoBio/urlesc" - packages = ["."] - pruneopts = "UT" - revision = "de5bf2ad457846296e2031421a34e2568e304e35" - -[[projects]] - digest = "1:464aef731a5f82ded547c62e249a2e9ec59fbbc9ddab53cda7b9857852630a61" - name = "github.com/appscode/jsonpatch" - packages = ["."] - pruneopts = "UT" - revision = "7c0e3b262f30165a8ec3d0b4c6059fd92703bfb2" - version = "1.0.0" - -[[projects]] - branch = "master" - digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" - name = "github.com/beorn7/perks" - packages = ["quantile"] - pruneopts = "UT" - revision = "3a771d992973f24aa725d07868b467d1ddfceafb" - -[[projects]] - digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" - name = "github.com/davecgh/go-spew" - packages = ["spew"] - pruneopts = "UT" - revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" - version = "v1.1.1" - -[[projects]] - digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55" - name = "github.com/dgrijalva/jwt-go" - packages = ["."] - pruneopts = "UT" - revision = 
"06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" - version = "v3.2.0" - -[[projects]] - digest = "1:899234af23e5793c34e06fd397f86ba33af5307b959b6a7afd19b63db065a9d7" - name = "github.com/emicklei/go-restful" - packages = [ - ".", - "log", - ] - pruneopts = "UT" - revision = "3eb9738c1697594ea6e71a7156a9bb32ed216cf0" - version = "v2.8.0" - -[[projects]] - digest = "1:f1f2bd73c025d24c3b93abf6364bccb802cf2fdedaa44360804c67800e8fab8d" - name = "github.com/evanphx/json-patch" - packages = ["."] - pruneopts = "UT" - revision = "72bf35d0ff611848c1dc9df0f976c81192392fa5" - version = "v4.1.0" - -[[projects]] - branch = "master" - digest = "1:99f994f4c2f71aeea1810d6061f3b52f3d2ee0084cfcb1ebbf40e00a0d0666f6" - name = "github.com/go-logr/logr" - packages = [ - ".", - "testing", - ] - pruneopts = "UT" - revision = "9fb12b3b21c5415d16ac18dc5cd42c1cfdd40c4e" - -[[projects]] - digest = "1:ce43ad4015e7cdad3f0e8f2c8339439dd4470859a828d2a6988b0f713699e94a" - name = "github.com/go-logr/zapr" - packages = ["."] - pruneopts = "UT" - revision = "7536572e8d55209135cd5e7ccf7fce43dca217ab" - -[[projects]] - digest = "1:2997679181d901ac8aaf4330d11138ecf3974c6d3334995ff36f20cbd597daf8" - name = "github.com/go-openapi/jsonpointer" - packages = ["."] - pruneopts = "UT" - revision = "3a0015ad55fa9873f41605d3e8f28cd279c32ab2" - version = "0.16.0" - -[[projects]] - digest = "1:1ae3f233d75a731b164ca9feafd8ed646cbedf1784095876ed6988ce8aa88b1f" - name = "github.com/go-openapi/jsonreference" - packages = ["."] - pruneopts = "UT" - revision = "3fb327e6747da3043567ee86abd02bb6376b6be2" - version = "0.16.0" - -[[projects]] - digest = "1:3e5bdbd2a071c72c778c28fd7b5dfde95cdfbcef412f364377e031877205e418" - name = "github.com/go-openapi/spec" - packages = ["."] - pruneopts = "UT" - revision = "384415f06ee238aae1df5caad877de6ceac3a5c4" - version = "0.16.0" - -[[projects]] - digest = "1:c80984d4a9bb79539743aff5af91b595d84f513700150b0ed73c1697d1200d54" - name = "github.com/go-openapi/swag" - packages = ["."] - pruneopts = "UT" - revision = "becd2f08beafcca035645a8a101e0e3e18140458" - version = "0.16.0" - -[[projects]] - digest = "1:34e709f36fd4f868fb00dbaf8a6cab4c1ae685832d392874ba9d7c5dec2429d1" - name = "github.com/gogo/protobuf" - packages = [ - "proto", - "sortkeys", - ] - pruneopts = "UT" - revision = "636bf0302bc95575d69441b25a2603156ffdddf1" - version = "v1.1.1" - -[[projects]] - branch = "master" - digest = "1:3fb07f8e222402962fa190eb060608b34eddfb64562a18e2167df2de0ece85d8" - name = "github.com/golang/groupcache" - packages = ["lru"] - pruneopts = "UT" - revision = "24b0969c4cb722950103eed87108c8d291a8df00" - -[[projects]] - digest = "1:4c0989ca0bcd10799064318923b9bc2db6b4d6338dd75f3f2d86c3511aaaf5cf" - name = "github.com/golang/protobuf" - packages = [ - "proto", - "ptypes", - "ptypes/any", - "ptypes/duration", - "ptypes/timestamp", - ] - pruneopts = "UT" - revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" - version = "v1.2.0" - -[[projects]] - branch = "master" - digest = "1:3ee90c0d94da31b442dde97c99635aaafec68d0b8a3c12ee2075c6bdabeec6bb" - name = "github.com/google/gofuzz" - packages = ["."] - pruneopts = "UT" - revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" - -[[projects]] - digest = "1:65c4414eeb350c47b8de71110150d0ea8a281835b1f386eacaa3ad7325929c21" - name = "github.com/googleapis/gnostic" - packages = [ - "OpenAPIv2", - "compiler", - "extensions", - ] - pruneopts = "UT" - revision = "7c663266750e7d82587642f65e60bc4083f1f84e" - version = "v0.2.0" - -[[projects]] - branch = "master" - digest = 
"1:349090dc3e864fe83388be8e8949df6e22607c477644abcc3b530d1b2c7bb714" - name = "github.com/gophercloud/gophercloud" - packages = [ - ".", - "openstack", - "openstack/identity/v2/tenants", - "openstack/identity/v2/tokens", - "openstack/identity/v3/tokens", - "openstack/utils", - "pagination", - ] - pruneopts = "UT" - revision = "199db9a3a2bfa55802ab074ab8fed67fce2a2791" - -[[projects]] - branch = "master" - digest = "1:cf296baa185baae04a9a7004efee8511d08e2f5f51d4cbe5375da89722d681db" - name = "github.com/hashicorp/golang-lru" - packages = [ - ".", - "simplelru", - ] - pruneopts = "UT" - revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3" - -[[projects]] - digest = "1:a1038ef593beb4771c8f0f9c26e8b00410acd800af5c6864651d9bf160ea1813" - name = "github.com/hpcloud/tail" - packages = [ - ".", - "ratelimiter", - "util", - "watch", - "winfile", - ] - pruneopts = "UT" - revision = "a30252cb686a21eb2d0b98132633053ec2f7f1e5" - version = "v1.0.0" - -[[projects]] - digest = "1:8eb1de8112c9924d59bf1d3e5c26f5eaa2bfc2a5fcbb92dc1c2e4546d695f277" - name = "github.com/imdario/mergo" - packages = ["."] - pruneopts = "UT" - revision = "9f23e2d6bd2a77f959b2bf6acdbefd708a83a4a4" - version = "v0.3.6" - -[[projects]] - digest = "1:3e551bbb3a7c0ab2a2bf4660e7fcad16db089fdcfbb44b0199e62838038623ea" - name = "github.com/json-iterator/go" - packages = ["."] - pruneopts = "UT" - revision = "1624edc4454b8682399def8740d46db5e4362ba4" - version = "1.1.5" - -[[projects]] - branch = "master" - digest = "1:84a5a2b67486d5d67060ac393aa255d05d24ed5ee41daecd5635ec22657b6492" - name = "github.com/mailru/easyjson" - packages = [ - "buffer", - "jlexer", - "jwriter", - ] - pruneopts = "UT" - revision = "60711f1a8329503b04e1c88535f419d0bb440bff" - -[[projects]] - digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" - name = "github.com/matttproud/golang_protobuf_extensions" - packages = ["pbutil"] - pruneopts = "UT" - revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" - version = "v1.0.1" - -[[projects]] - digest = "1:33422d238f147d247752996a26574ac48dcf472976eda7f5134015f06bf16563" - name = "github.com/modern-go/concurrent" - packages = ["."] - pruneopts = "UT" - revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" - version = "1.0.3" - -[[projects]] - digest = "1:e32bdbdb7c377a07a9a46378290059822efdce5c8d96fe71940d87cb4f918855" - name = "github.com/modern-go/reflect2" - packages = ["."] - pruneopts = "UT" - revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" - version = "1.0.1" - -[[projects]] - digest = "1:42e29deef12327a69123b9cb2cb45fee4af5c12c2a23c6e477338279a052703f" - name = "github.com/onsi/ginkgo" - packages = [ - ".", - "config", - "internal/codelocation", - "internal/containernode", - "internal/failer", - "internal/leafnodes", - "internal/remote", - "internal/spec", - "internal/spec_iterator", - "internal/specrunner", - "internal/suite", - "internal/testingtproxy", - "internal/writer", - "reporters", - "reporters/stenographer", - "reporters/stenographer/support/go-colorable", - "reporters/stenographer/support/go-isatty", - "types", - ] - pruneopts = "UT" - revision = "3774a09d95489ccaa16032e0770d08ea77ba6184" - version = "v1.6.0" - -[[projects]] - digest = "1:58418daa018f162c520512737c19ecd42582524b0f900f2eee7c212ce55cf0da" - name = "github.com/onsi/gomega" - packages = [ - ".", - "format", - "gbytes", - "gexec", - "internal/assertion", - "internal/asyncassertion", - "internal/oraclematcher", - "internal/testingtsupport", - "matchers", - "matchers/support/goraph/bipartitegraph", - 
"matchers/support/goraph/edge", - "matchers/support/goraph/node", - "matchers/support/goraph/util", - "types", - ] - pruneopts = "UT" - revision = "b6ea1ea48f981d0f615a154a45eabb9dd466556d" - version = "v1.4.1" - -[[projects]] - digest = "1:361de06aa7ae272616cbe71c3994a654cc6316324e30998e650f7765b20c5b33" - name = "github.com/pborman/uuid" - packages = ["."] - pruneopts = "UT" - revision = "e790cca94e6cc75c7064b1332e63811d4aae1a53" - version = "v1.1" - -[[projects]] - digest = "1:20a8a18488bdef471795af0413d9a3c9c35dc24ca93342329b884ba3c15cbaab" - name = "github.com/prometheus/client_golang" - packages = [ - "prometheus", - "prometheus/internal", - "prometheus/promhttp", - ] - pruneopts = "UT" - revision = "1cafe34db7fdec6022e17e00e1c1ea501022f3e4" - version = "v0.9.0" - -[[projects]] - branch = "master" - digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" - name = "github.com/prometheus/client_model" - packages = ["go"] - pruneopts = "UT" - revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" - -[[projects]] - branch = "master" - digest = "1:63b68062b8968092eb86bedc4e68894bd096ea6b24920faca8b9dcf451f54bb5" - name = "github.com/prometheus/common" - packages = [ - "expfmt", - "internal/bitbucket.org/ww/goautoneg", - "model", - ] - pruneopts = "UT" - revision = "c7de2306084e37d54b8be01f3541a8464345e9a5" - -[[projects]] - branch = "master" - digest = "1:8c49953a1414305f2ff5465147ee576dd705487c35b15918fcd4efdc0cb7a290" - name = "github.com/prometheus/procfs" - packages = [ - ".", - "internal/util", - "nfs", - "xfs", - ] - pruneopts = "UT" - revision = "05ee40e3a273f7245e8777337fc7b46e533a9a92" - -[[projects]] - digest = "1:dab83a1bbc7ad3d7a6ba1a1cc1760f25ac38cdf7d96a5cdd55cd915a4f5ceaf9" - name = "github.com/spf13/pflag" - packages = ["."] - pruneopts = "UT" - revision = "9a97c102cda95a86cec2345a6f09f55a939babf5" - version = "v1.0.2" - -[[projects]] - digest = "1:3c1a69cdae3501bf75e76d0d86dc6f2b0a7421bc205c0cb7b96b19eed464a34d" - name = "go.uber.org/atomic" - packages = ["."] - pruneopts = "UT" - revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289" - version = "v1.3.2" - -[[projects]] - digest = "1:60bf2a5e347af463c42ed31a493d817f8a72f102543060ed992754e689805d1a" - name = "go.uber.org/multierr" - packages = ["."] - pruneopts = "UT" - revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" - version = "v1.1.0" - -[[projects]] - digest = "1:c52caf7bd44f92e54627a31b85baf06a68333a196b3d8d241480a774733dcf8b" - name = "go.uber.org/zap" - packages = [ - ".", - "buffer", - "internal/bufferpool", - "internal/color", - "internal/exit", - "zapcore", - ] - pruneopts = "UT" - revision = "ff33455a0e382e8a81d14dd7c922020b6b5e7982" - version = "v1.9.1" - -[[projects]] - branch = "master" - digest = "1:3f3a05ae0b95893d90b9b3b5afdb79a9b3d96e4e36e099d841ae602e4aca0da8" - name = "golang.org/x/crypto" - packages = ["ssh/terminal"] - pruneopts = "UT" - revision = "614d502a4dac94afa3a6ce146bd1736da82514c6" - -[[projects]] - branch = "master" - digest = "1:ff58bc124488348ba2ca02f62de899d07ba3e7dce51da02557bfc9a8052608db" - name = "golang.org/x/net" - packages = [ - "context", - "context/ctxhttp", - "html", - "html/atom", - "html/charset", - "http/httpguts", - "http2", - "http2/hpack", - "idna", - ] - pruneopts = "UT" - revision = "922f4815f713f213882e8ef45e0d315b164d705c" - -[[projects]] - branch = "master" - digest = "1:f645667d687fc8bf228865a2c5455824ef05bad08841e673673ef2bb89ac5b90" - name = "golang.org/x/oauth2" - packages = [ - ".", - "google", - "internal", - "jws", - "jwt", - ] - 
pruneopts = "UT" - revision = "d2e6202438beef2727060aa7cabdd924d92ebfd9" - -[[projects]] - branch = "master" - digest = "1:7710ea76bd73d3154e86eaf2db8b36a6dffbd6114e4e37b516f1d232c03ddd8d" - name = "golang.org/x/sys" - packages = [ - "unix", - "windows", - ] - pruneopts = "UT" - revision = "11551d06cbcc94edc80a0facaccbda56473c19c1" - -[[projects]] - digest = "1:bb8277a2ca2bcad6ff7f413b939375924099be908cedd1314baa21ecd08df477" - name = "golang.org/x/text" - packages = [ - "collate", - "collate/build", - "encoding", - "encoding/charmap", - "encoding/htmlindex", - "encoding/internal", - "encoding/internal/identifier", - "encoding/japanese", - "encoding/korean", - "encoding/simplifiedchinese", - "encoding/traditionalchinese", - "encoding/unicode", - "internal/colltab", - "internal/gen", - "internal/tag", - "internal/triegen", - "internal/ucd", - "internal/utf8internal", - "language", - "runes", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable", - "width", - ] - pruneopts = "UT" - revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" - version = "v0.3.0" - -[[projects]] - branch = "master" - digest = "1:c9e7a4b4d47c0ed205d257648b0e5b0440880cb728506e318f8ac7cd36270bc4" - name = "golang.org/x/time" - packages = ["rate"] - pruneopts = "UT" - revision = "fbb02b2291d28baffd63558aa44b4b56f178d650" - -[[projects]] - digest = "1:c8907869850adaa8bd7631887948d0684f3787d0912f1c01ab72581a6c34432e" - name = "google.golang.org/appengine" - packages = [ - ".", - "internal", - "internal/app_identity", - "internal/base", - "internal/datastore", - "internal/log", - "internal/modules", - "internal/remote_api", - "internal/urlfetch", - "urlfetch", - ] - pruneopts = "UT" - revision = "b1f26356af11148e710935ed1ac8a7f5702c7612" - version = "v1.1.0" - -[[projects]] - digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd" - name = "gopkg.in/fsnotify.v1" - packages = ["."] - pruneopts = "UT" - revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" - source = "https://github.com/fsnotify/fsnotify.git" - version = "v1.4.7" - -[[projects]] - digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a" - name = "gopkg.in/inf.v0" - packages = ["."] - pruneopts = "UT" - revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" - version = "v0.9.1" - -[[projects]] - branch = "v1" - digest = "1:0caa92e17bc0b65a98c63e5bc76a9e844cd5e56493f8fdbb28fad101a16254d9" - name = "gopkg.in/tomb.v1" - packages = ["."] - pruneopts = "UT" - revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8" - -[[projects]] - digest = "1:342378ac4dcb378a5448dd723f0784ae519383532f5e70ade24132c4c8693202" - name = "gopkg.in/yaml.v2" - packages = ["."] - pruneopts = "UT" - revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" - version = "v2.2.1" - -[[projects]] - digest = "1:baf5a7da864b8f493ff372a9486c2d6a7fae8363699af2dc0ef6dba9a869928f" - name = "k8s.io/api" - packages = [ - "admission/v1beta1", - "admissionregistration/v1beta1", - "apps/v1", - "apps/v1beta1", - "apps/v1beta2", - "auditregistration/v1alpha1", - "authentication/v1", - "authentication/v1beta1", - "authorization/v1", - "authorization/v1beta1", - "autoscaling/v1", - "autoscaling/v2beta1", - "autoscaling/v2beta2", - "batch/v1", - "batch/v1beta1", - "batch/v2alpha1", - "certificates/v1beta1", - "coordination/v1", - "coordination/v1beta1", - "core/v1", - "events/v1beta1", - "extensions/v1beta1", - "networking/v1", - "networking/v1beta1", - "node/v1alpha1", - "node/v1beta1", - "policy/v1beta1", - 
"rbac/v1", - "rbac/v1alpha1", - "rbac/v1beta1", - "scheduling/v1", - "scheduling/v1alpha1", - "scheduling/v1beta1", - "settings/v1alpha1", - "storage/v1", - "storage/v1alpha1", - "storage/v1beta1", - ] - pruneopts = "UT" - revision = "6e4e0e4f393bf5e8bbff570acd13217aa5a770cd" - version = "kubernetes-1.14.1" - -[[projects]] - digest = "1:65226935ef0a84716e327aa46c5a11cf2158ca6d0c79b379202d0b2c2be0e9bc" - name = "k8s.io/apiextensions-apiserver" - packages = [ - "pkg/apis/apiextensions", - "pkg/apis/apiextensions/v1beta1", - "pkg/client/clientset/clientset", - "pkg/client/clientset/clientset/scheme", - "pkg/client/clientset/clientset/typed/apiextensions/v1beta1", - ] - pruneopts = "UT" - revision = "727a075fdec8319bf095330e344b3ccc668abc73" - version = "kubernetes-1.14.1" - -[[projects]] - digest = "1:2616411ee46c05b1488f127c171e9204695d15d1e654fe79ea2d204f96f28070" - name = "k8s.io/apimachinery" - packages = [ - "pkg/api/errors", - "pkg/api/meta", - "pkg/api/resource", - "pkg/apis/meta/internalversion", - "pkg/apis/meta/v1", - "pkg/apis/meta/v1/unstructured", - "pkg/apis/meta/v1beta1", - "pkg/conversion", - "pkg/conversion/queryparams", - "pkg/fields", - "pkg/labels", - "pkg/runtime", - "pkg/runtime/schema", - "pkg/runtime/serializer", - "pkg/runtime/serializer/json", - "pkg/runtime/serializer/protobuf", - "pkg/runtime/serializer/recognizer", - "pkg/runtime/serializer/streaming", - "pkg/runtime/serializer/versioning", - "pkg/selection", - "pkg/types", - "pkg/util/cache", - "pkg/util/clock", - "pkg/util/diff", - "pkg/util/errors", - "pkg/util/framer", - "pkg/util/intstr", - "pkg/util/json", - "pkg/util/mergepatch", - "pkg/util/naming", - "pkg/util/net", - "pkg/util/runtime", - "pkg/util/sets", - "pkg/util/strategicpatch", - "pkg/util/uuid", - "pkg/util/validation", - "pkg/util/validation/field", - "pkg/util/wait", - "pkg/util/yaml", - "pkg/version", - "pkg/watch", - "third_party/forked/golang/json", - "third_party/forked/golang/reflect", - ] - pruneopts = "UT" - revision = "6a84e37a896db9780c75367af8d2ed2bb944022e" - version = "kubernetes-1.14.1" - -[[projects]] - digest = "1:00f288473b016529887e152dad2465482cad64b233298d58f1526385505b27db" - name = "k8s.io/client-go" - packages = [ - "discovery", - "dynamic", - "informers", - "informers/admissionregistration", - "informers/admissionregistration/v1beta1", - "informers/apps", - "informers/apps/v1", - "informers/apps/v1beta1", - "informers/apps/v1beta2", - "informers/auditregistration", - "informers/auditregistration/v1alpha1", - "informers/autoscaling", - "informers/autoscaling/v1", - "informers/autoscaling/v2beta1", - "informers/autoscaling/v2beta2", - "informers/batch", - "informers/batch/v1", - "informers/batch/v1beta1", - "informers/batch/v2alpha1", - "informers/certificates", - "informers/certificates/v1beta1", - "informers/coordination", - "informers/coordination/v1", - "informers/coordination/v1beta1", - "informers/core", - "informers/core/v1", - "informers/events", - "informers/events/v1beta1", - "informers/extensions", - "informers/extensions/v1beta1", - "informers/internalinterfaces", - "informers/networking", - "informers/networking/v1", - "informers/networking/v1beta1", - "informers/node", - "informers/node/v1alpha1", - "informers/node/v1beta1", - "informers/policy", - "informers/policy/v1beta1", - "informers/rbac", - "informers/rbac/v1", - "informers/rbac/v1alpha1", - "informers/rbac/v1beta1", - "informers/scheduling", - "informers/scheduling/v1", - "informers/scheduling/v1alpha1", - "informers/scheduling/v1beta1", - 
"informers/settings", - "informers/settings/v1alpha1", - "informers/storage", - "informers/storage/v1", - "informers/storage/v1alpha1", - "informers/storage/v1beta1", - "kubernetes", - "kubernetes/scheme", - "kubernetes/typed/admissionregistration/v1beta1", - "kubernetes/typed/apps/v1", - "kubernetes/typed/apps/v1beta1", - "kubernetes/typed/apps/v1beta2", - "kubernetes/typed/auditregistration/v1alpha1", - "kubernetes/typed/authentication/v1", - "kubernetes/typed/authentication/v1beta1", - "kubernetes/typed/authorization/v1", - "kubernetes/typed/authorization/v1beta1", - "kubernetes/typed/autoscaling/v1", - "kubernetes/typed/autoscaling/v2beta1", - "kubernetes/typed/autoscaling/v2beta2", - "kubernetes/typed/batch/v1", - "kubernetes/typed/batch/v1beta1", - "kubernetes/typed/batch/v2alpha1", - "kubernetes/typed/certificates/v1beta1", - "kubernetes/typed/coordination/v1", - "kubernetes/typed/coordination/v1beta1", - "kubernetes/typed/core/v1", - "kubernetes/typed/events/v1beta1", - "kubernetes/typed/extensions/v1beta1", - "kubernetes/typed/networking/v1", - "kubernetes/typed/networking/v1beta1", - "kubernetes/typed/node/v1alpha1", - "kubernetes/typed/node/v1beta1", - "kubernetes/typed/policy/v1beta1", - "kubernetes/typed/rbac/v1", - "kubernetes/typed/rbac/v1alpha1", - "kubernetes/typed/rbac/v1beta1", - "kubernetes/typed/scheduling/v1", - "kubernetes/typed/scheduling/v1alpha1", - "kubernetes/typed/scheduling/v1beta1", - "kubernetes/typed/settings/v1alpha1", - "kubernetes/typed/storage/v1", - "kubernetes/typed/storage/v1alpha1", - "kubernetes/typed/storage/v1beta1", - "listers/admissionregistration/v1beta1", - "listers/apps/v1", - "listers/apps/v1beta1", - "listers/apps/v1beta2", - "listers/auditregistration/v1alpha1", - "listers/autoscaling/v1", - "listers/autoscaling/v2beta1", - "listers/autoscaling/v2beta2", - "listers/batch/v1", - "listers/batch/v1beta1", - "listers/batch/v2alpha1", - "listers/certificates/v1beta1", - "listers/coordination/v1", - "listers/coordination/v1beta1", - "listers/core/v1", - "listers/events/v1beta1", - "listers/extensions/v1beta1", - "listers/networking/v1", - "listers/networking/v1beta1", - "listers/node/v1alpha1", - "listers/node/v1beta1", - "listers/policy/v1beta1", - "listers/rbac/v1", - "listers/rbac/v1alpha1", - "listers/rbac/v1beta1", - "listers/scheduling/v1", - "listers/scheduling/v1alpha1", - "listers/scheduling/v1beta1", - "listers/settings/v1alpha1", - "listers/storage/v1", - "listers/storage/v1alpha1", - "listers/storage/v1beta1", - "pkg/apis/clientauthentication", - "pkg/apis/clientauthentication/v1alpha1", - "pkg/apis/clientauthentication/v1beta1", - "pkg/version", - "plugin/pkg/client/auth", - "plugin/pkg/client/auth/azure", - "plugin/pkg/client/auth/exec", - "plugin/pkg/client/auth/gcp", - "plugin/pkg/client/auth/oidc", - "plugin/pkg/client/auth/openstack", - "rest", - "rest/watch", - "restmapper", - "testing", - "third_party/forked/golang/template", - "tools/auth", - "tools/cache", - "tools/clientcmd", - "tools/clientcmd/api", - "tools/clientcmd/api/latest", - "tools/clientcmd/api/v1", - "tools/leaderelection", - "tools/leaderelection/resourcelock", - "tools/metrics", - "tools/pager", - "tools/record", - "tools/record/util", - "tools/reference", - "transport", - "util/cert", - "util/connrotation", - "util/flowcontrol", - "util/homedir", - "util/jsonpath", - "util/keyutil", - "util/retry", - "util/workqueue", - ] - pruneopts = "UT" - revision = "1a26190bd76a9017e289958b9fba936430aa3704" - version = "kubernetes-1.14.1" - -[[projects]] - digest = 
"1:e2999bf1bb6eddc2a6aa03fe5e6629120a53088926520ca3b4765f77d7ff7eab" - name = "k8s.io/klog" - packages = ["."] - pruneopts = "UT" - revision = "a5bc97fbc634d635061f3146511332c7e313a55a" - version = "v0.1.0" - -[[projects]] - branch = "master" - digest = "1:0dac10b488f2671c15738b662478d665b0c0ec7cae3362669dec172147331ce5" - name = "k8s.io/kube-openapi" - packages = [ - "pkg/common", - "pkg/util/proto", - ] - pruneopts = "UT" - revision = "e3762e86a74c878ffed47484592986685639c2cd" - -[[projects]] - branch = "master" - digest = "1:14e8a3b53e6d8cb5f44783056b71bb2ca1ac7e333939cc97f3e50b579c920845" - name = "k8s.io/utils" - packages = [ - "buffer", - "integer", - "trace", - ] - pruneopts = "UT" - revision = "21c4ce38f2a793ec01e925ddc31216500183b773" - -[[projects]] - digest = "1:9070222ca967d09b3966552a161dd4420d62315964bf5e1efd8cc4c7c30ebca8" - name = "sigs.k8s.io/testing_frameworks" - packages = [ - "integration", - "integration/addr", - "integration/internal", - ] - pruneopts = "UT" - revision = "d348cb12705b516376e0c323bacca72b00a78425" - version = "v0.1.1" - -[[projects]] - digest = "1:7719608fe0b52a4ece56c2dde37bedd95b938677d1ab0f84b8a7852e4c59f849" - name = "sigs.k8s.io/yaml" - packages = ["."] - pruneopts = "UT" - revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480" - version = "v1.1.0" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "github.com/appscode/jsonpatch", - "github.com/emicklei/go-restful", - "github.com/evanphx/json-patch", - "github.com/go-logr/logr", - "github.com/go-logr/logr/testing", - "github.com/go-logr/zapr", - "github.com/go-openapi/spec", - "github.com/onsi/ginkgo", - "github.com/onsi/ginkgo/config", - "github.com/onsi/ginkgo/types", - "github.com/onsi/gomega", - "github.com/prometheus/client_golang/prometheus", - "github.com/prometheus/client_golang/prometheus/promhttp", - "github.com/prometheus/client_model/go", - "github.com/spf13/pflag", - "go.uber.org/zap", - "go.uber.org/zap/buffer", - "go.uber.org/zap/zapcore", - "gopkg.in/fsnotify.v1", - "k8s.io/api/admission/v1beta1", - "k8s.io/api/apps/v1", - "k8s.io/api/apps/v1beta1", - "k8s.io/api/autoscaling/v1", - "k8s.io/api/core/v1", - "k8s.io/api/extensions/v1beta1", - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", - "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset", - "k8s.io/apimachinery/pkg/api/errors", - "k8s.io/apimachinery/pkg/api/meta", - "k8s.io/apimachinery/pkg/apis/meta/v1", - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", - "k8s.io/apimachinery/pkg/fields", - "k8s.io/apimachinery/pkg/labels", - "k8s.io/apimachinery/pkg/runtime", - "k8s.io/apimachinery/pkg/runtime/schema", - "k8s.io/apimachinery/pkg/runtime/serializer", - "k8s.io/apimachinery/pkg/selection", - "k8s.io/apimachinery/pkg/types", - "k8s.io/apimachinery/pkg/util/json", - "k8s.io/apimachinery/pkg/util/runtime", - "k8s.io/apimachinery/pkg/util/sets", - "k8s.io/apimachinery/pkg/util/uuid", - "k8s.io/apimachinery/pkg/util/wait", - "k8s.io/apimachinery/pkg/util/yaml", - "k8s.io/apimachinery/pkg/watch", - "k8s.io/client-go/discovery", - "k8s.io/client-go/dynamic", - "k8s.io/client-go/informers", - "k8s.io/client-go/kubernetes", - "k8s.io/client-go/kubernetes/scheme", - "k8s.io/client-go/kubernetes/typed/core/v1", - "k8s.io/client-go/plugin/pkg/client/auth", - "k8s.io/client-go/plugin/pkg/client/auth/gcp", - "k8s.io/client-go/rest", - "k8s.io/client-go/restmapper", - "k8s.io/client-go/testing", - "k8s.io/client-go/tools/cache", - "k8s.io/client-go/tools/clientcmd", - 
"k8s.io/client-go/tools/leaderelection", - "k8s.io/client-go/tools/leaderelection/resourcelock", - "k8s.io/client-go/tools/metrics", - "k8s.io/client-go/tools/record", - "k8s.io/client-go/tools/reference", - "k8s.io/client-go/util/workqueue", - "k8s.io/kube-openapi/pkg/common", - "sigs.k8s.io/testing_frameworks/integration", - "sigs.k8s.io/testing_frameworks/integration/addr", - "sigs.k8s.io/yaml", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/sigs.k8s.io/controller-runtime/Gopkg.toml b/vendor/sigs.k8s.io/controller-runtime/Gopkg.toml deleted file mode 100644 index 81598f106..000000000 --- a/vendor/sigs.k8s.io/controller-runtime/Gopkg.toml +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright 2018 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Packages required by users -# NB(directxman12): be very careful how you word these -- -# dep considers them as a dependency on the package you list -# meaning that if there's a main.go in the root package -# (like in apiextensions-apiserver), you'll pull it's deps in. -required = ["sigs.k8s.io/testing_frameworks/integration", - "k8s.io/client-go/plugin/pkg/client/auth", - "github.com/spf13/pflag", - "github.com/emicklei/go-restful", - "github.com/go-openapi/spec", - "k8s.io/kube-openapi/pkg/common", - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", - "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset", - "github.com/prometheus/client_golang/prometheus", - ] - -[[constraint]] - name = "k8s.io/api" - version = "kubernetes-1.14.1" - -[[constraint]] - name = "k8s.io/apiextensions-apiserver" - version = "kubernetes-1.14.1" - -[[constraint]] - name = "k8s.io/apimachinery" - version = "kubernetes-1.14.1" - -[[constraint]] - name = "k8s.io/client-go" - version = "kubernetes-1.14.1" - -[[constraint]] - name = "sigs.k8s.io/testing_frameworks" - version = "v0.1.1" - -[[constraint]] - name = "github.com/onsi/ginkgo" - version = "v1.5.0" - -[[constraint]] - name = "github.com/onsi/gomega" - version = "v1.4.0" - -[[constraint]] - name = "sigs.k8s.io/yaml" - version = "1.1.0" - -[[constraint]] - name = "go.uber.org/zap" - version = "1.8.0" - -[[constraint]] - name = "github.com/prometheus/client_golang" - version = "0.9.0" - -# these are not listed explicitly until we get version tags, -# since dep doesn't like bare revision dependencies - -# [[constraint]] -# name = "github.com/go-logr/logr" -# -# [[constraint]] -# name = "github.com/go-logr/zapr" - -# For dependency below: Refer to issue https://github.com/golang/dep/issues/1799 -[[override]] -name = "gopkg.in/fsnotify.v1" -source = "https://github.com/fsnotify/fsnotify.git" -version="v1.4.7" - -[prune] - go-tests = true - unused-packages = true diff --git a/vendor/sigs.k8s.io/controller-runtime/TMP-LOGGING.md b/vendor/sigs.k8s.io/controller-runtime/TMP-LOGGING.md index 78da1951d..17860350c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/TMP-LOGGING.md +++ b/vendor/sigs.k8s.io/controller-runtime/TMP-LOGGING.md @@ -30,7 +30,7 @@ or even 
write func (r *Reconciler) Reconcile(req reconcile.Request) (reconcile.Response, error) { logger := logger.WithValues("pod", req.NamespacedName) // do some stuff - logger.Info("starting reconcilation") + logger.Info("starting reconciliation") } ``` @@ -49,7 +49,7 @@ provides some helpers to make it easy to use You can configure the logging implementation using `"sigs.k8s.io/controller-runtime/pkg/log".SetLogger`. That -package also contains the convinience functions for setting up Zap. +package also contains the convenience functions for setting up Zap. You can get a handle to the the "root" logger using `"sigs.k8s.io/controller-runtime/pkg/log".Log`, and can then call @@ -58,7 +58,7 @@ repeatedly to chain names together: ```go logger := log.Log.WithName("controller").WithName("replicaset") -// in reconile... +// in reconcile... logger = logger.WithValues("replicaset", req.NamespacedName) // later on in reconcile... logger.Info("doing things with pods", "pod", newPod) @@ -86,7 +86,7 @@ Errors should *always* be logged with `log.Error`, which allows logr implementations to provide special handling of errors (for instance, providing stack traces in debug mode). -It's acceptible to log call `log.Error` with a nil error object. This +It's acceptable to log call `log.Error` with a nil error object. This conveys that an error occurred in some capacity, but that no actual `error` object was involved. @@ -125,12 +125,12 @@ logic. ### Groups, Versions, and Kinds -- Kinds should not be logged alone (they're meanless alone). Use +- Kinds should not be logged alone (they're meaningless alone). Use a `GroupKind` object to log them instead, or a `GroupVersionKind` when version is relevant. - If you need to log an API version string, use `api version` as the key - (formatted as with a `GroupVersion`, or as recieved directly from API + (formatted as with a `GroupVersion`, or as received directly from API discovery). ### Objects and Types diff --git a/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md b/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md index 57038eff6..3638aae60 100644 --- a/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md +++ b/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md @@ -66,7 +66,7 @@ greatest code, including breaking changes, happens on master. The *release-X* branches contain stable, backwards compatible code. Every major (X) release, a new such branch is created. It is from these branches that minor and patch releases are tagged. If some cases, it may -be neccessary open PRs for bugfixes directly against stable branches, but +be necessary open PRs for bugfixes directly against stable branches, but this should generally not be the case. The maintainers are responsible for updating the contents of this branch; @@ -75,10 +75,10 @@ filters and checks for changes tagged as breaking (see below). ### Tooling -* [release-notes.sh](hack/release-notes.sh): generate release notes +* [release-notes.sh](hack/release/release-notes.sh): generate release notes for a range of commits, and check for next version type (***TODO***) -* [verify-commit-messages.sh](hack/verify-commit-messages.sh): check that +* [verify-emoji.sh](hack/release/verify-emoji.sh): check that your PR and/or commit messages have the right versioning icon (***TODO***). @@ -131,7 +131,7 @@ branch, except in exceptional circumstances. Patches will be backported to maintained stable versions, as needed. 
Major releases are done shortly after a breaking change is merged -- once -a breaking change is merged, the next release *must* be a major revison. +a breaking change is merged, the next release *must* be a major revision. We don't intend to have a lot of these, so we may put off merging breaking PRs until a later date. @@ -172,9 +172,9 @@ have to rewrite their code when they eventually upgrade, and for maintainers/contributors, who have to deal with differences between master and stable branches. -That being said, we'll occaisonally want to make breaking changes. They'll +That being said, we'll occasionally want to make breaking changes. They'll be merged onto master, and will then trigger a major release (see [Release -Proccess](#release-process)). Because breaking changes induce a major +Process](#release-process)). Because breaking changes induce a major revision, the maintainers may delay a particular breaking change until a later date when they are ready to make a major revision with a few breaking changes. @@ -187,7 +187,7 @@ Maintainers should treat breaking changes with caution, and evaluate potential non-breaking solutions (see below). Note that API breakage in public APIs due to dependencies will trigger -a major revision, so you may occaisonally need to have a major release +a major revision, so you may occasionally need to have a major release anyway, due to changes in libraries like `k8s.io/client-go` or `k8s.io/apimachinery`. diff --git a/vendor/sigs.k8s.io/controller-runtime/alias.go b/vendor/sigs.k8s.io/controller-runtime/alias.go index b41f51435..af955ad30 100644 --- a/vendor/sigs.k8s.io/controller-runtime/alias.go +++ b/vendor/sigs.k8s.io/controller-runtime/alias.go @@ -94,6 +94,9 @@ var ( // NewControllerManagedBy returns a new controller builder that will be started by the provided Manager NewControllerManagedBy = builder.ControllerManagedBy + // NewWebhookManagedBy returns a new webhook builder that will be started by the provided Manager + NewWebhookManagedBy = builder.WebhookManagedBy + // NewManager returns a new Manager for creating Controllers. NewManager = manager.New diff --git a/vendor/sigs.k8s.io/controller-runtime/doc.go b/vendor/sigs.k8s.io/controller-runtime/doc.go index 16eef52f8..d90970796 100644 --- a/vendor/sigs.k8s.io/controller-runtime/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/doc.go @@ -64,7 +64,7 @@ limitations under the License. // sources (pkg/source), like Kubernetes API object changes, to event handlers // (pkg/handler), like "enqueue a reconcile request for the object owner". // Predicates (pkg/predicate) can be used to filter which events actually -// trigger reconciles. There are pre-written utilies for the common cases, and +// trigger reconciles. There are pre-written utilities for the common cases, and // interfaces and helpers for advanced cases. 
// // Reconcilers diff --git a/vendor/sigs.k8s.io/controller-runtime/go.mod b/vendor/sigs.k8s.io/controller-runtime/go.mod index 568ccb60d..e31d7a58c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/go.mod +++ b/vendor/sigs.k8s.io/controller-runtime/go.mod @@ -1,50 +1,31 @@ module sigs.k8s.io/controller-runtime -go 1.11 +go 1.13 require ( - cloud.google.com/go v0.26.0 // indirect - github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 - github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 // indirect - github.com/evanphx/json-patch v4.1.0+incompatible + github.com/evanphx/json-patch v4.5.0+incompatible github.com/go-logr/logr v0.1.0 github.com/go-logr/zapr v0.1.0 - github.com/gogo/protobuf v1.1.1 // indirect github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 // indirect - github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect - github.com/googleapis/gnostic v0.2.0 // indirect - github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 // indirect + github.com/googleapis/gnostic v0.3.1 // indirect github.com/imdario/mergo v0.3.6 // indirect - github.com/json-iterator/go v1.1.5 // indirect - github.com/kr/pretty v0.1.0 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 // indirect - github.com/onsi/ginkgo v1.6.0 - github.com/onsi/gomega v1.4.2 - github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c // indirect - github.com/pkg/errors v0.8.1 // indirect - github.com/prometheus/client_golang v0.9.0 + github.com/onsi/ginkgo v1.8.0 + github.com/onsi/gomega v1.5.0 + github.com/prometheus/client_golang v0.9.2 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 - github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e // indirect - github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 // indirect - github.com/spf13/pflag v1.0.2 // indirect go.uber.org/atomic v1.3.2 // indirect go.uber.org/multierr v1.1.0 // indirect go.uber.org/zap v1.9.1 - golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac // indirect - golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be // indirect - golang.org/x/sync v0.0.0-20190423024810-112230192c58 // indirect - golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 // indirect - google.golang.org/appengine v1.1.0 // indirect - gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect + golang.org/x/time v0.0.0-20181108054448-85acf8d2951c + golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 + gomodules.xyz/jsonpatch/v2 v2.0.1 + gopkg.in/fsnotify.v1 v1.4.7 gopkg.in/inf.v0 v0.9.1 // indirect - k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b - k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8 - k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d - k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible - k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c // indirect - k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 // indirect - sigs.k8s.io/testing_frameworks v0.1.1 + k8s.io/api v0.0.0-20190918155943-95b840bb6a1f + k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783 + k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655 + k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90 + k8s.io/utils v0.0.0-20190801114015-581e00157fb1 + sigs.k8s.io/testing_frameworks v0.1.2 sigs.k8s.io/yaml v1.1.0 ) diff --git 
a/vendor/sigs.k8s.io/controller-runtime/go.sum b/vendor/sigs.k8s.io/controller-runtime/go.sum index b5e1bdd0c..8f6a90aac 100644 --- a/vendor/sigs.k8s.io/controller-runtime/go.sum +++ b/vendor/sigs.k8s.io/controller-runtime/go.sum @@ -1,125 +1,387 @@ -cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 h1:Kn3rqvbUFqSepE2OqVu0Pn1CbDw9IuMlONapol0zuwk= -github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30/go.mod h1:4AJxUpXUhv4N+ziTvIcWWXgeorXpxPZOfk9HdEVr96M= +cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= 
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.1.0+incompatible h1:K1MDoo4AZ4wU0GIU/fPmtZg7VpzLjCxu+UwBD1FvwOc= -github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= +github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/zapr v0.1.0 
h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54= github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= -github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/validate v0.18.0/go.mod 
h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 h1:u4bArs140e9+AfE52mFHOXVFnOSBJBRlzTHrOPLOIhE= github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= -github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= -github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= -github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 h1:UnszMmmmm5vLwWzDjTFVIkfhvWF1NdrmChl8L2NUDCw= -github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod 
h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk= +github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= +github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE= -github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text 
v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= +github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.4.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I= -github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c h1:MUyE44mTvnI5A0xrxIxaMqoWFzPfQvtE2IWUollMDMs= -github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= +github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= 
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.0 h1:tXuTFVHC03mW0D+Ua1Q2d1EAVqLTuggX50V0VLICCzY= -github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e h1:n/3MEhJQjQxrOUCzh1Y3Re6aJUUWRp2M9+Oc3eVn/54= -github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo18YNX3jzl+4G6Bstwt+kqv47GS12uL0= -github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/soheilhy/cmux v0.1.3/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/pflag v1.0.2 h1:Fy0orTDgHdbnzHcsOgfCN4LtHf0ec3wwtiwJqwvf3Gc= -github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag 
v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac h1:7d7lG9fHOLdL6jZPtnV4LpI41SbohIJ1Atq7U991dMg= -golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp 
v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180112015858-5ccada7d0a7b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc h1:gkKoSkUmnU6bpS/VhkuO27bzQeSA51uaEfbOW5dNb68= +golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180117170059-2c42eef0765b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313 h1:pczuHS43Cp2ktBEEmLwScxgjWsBSzdaQiKzUyf3DTTc= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs= +golang.org/x/text v0.3.1-0.20171227012246-e19ae1496984/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools 
v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0= +gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= +gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod 
h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.0.0/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b h1:aBGgKJUM9Hk/3AE8WaZIApnTxG35kbuQba2w+SXqezo= -k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= -k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8 h1:q1Qvjzs/iEdXF6A1a8H3AKVFDzJNcJn3nXMs6R6qFtA= -k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= -k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d h1:Jmdtdt1ZnoGfWWIIik61Z7nKYgO3J+swQJtPYsP9wHA= -k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= -k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible h1:U5Bt+dab9K8qaUmXINrkXO135kA11/i5Kg1RUydgaMQ= -k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= -k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.0.0-20190918155943-95b840bb6a1f h1:8FRUST8oUkEI45WYKyD8ed7Ad0Kg5v11zHyPkEVb2xo= +k8s.io/api v0.0.0-20190918155943-95b840bb6a1f/go.mod h1:uWuOHnjmNrtQomJrvEBg0c0HRNyQ+8KTEERVsK0PW48= +k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783 h1:V6ndwCPoao1yZ52agqOKaUAl7DYWVGiXjV7ePA2i610= +k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY= +k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655 h1:CS1tBQz3HOXiseWZu6ZicKX361CZLT97UFnnPx0aqBw= +k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655/go.mod h1:nL6pwRT8NgfF8TT68DBI8uEePRt89cSvoXUVqbkWHq4= +k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg= +k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90 h1:mLmhKUm1X+pXu0zXMEzNsOF5E2kKFGe5o6BZBIIqA6A= +k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90/go.mod h1:J69/JveO6XESwVgG53q3Uz5OSfgsv4uxpScmmyYOOlk= +k8s.io/code-generator v0.0.0-20190912054826-cd179ad6a269/go.mod h1:V5BD6M4CyaN5m+VthcclXWsVcT1Hu+glwa1bi3MIsyE= +k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog 
v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c h1:3KSCztE7gPitlZmWbNwue/2U0YruD65DqX3INopDAQM= -k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= -k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 h1:VBM/0P5TWxwk+Nw6Z+lAw3DKgO76g90ETOiA6rfLV1Y= -k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -sigs.k8s.io/testing_frameworks v0.1.1 h1:cP2l8fkA3O9vekpy5Ks8mmA0NW/F7yBdXf8brkWhVrs= -sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= +k8s.io/klog v0.4.0 h1:lCJCxf/LIowc2IGS9TPjWDyXY4nOmdGdfcwwDQCOURQ= +k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf h1:EYm5AW/UUDbnmnI+gK0TJDVK9qPLhM+sRHYanNKw0EQ= +k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +k8s.io/utils v0.0.0-20190801114015-581e00157fb1 h1:+ySTxfHnfzZb9ys375PXNlLhkJPLKgHajBU0N62BDvE= +k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= +modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= +modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA= +sigs.k8s.io/testing_frameworks v0.1.2 h1:vK0+tvjF0BZ/RYFeZ1E6BYBwHJJXhjuZ3TdsEKH+UQM= +sigs.k8s.io/testing_frameworks v0.1.2/go.mod h1:ToQrwSC3s8Xf/lADdZp3Mktcql9CG0UAmdJG9th5i0w= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/build.go b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go similarity index 60% rename from vendor/sigs.k8s.io/controller-runtime/pkg/builder/build.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go index 487786586..c3f542468 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/build.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go @@ -21,24 +21,18 @@ import ( "strings" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" - "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - "sigs.k8s.io/controller-runtime/pkg/webhook/conversion" ) // Supporting mocking out functions for testing -var getConfig = config.GetConfig var newController = controller.New -var newManager = 
manager.New var getGvk = apiutil.GVKForObject // Builder builds a Controller. @@ -50,27 +44,19 @@ type Builder struct { watchRequest []watchRequest config *rest.Config ctrl controller.Controller + ctrlOptions controller.Options name string } -// SimpleController returns a new Builder. -// -// Deprecated: Use ControllerManagedBy(Manager) instead. -func SimpleController() *Builder { - return &Builder{} -} - // ControllerManagedBy returns a new controller builder that will be started by the provided Manager func ControllerManagedBy(m manager.Manager) *Builder { - return SimpleController().WithManager(m) + return &Builder{mgr: m} } // ForType defines the type of Object being *reconciled*, and configures the ControllerManagedBy to respond to create / delete / // update events by *reconciling the object*. // This is the equivalent of calling // Watches(&source.Kind{Type: apiType}, &handler.EnqueueRequestForObject{}) -// If the passed in object has implemented the admission.Defaulter interface, a MutatingWebhook will be wired for this type. -// If the passed in object has implemented the admission.Validator interface, a ValidatingWebhook will be wired for this type. // // Deprecated: Use For func (blder *Builder) ForType(apiType runtime.Object) *Builder { @@ -81,8 +67,6 @@ func (blder *Builder) ForType(apiType runtime.Object) *Builder { // update events by *reconciling the object*. // This is the equivalent of calling // Watches(&source.Kind{Type: apiType}, &handler.EnqueueRequestForObject{}) -// If the passed in object has implemented the admission.Defaulter interface, a MutatingWebhook will be wired for this type. -// If the passed in object has implemented the admission.Validator interface, a ValidatingWebhook will be wired for this type. func (blder *Builder) For(apiType runtime.Object) *Builder { blder.apiType = apiType return blder @@ -116,14 +100,6 @@ func (blder *Builder) WithConfig(config *rest.Config) *Builder { return blder } -// WithManager sets the Manager to use for registering the ControllerManagedBy. Defaults to a new manager.Manager. -// -// Deprecated: Use ControllerManagedBy(Manager) and this isn't needed. -func (blder *Builder) WithManager(m manager.Manager) *Builder { - blder.mgr = m - return blder -} - // WithEventFilter sets the event filters, to filter which create/update/delete/generic events eventually // trigger reconciliations. For example, filtering on whether the resource version has changed. // Defaults to the empty list. @@ -132,6 +108,12 @@ func (blder *Builder) WithEventFilter(p predicate.Predicate) *Builder { return blder } +// WithOptions overrides the controller options use in doController. Defaults to empty. +func (blder *Builder) WithOptions(options controller.Options) *Builder { + blder.ctrlOptions = options + return blder +} + // Named sets the name of the controller to the given name. The name shows up // in metrics, among other things, and thus should be a prometheus compatible name // (underscores and alphanumeric characters only). @@ -148,40 +130,29 @@ func (blder *Builder) Complete(r reconcile.Reconciler) error { return err } -// Build builds the Application ControllerManagedBy and returns the Manager used to start it. -// -// Deprecated: Use Complete -func (blder *Builder) Build(r reconcile.Reconciler) (manager.Manager, error) { +// Build builds the Application ControllerManagedBy and returns the Controller it created. 
+func (blder *Builder) Build(r reconcile.Reconciler) (controller.Controller, error) { if r == nil { return nil, fmt.Errorf("must provide a non-nil Reconciler") } - - // Set the Config - if err := blder.doConfig(); err != nil { - return nil, err + if blder.mgr == nil { + return nil, fmt.Errorf("must provide a non-nil Manager") } - // Set the Manager - if err := blder.doManager(); err != nil { - return nil, err - } + // Set the Config + blder.loadRestConfig() // Set the ControllerManagedBy if err := blder.doController(r); err != nil { return nil, err } - // Set the Webook if needed - if err := blder.doWebhook(); err != nil { - return nil, err - } - // Set the Watch if err := blder.doWatch(); err != nil { return nil, err } - return blder.mgr, nil + return blder.ctrl, nil } func (blder *Builder) doWatch() error { @@ -215,26 +186,10 @@ func (blder *Builder) doWatch() error { return nil } -func (blder *Builder) doConfig() error { - if blder.config != nil { - return nil - } - if blder.mgr != nil { +func (blder *Builder) loadRestConfig() { + if blder.config == nil { blder.config = blder.mgr.GetConfig() - return nil } - var err error - blder.config, err = getConfig() - return err -} - -func (blder *Builder) doManager() error { - if blder.mgr != nil { - return nil - } - var err error - blder.mgr, err = newManager(blder.config, manager.Options{}) - return err } func (blder *Builder) getControllerName() (string, error) { @@ -253,65 +208,8 @@ func (blder *Builder) doController(r reconcile.Reconciler) error { if err != nil { return err } - blder.ctrl, err = newController(name, blder.mgr, controller.Options{Reconciler: r}) + ctrlOptions := blder.ctrlOptions + ctrlOptions.Reconciler = r + blder.ctrl, err = newController(name, blder.mgr, ctrlOptions) return err } - -func (blder *Builder) doWebhook() error { - // Create a webhook for each type - gvk, err := apiutil.GVKForObject(blder.apiType, blder.mgr.GetScheme()) - if err != nil { - return err - } - - // TODO: When the conversion webhook lands, we need to handle all registered versions of a given group-kind. 
- // A potential workflow for defaulting webhook - // 1) a bespoke (non-hub) version comes in - // 2) convert it to the hub version - // 3) do defaulting - // 4) convert it back to the same bespoke version - // 5) calculate the JSON patch - // - // A potential workflow for validating webhook - // 1) a bespoke (non-hub) version comes in - // 2) convert it to the hub version - // 3) do validation - if defaulter, isDefaulter := blder.apiType.(admission.Defaulter); isDefaulter { - mwh := admission.DefaultingWebhookFor(defaulter) - if mwh != nil { - path := generateMutatePath(gvk) - log.Info("Registering a mutating webhook", - "GVK", gvk, - "path", path) - - blder.mgr.GetWebhookServer().Register(path, mwh) - } - } - - if validator, isValidator := blder.apiType.(admission.Validator); isValidator { - vwh := admission.ValidatingWebhookFor(validator) - if vwh != nil { - path := generateValidatePath(gvk) - log.Info("Registering a validating webhook", - "GVK", gvk, - "path", path) - blder.mgr.GetWebhookServer().Register(path, vwh) - } - } - - err = conversion.CheckConvertibility(blder.mgr.GetScheme(), blder.apiType) - if err != nil { - log.Error(err, "conversion check failed", "GVK", gvk) - } - return nil -} - -func generateMutatePath(gvk schema.GroupVersionKind) string { - return "/mutate-" + strings.Replace(gvk.Group, ".", "-", -1) + "-" + - gvk.Version + "-" + strings.ToLower(gvk.Kind) -} - -func generateValidatePath(gvk schema.GroupVersionKind) string { - return "/validate-" + strings.Replace(gvk.Group, ".", "-", -1) + "-" + - gvk.Version + "-" + strings.ToLower(gvk.Kind) -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go new file mode 100644 index 000000000..ada4ddd23 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go @@ -0,0 +1,166 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "net/http" + "net/url" + "strings" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + "sigs.k8s.io/controller-runtime/pkg/webhook/conversion" +) + +// WebhookBuilder builds a Webhook. +type WebhookBuilder struct { + apiType runtime.Object + gvk schema.GroupVersionKind + mgr manager.Manager + config *rest.Config +} + +func WebhookManagedBy(m manager.Manager) *WebhookBuilder { + return &WebhookBuilder{mgr: m} +} + +// TODO(droot): update the GoDoc for conversion. + +// For takes a runtime.Object which should be a CR. +// If the given object implements the admission.Defaulter interface, a MutatingWebhook will be wired for this type. +// If the given object implements the admission.Validator interface, a ValidatingWebhook will be wired for this type. 
+func (blder *WebhookBuilder) For(apiType runtime.Object) *WebhookBuilder { + blder.apiType = apiType + return blder +} + +// Complete builds the webhook. +func (blder *WebhookBuilder) Complete() error { + // Set the Config + blder.loadRestConfig() + + // Set the Webhook if needed + return blder.registerWebhooks() +} + +func (blder *WebhookBuilder) loadRestConfig() { + if blder.config == nil { + blder.config = blder.mgr.GetConfig() + } +} + +func (blder *WebhookBuilder) registerWebhooks() error { + // Create webhook(s) for each type + var err error + blder.gvk, err = apiutil.GVKForObject(blder.apiType, blder.mgr.GetScheme()) + if err != nil { + return err + } + + blder.registerDefaultingWebhook() + blder.registerValidatingWebhook() + + err = blder.registerConversionWebhook() + if err != nil { + return err + } + return nil +} + +// registerDefaultingWebhook registers a defaulting webhook if th +func (blder *WebhookBuilder) registerDefaultingWebhook() { + defaulter, isDefaulter := blder.apiType.(admission.Defaulter) + if !isDefaulter { + log.Info("skip registering a mutating webhook, admission.Defaulter interface is not implemented", "GVK", blder.gvk) + return + } + mwh := admission.DefaultingWebhookFor(defaulter) + if mwh != nil { + path := generateMutatePath(blder.gvk) + + // Checking if the path is already registered. + // If so, just skip it. + if !blder.isAlreadyHandled(path) { + log.Info("Registering a mutating webhook", + "GVK", blder.gvk, + "path", path) + blder.mgr.GetWebhookServer().Register(path, mwh) + } + } +} + +func (blder *WebhookBuilder) registerValidatingWebhook() { + validator, isValidator := blder.apiType.(admission.Validator) + if !isValidator { + log.Info("skip registering a validating webhook, admission.Validator interface is not implemented", "GVK", blder.gvk) + return + } + vwh := admission.ValidatingWebhookFor(validator) + if vwh != nil { + path := generateValidatePath(blder.gvk) + + // Checking if the path is already registered. + // If so, just skip it. 
+ if !blder.isAlreadyHandled(path) { + log.Info("Registering a validating webhook", + "GVK", blder.gvk, + "path", path) + blder.mgr.GetWebhookServer().Register(path, vwh) + } + } +} + +func (blder *WebhookBuilder) registerConversionWebhook() error { + ok, err := conversion.IsConvertible(blder.mgr.GetScheme(), blder.apiType) + if err != nil { + log.Error(err, "conversion check failed", "object", blder.apiType) + return err + } + if ok { + if !blder.isAlreadyHandled("/convert") { + blder.mgr.GetWebhookServer().Register("/convert", &conversion.Webhook{}) + } + log.Info("conversion webhook enabled", "object", blder.apiType) + } + + return nil +} + +func (blder *WebhookBuilder) isAlreadyHandled(path string) bool { + if blder.mgr.GetWebhookServer().WebhookMux == nil { + return false + } + h, p := blder.mgr.GetWebhookServer().WebhookMux.Handler(&http.Request{URL: &url.URL{Path: path}}) + if p == path && h != nil { + return true + } + return false +} + +func generateMutatePath(gvk schema.GroupVersionKind) string { + return "/mutate-" + strings.Replace(gvk.Group, ".", "-", -1) + "-" + + gvk.Version + "-" + strings.ToLower(gvk.Kind) +} + +func generateValidatePath(gvk schema.GroupVersionKind) string { + return "/validate-" + strings.Replace(gvk.Group, ".", "-", -1) + "-" + + gvk.Version + "-" + strings.ToLower(gvk.Kind) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go index 03a86d3fc..4903ab560 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go @@ -36,17 +36,17 @@ var log = logf.RuntimeLog.WithName("object-cache") // Cache knows how to load Kubernetes objects, fetch informers to request // to receive events for Kubernetes objects (at a low-level), -// and add indicies to fields on the objects stored in the cache. +// and add indices to fields on the objects stored in the cache. type Cache interface { // Cache acts as a client to objects stored in the cache. client.Reader - // Cache loads informers and adds field indicies. + // Cache loads informers and adds field indices. Informers } // Informers knows how to create or fetch informers for different -// group-version-kinds, and add indicies to those informers. It's safe to call +// group-version-kinds, and add indices to those informers. It's safe to call // GetInformer from multiple threads. type Informers interface { // GetInformer fetches or constructs an informer for the given object that corresponds to a single @@ -64,7 +64,7 @@ type Informers interface { // WaitForCacheSync waits for all the caches to sync. Returns false if it could not sync a cache. WaitForCacheSync(stop <-chan struct{}) bool - // Informers knows how to add indicies to the caches (informers) that it manages. + // Informers knows how to add indices to the caches (informers) that it manages. client.FieldIndexer } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go index 634893746..786609bd1 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go @@ -38,6 +38,13 @@ var ( _ Cache = &informerCache{} ) +// ErrCacheNotStarted is returned when trying to read from the cache that wasn't started. 
+type ErrCacheNotStarted struct{} + +func (*ErrCacheNotStarted) Error() string { + return "the cache is not started, can not read objects" +} + // informerCache is a Kubernetes Object cache populated from InformersMap. informerCache wraps an InformersMap. type informerCache struct { *internal.InformersMap @@ -50,15 +57,19 @@ func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out runt return err } - cache, err := ip.InformersMap.Get(gvk, out) + started, cache, err := ip.InformersMap.Get(gvk, out) if err != nil { return err } + + if !started { + return &ErrCacheNotStarted{} + } return cache.Reader.Get(ctx, key, out) } // List implements Reader -func (ip *informerCache) List(ctx context.Context, out runtime.Object, opts ...client.ListOptionFunc) error { +func (ip *informerCache) List(ctx context.Context, out runtime.Object, opts ...client.ListOption) error { gvk, err := apiutil.GVKForObject(out, ip.Scheme) if err != nil { return err @@ -90,11 +101,15 @@ func (ip *informerCache) List(ctx context.Context, out runtime.Object, opts ...c } } - cache, err := ip.InformersMap.Get(gvk, cacheTypeObj) + started, cache, err := ip.InformersMap.Get(gvk, cacheTypeObj) if err != nil { return err } + if !started { + return &ErrCacheNotStarted{} + } + return cache.Reader.List(ctx, out, opts...) } @@ -105,7 +120,7 @@ func (ip *informerCache) GetInformerForKind(gvk schema.GroupVersionKind) (Inform if err != nil { return nil, err } - i, err := ip.InformersMap.Get(gvk, obj) + _, i, err := ip.InformersMap.Get(gvk, obj) if err != nil { return nil, err } @@ -118,13 +133,19 @@ func (ip *informerCache) GetInformer(obj runtime.Object) (Informer, error) { if err != nil { return nil, err } - i, err := ip.InformersMap.Get(gvk, obj) + _, i, err := ip.InformersMap.Get(gvk, obj) if err != nil { return nil, err } return i.Informer, err } +// NeedLeaderElection implements the LeaderElectionRunnable interface +// to indicate that this can be started without requiring the leader lock +func (ip *informerCache) NeedLeaderElection() bool { + return false +} + // IndexField adds an indexer to the underlying cache, using extraction function to get // value(s) from the given field. This index can then be used by passing a field selector // to List. For one-to-one compatibility with "normal" field selectors, only return one value. 
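// Editorial sketch (not part of this diff): with this controller-runtime bump the
// controller Builder no longer wires webhooks implicitly; defaulting/validating
// webhooks get their own WebhookBuilder, Build returns the controller.Controller,
// and per-controller settings travel through WithOptions. A minimal wiring for the
// Etcd type could look as follows; the reconciler value r, the druid import path and
// the MaxConcurrentReconciles field are assumptions for illustration only.

package controllers // illustrative package name

import (
	druidv1alpha1 "github.com/gardener/etcd-druid/api/v1alpha1"

	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

func setupWithManager(mgr manager.Manager, r reconcile.Reconciler) error {
	// Controller: options such as the number of concurrent workers are now passed
	// via WithOptions instead of being hard-coded inside doController.
	if err := builder.ControllerManagedBy(mgr).
		For(&druidv1alpha1.Etcd{}).
		WithOptions(controller.Options{MaxConcurrentReconciles: 1}).
		Complete(r); err != nil {
		return err
	}

	// Webhooks: registered separately; mutating/validating handlers are only wired
	// if the Etcd type implements admission.Defaulter / admission.Validator.
	return builder.WebhookManagedBy(mgr).
		For(&druidv1alpha1.Etcd{}).
		Complete()
}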
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go index 1611aa372..5c3fac922 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go @@ -30,10 +30,9 @@ import ( "k8s.io/apimachinery/pkg/selection" "k8s.io/client-go/tools/cache" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" ) -// CacheReader is a CacheReader +// CacheReader is a client.Reader var _ client.Reader = &CacheReader{} // CacheReader wraps a cache.Index to implement the client.CacheReader interface for a single type @@ -88,7 +87,7 @@ func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out runtime.O } // List lists items out of the indexer and writes them to out -func (c *CacheReader) List(_ context.Context, out runtime.Object, opts ...client.ListOptionFunc) error { +func (c *CacheReader) List(_ context.Context, out runtime.Object, opts ...client.ListOption) error { var objs []interface{} var err error @@ -97,7 +96,7 @@ func (c *CacheReader) List(_ context.Context, out runtime.Object, opts ...client if listOpts.FieldSelector != nil { // TODO(directxman12): support more complicated field selectors by - // combining multiple indicies, GetIndexers, etc + // combining multiple indices, GetIndexers, etc field, val, requiresExact := requiresExactMatch(listOpts.FieldSelector) if !requiresExact { return fmt.Errorf("non-exact field matches are not supported by the cache") @@ -125,15 +124,22 @@ func (c *CacheReader) List(_ context.Context, out runtime.Object, opts ...client if !isObj { return fmt.Errorf("cache contained %T, which is not an Object", obj) } + meta, err := apimeta.Accessor(obj) + if err != nil { + return err + } + if labelSel != nil { + lbls := labels.Set(meta.GetLabels()) + if !labelSel.Matches(lbls) { + continue + } + } + outObj := obj.DeepCopyObject() outObj.GetObjectKind().SetGroupVersionKind(c.groupVersionKind) runtimeObjs = append(runtimeObjs, outObj) } - filteredItems, err := objectutil.FilterWithLabels(runtimeObjs, labelSel) - if err != nil { - return err - } - return apimeta.SetList(out, filteredItems) + return apimeta.SetList(out, runtimeObjs) } // objectKeyToStorageKey converts an object key to store key. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go index 978002d9c..8e2c65f4b 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go @@ -73,7 +73,7 @@ func (m *InformersMap) WaitForCacheSync(stop <-chan struct{}) bool { // Get will create a new Informer and add it to the map of InformersMap if none exists. Returns // the Informer from the map. 
-func (m *InformersMap) Get(gvk schema.GroupVersionKind, obj runtime.Object) (*MapEntry, error) { +func (m *InformersMap) Get(gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) { _, isUnstructured := obj.(*unstructured.Unstructured) _, isUnstructuredList := obj.(*unstructured.UnstructuredList) isUnstructured = isUnstructured || isUnstructuredList diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go index b2787adfc..7345c54b4 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go @@ -145,7 +145,7 @@ func (ip *specificInformersMap) HasSyncedFuncs() []cache.InformerSynced { // Get will create a new Informer and add it to the map of specificInformersMap if none exists. Returns // the Informer from the map. -func (ip *specificInformersMap) Get(gvk schema.GroupVersionKind, obj runtime.Object) (*MapEntry, error) { +func (ip *specificInformersMap) Get(gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) { // Return the informer if it is found i, started, ok := func() (*MapEntry, bool, bool) { ip.mu.RLock() @@ -157,18 +157,18 @@ func (ip *specificInformersMap) Get(gvk schema.GroupVersionKind, obj runtime.Obj if !ok { var err error if i, started, err = ip.addInformerToMap(gvk, obj); err != nil { - return nil, err + return started, nil, err } } if started && !i.Informer.HasSynced() { // Wait for it to sync before returning the Informer so that folks don't read from a stale cache. if !cache.WaitForCacheSync(ip.stop, i.Informer.HasSynced) { - return nil, fmt.Errorf("failed waiting for %T Informer to sync", obj) + return started, nil, fmt.Errorf("failed waiting for %T Informer to sync", obj) } } - return i, nil + return started, i, nil } func (ip *specificInformersMap) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.Object) (*MapEntry, bool, error) { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go index fedb42653..dc6e159e3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go @@ -133,7 +133,7 @@ func (c *multiNamespaceCache) Get(ctx context.Context, key client.ObjectKey, obj } // List multi namespace cache will get all the objects in the namespaces that the cache is watching if asked for all namespaces. -func (c *multiNamespaceCache) List(ctx context.Context, list runtime.Object, opts ...client.ListOptionFunc) error { +func (c *multiNamespaceCache) List(ctx context.Context, list runtime.Object, opts ...client.ListOption) error { listOpts := client.ListOptions{} listOpts.ApplyOptions(opts) if listOpts.Namespace != corev1.NamespaceAll { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go index cf775c3dc..157f11f7c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go @@ -35,7 +35,10 @@ import ( // information fetched by a new client with the given config. 
func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) { // Get a mapper - dc := discovery.NewDiscoveryClientForConfigOrDie(c) + dc, err := discovery.NewDiscoveryClientForConfig(c) + if err != nil { + return nil, err + } gr, err := restmapper.GetAPIGroupResources(dc) if err != nil { return nil, err @@ -71,7 +74,7 @@ func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersi func RESTClientForGVK(gvk schema.GroupVersionKind, baseConfig *rest.Config, codecs serializer.CodecFactory) (rest.Interface, error) { cfg := createRestConfig(gvk, baseConfig) if cfg.NegotiatedSerializer == nil { - cfg.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: codecs} + cfg.NegotiatedSerializer = serializer.WithoutConversionCodecFactory{CodecFactory: codecs} } return rest.RESTClientFor(cfg) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go new file mode 100644 index 000000000..4588fb06c --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go @@ -0,0 +1,322 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiutil + +import ( + "sync" + "time" + + "golang.org/x/time/rate" + "golang.org/x/xerrors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" +) + +// ErrRateLimited is returned by a RESTMapper method if the number of API +// calls has exceeded a limit within a certain time period. +type ErrRateLimited struct { + // Duration to wait until the next API call can be made. + Delay time.Duration +} + +func (e ErrRateLimited) Error() string { + return "too many API calls to the RESTMapper within a timeframe" +} + +// DelayIfRateLimited returns the delay time until the next API call is +// allowed and true if err is of type ErrRateLimited. The zero +// time.Duration value and false are returned if err is not a ErrRateLimited. +func DelayIfRateLimited(err error) (time.Duration, bool) { + var rlerr ErrRateLimited + if xerrors.As(err, &rlerr) { + return rlerr.Delay, true + } + return 0, false +} + +// dynamicRESTMapper is a RESTMapper that dynamically discovers resource +// types at runtime. +type dynamicRESTMapper struct { + mu sync.RWMutex // protects the following fields + staticMapper meta.RESTMapper + limiter *dynamicLimiter + newMapper func() (meta.RESTMapper, error) + + lazy bool + // Used for lazy init. + initOnce sync.Once +} + +// DynamicRESTMapperOption is a functional option on the dynamicRESTMapper +type DynamicRESTMapperOption func(*dynamicRESTMapper) error + +// WithLimiter sets the RESTMapper's underlying limiter to lim. 
+func WithLimiter(lim *rate.Limiter) DynamicRESTMapperOption { + return func(drm *dynamicRESTMapper) error { + drm.limiter = &dynamicLimiter{lim} + return nil + } +} + +// WithLazyDiscovery prevents the RESTMapper from discovering REST mappings +// until an API call is made. +var WithLazyDiscovery DynamicRESTMapperOption = func(drm *dynamicRESTMapper) error { + drm.lazy = true + return nil +} + +// WithCustomMapper supports setting a custom RESTMapper refresher instead of +// the default method, which uses a discovery client. +// +// This exists mainly for testing, but can be useful if you need tighter control +// over how discovery is performed, which discovery endpoints are queried, etc. +func WithCustomMapper(newMapper func() (meta.RESTMapper, error)) DynamicRESTMapperOption { + return func(drm *dynamicRESTMapper) error { + drm.newMapper = newMapper + return nil + } +} + +// NewDynamicRESTMapper returns a dynamic RESTMapper for cfg. The dynamic +// RESTMapper dynamically discovers resource types at runtime. opts +// configure the RESTMapper. +func NewDynamicRESTMapper(cfg *rest.Config, opts ...DynamicRESTMapperOption) (meta.RESTMapper, error) { + client, err := discovery.NewDiscoveryClientForConfig(cfg) + if err != nil { + return nil, err + } + drm := &dynamicRESTMapper{ + limiter: &dynamicLimiter{ + rate.NewLimiter(rate.Limit(defaultRefillRate), defaultLimitSize), + }, + newMapper: func() (meta.RESTMapper, error) { + groupResources, err := restmapper.GetAPIGroupResources(client) + if err != nil { + return nil, err + } + return restmapper.NewDiscoveryRESTMapper(groupResources), nil + }, + } + for _, opt := range opts { + if err = opt(drm); err != nil { + return nil, err + } + } + if !drm.lazy { + if err := drm.setStaticMapper(); err != nil { + return nil, err + } + } + return drm, nil +} + +var ( + // defaultRefilRate is the default rate at which potential calls are + // added back to the "bucket" of allowed calls. + defaultRefillRate = 5 + // defaultLimitSize is the default starting/max number of potential calls + // per second. Once a call is used, it's added back to the bucket at a rate + // of defaultRefillRate per second. + defaultLimitSize = 5 +) + +// setStaticMapper sets drm's staticMapper by querying its client, regardless +// of reload backoff. +func (drm *dynamicRESTMapper) setStaticMapper() error { + newMapper, err := drm.newMapper() + if err != nil { + return err + } + drm.staticMapper = newMapper + return nil +} + +// init initializes drm only once if drm is lazy. +func (drm *dynamicRESTMapper) init() (err error) { + drm.initOnce.Do(func() { + if drm.lazy { + err = drm.setStaticMapper() + } + }) + return err +} + +// checkAndReload attempts to call the given callback, which is assumed to be dependent +// on the data in the restmapper. +// +// If the callback returns a NoKindMatchError, it will attempt to reload +// the RESTMapper's data and re-call the callback once that's occurred. +// If the callback returns any other error, the function will return immediately regardless. +// +// It will take care +// ensuring that reloads are rate-limitted and that extraneous calls aren't made. +// It's thread-safe, and worries about thread-safety for the callback (so the callback does +// not need to attempt to lock the restmapper). 
+func (drm *dynamicRESTMapper) checkAndReload(needsReloadErr error, checkNeedsReload func() error) error { + // first, check the common path -- data is fresh enough + // (use an IIFE for the lock's defer) + err := func() error { + drm.mu.RLock() + defer drm.mu.RUnlock() + + return checkNeedsReload() + }() + + // NB(directxman12): `Is` and `As` have a confusing relationship -- + // `Is` is like `== or does this implement .Is`, whereas `As` says + // `can I type-assert into` + needsReload := xerrors.As(err, &needsReloadErr) + if !needsReload { + return err + } + + // if the data wasn't fresh, we'll need to try and update it, so grab the lock... + drm.mu.Lock() + defer drm.mu.Unlock() + + // ... and double-check that we didn't reload in the meantime + err = checkNeedsReload() + needsReload = xerrors.As(err, &needsReloadErr) + if !needsReload { + return err + } + + // we're still stale, so grab a rate-limit token if we can... + if err := drm.limiter.checkRate(); err != nil { + return err + } + + // ...reload... + if err := drm.setStaticMapper(); err != nil { + return err + } + + // ...and return the results of the closure regardless + return checkNeedsReload() +} + +// TODO: wrap reload errors on NoKindMatchError with go 1.13 errors. + +func (drm *dynamicRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + if err := drm.init(); err != nil { + return schema.GroupVersionKind{}, err + } + var gvk schema.GroupVersionKind + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + gvk, err = drm.staticMapper.KindFor(resource) + return err + }) + return gvk, err +} + +func (drm *dynamicRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + if err := drm.init(); err != nil { + return nil, err + } + var gvks []schema.GroupVersionKind + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + gvks, err = drm.staticMapper.KindsFor(resource) + return err + }) + return gvks, err +} + +func (drm *dynamicRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { + if err := drm.init(); err != nil { + return schema.GroupVersionResource{}, err + } + + var gvr schema.GroupVersionResource + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + gvr, err = drm.staticMapper.ResourceFor(input) + return err + }) + return gvr, err +} + +func (drm *dynamicRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + if err := drm.init(); err != nil { + return nil, err + } + var gvrs []schema.GroupVersionResource + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + gvrs, err = drm.staticMapper.ResourcesFor(input) + return err + }) + return gvrs, err +} + +func (drm *dynamicRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + if err := drm.init(); err != nil { + return nil, err + } + var mapping *meta.RESTMapping + err := drm.checkAndReload(&meta.NoKindMatchError{}, func() error { + var err error + mapping, err = drm.staticMapper.RESTMapping(gk, versions...) 
+ return err + }) + return mapping, err +} + +func (drm *dynamicRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + if err := drm.init(); err != nil { + return nil, err + } + var mappings []*meta.RESTMapping + err := drm.checkAndReload(&meta.NoKindMatchError{}, func() error { + var err error + mappings, err = drm.staticMapper.RESTMappings(gk, versions...) + return err + }) + return mappings, err +} + +func (drm *dynamicRESTMapper) ResourceSingularizer(resource string) (string, error) { + if err := drm.init(); err != nil { + return "", err + } + var singular string + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + singular, err = drm.staticMapper.ResourceSingularizer(resource) + return err + }) + return singular, err +} + +// dynamicLimiter holds a rate limiter used to throttle chatty RESTMapper users. +type dynamicLimiter struct { + *rate.Limiter +} + +// checkRate returns an ErrRateLimited if too many API calls have been made +// within the set limit. +func (b *dynamicLimiter) checkRate() error { + res := b.Reserve() + if res.Delay() == 0 { + return nil + } + return ErrRateLimited{res.Delay()} +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go index 43facbfba..a3a0ddc98 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes/scheme" @@ -48,7 +49,7 @@ type Options struct { // // In the case of normal types, the scheme will be used to look up the // corresponding group, version, and kind for the given type. In the -// case of unstrctured types, the group, version, and kind will be extracted +// case of unstructured types, the group, version, and kind will be extracted // from the corresponding fields on the object. func New(config *rest.Config, options Options) (Client, error) { if config == nil { @@ -103,8 +104,18 @@ type client struct { unstructuredClient unstructuredClient } +// resetGroupVersionKind is a helper function to restore and preserve GroupVersionKind on an object. +// TODO(vincepri): Remove this function and its calls once controller-runtime dependencies are upgraded to 1.16? +func (c *client) resetGroupVersionKind(obj runtime.Object, gvk schema.GroupVersionKind) { + if gvk != schema.EmptyObjectKind.GroupVersionKind() { + if v, ok := obj.(schema.ObjectKind); ok { + v.SetGroupVersionKind(gvk) + } + } +} + // Create implements client.Client -func (c *client) Create(ctx context.Context, obj runtime.Object, opts ...CreateOptionFunc) error { +func (c *client) Create(ctx context.Context, obj runtime.Object, opts ...CreateOption) error { _, ok := obj.(*unstructured.Unstructured) if ok { return c.unstructuredClient.Create(ctx, obj, opts...) 
@@ -113,7 +124,8 @@ func (c *client) Create(ctx context.Context, obj runtime.Object, opts ...CreateO } // Update implements client.Client -func (c *client) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOptionFunc) error { +func (c *client) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error { + defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) _, ok := obj.(*unstructured.Unstructured) if ok { return c.unstructuredClient.Update(ctx, obj, opts...) @@ -122,7 +134,7 @@ func (c *client) Update(ctx context.Context, obj runtime.Object, opts ...UpdateO } // Delete implements client.Client -func (c *client) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOptionFunc) error { +func (c *client) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOption) error { _, ok := obj.(*unstructured.Unstructured) if ok { return c.unstructuredClient.Delete(ctx, obj, opts...) @@ -130,8 +142,18 @@ func (c *client) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteO return c.typedClient.Delete(ctx, obj, opts...) } +// DeleteAllOf implements client.Client +func (c *client) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...DeleteAllOfOption) error { + _, ok := obj.(*unstructured.Unstructured) + if ok { + return c.unstructuredClient.DeleteAllOf(ctx, obj, opts...) + } + return c.typedClient.DeleteAllOf(ctx, obj, opts...) +} + // Patch implements client.Client -func (c *client) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOptionFunc) error { +func (c *client) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { + defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) _, ok := obj.(*unstructured.Unstructured) if ok { return c.unstructuredClient.Patch(ctx, obj, patch, opts...) @@ -149,7 +171,7 @@ func (c *client) Get(ctx context.Context, key ObjectKey, obj runtime.Object) err } // List implements client.Client -func (c *client) List(ctx context.Context, obj runtime.Object, opts ...ListOptionFunc) error { +func (c *client) List(ctx context.Context, obj runtime.Object, opts ...ListOption) error { _, ok := obj.(*unstructured.UnstructuredList) if ok { return c.unstructuredClient.List(ctx, obj, opts...) @@ -171,7 +193,8 @@ type statusWriter struct { var _ StatusWriter = &statusWriter{} // Update implements client.StatusWriter -func (sw *statusWriter) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOptionFunc) error { +func (sw *statusWriter) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error { + defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) _, ok := obj.(*unstructured.Unstructured) if ok { return sw.client.unstructuredClient.UpdateStatus(ctx, obj, opts...) @@ -180,7 +203,8 @@ func (sw *statusWriter) Update(ctx context.Context, obj runtime.Object, opts ... } // Patch implements client.Client -func (sw *statusWriter) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOptionFunc) error { +func (sw *statusWriter) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { + defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) _, ok := obj.(*unstructured.Unstructured) if ok { return sw.client.unstructuredClient.PatchStatus(ctx, obj, patch, opts...) 
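// Editorial sketch (not part of this diff): the new dynamic RESTMapper re-runs API
// discovery (rate limited) when asked for a kind or resource it does not know yet,
// instead of failing like the one-shot discovery mapper. Wiring it in through
// manager.Options.MapperProvider is an assumption here; that hook exists in
// controller-runtime but is not shown in this diff.

package controllers // illustrative package name

import (
	"golang.org/x/time/rate"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

func newManagerWithDynamicMapper(cfg *rest.Config) (manager.Manager, error) {
	return manager.New(cfg, manager.Options{
		MapperProvider: func(c *rest.Config) (meta.RESTMapper, error) {
			// Discover lazily on first use and allow roughly 5 rediscoveries per
			// second (the same defaults applied when no limiter is supplied).
			return apiutil.NewDynamicRESTMapper(c,
				apiutil.WithLazyDiscovery,
				apiutil.WithLimiter(rate.NewLimiter(rate.Limit(5), 5)),
			)
		},
	})
}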
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go index 8d022c5cf..fd75c68b8 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go @@ -20,11 +20,10 @@ import ( "flag" "fmt" "os" - "os/user" - "path/filepath" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" ) @@ -61,7 +60,27 @@ func init() { // // * $HOME/.kube/config if exists func GetConfig() (*rest.Config, error) { - cfg, err := loadConfig() + return GetConfigWithContext("") +} + +// GetConfigWithContext creates a *rest.Config for talking to a Kubernetes API server with a specific context. +// If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running +// in cluster and use the cluster provided kubeconfig. +// +// It also applies saner defaults for QPS and burst based on the Kubernetes +// controller manager defaults (20 QPS, 30 burst) +// +// Config precedence +// +// * --kubeconfig flag pointing at a file +// +// * KUBECONFIG environment variable pointing at a file +// +// * In-cluster config if running in cluster +// +// * $HOME/.kube/config if exists +func GetConfigWithContext(context string) (*rest.Config, error) { + cfg, err := loadConfig(context) if err != nil { return nil, err } @@ -74,31 +93,48 @@ func GetConfig() (*rest.Config, error) { return cfg, nil } +// loadInClusterConfig is a function used to load the in-cluster +// Kubernetes client config. This variable makes is possible to +// test the precedence of loading the config. +var loadInClusterConfig = rest.InClusterConfig + // loadConfig loads a REST Config as per the rules specified in GetConfig -func loadConfig() (*rest.Config, error) { +func loadConfig(context string) (*rest.Config, error) { + // If a flag is specified with the config location, use that if len(kubeconfig) > 0 { - return clientcmd.BuildConfigFromFlags(apiServerURL, kubeconfig) - } - // If an env variable is specified with the config locaiton, use that - if len(os.Getenv("KUBECONFIG")) > 0 { - return clientcmd.BuildConfigFromFlags(apiServerURL, os.Getenv("KUBECONFIG")) - } - // If no explicit location, try the in-cluster config - if c, err := rest.InClusterConfig(); err == nil { - return c, nil + return loadConfigWithContext(apiServerURL, &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig}, context) } - // If no in-cluster config, try the default location in the user's home directory - if usr, err := user.Current(); err == nil { - if c, err := clientcmd.BuildConfigFromFlags( - "", filepath.Join(usr.HomeDir, ".kube", "config")); err == nil { + + // If the recommended kubeconfig env variable is not specified, + // try the in-cluster config. + kubeconfigPath := os.Getenv(clientcmd.RecommendedConfigPathEnvVar) + if len(kubeconfigPath) == 0 { + if c, err := loadInClusterConfig(); err == nil { return c, nil } } + // If the recommended kubeconfig env variable is set, or there + // is no in-cluster config, try the default recommended locations. 
+ if c, err := loadConfigWithContext(apiServerURL, clientcmd.NewDefaultClientConfigLoadingRules(), context); err == nil { + return c, nil + } + return nil, fmt.Errorf("could not locate a kubeconfig") } +func loadConfigWithContext(apiServerURL string, loader clientcmd.ClientConfigLoader, context string) (*rest.Config, error) { + return clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + loader, + &clientcmd.ConfigOverrides{ + ClusterInfo: clientcmdapi.Cluster{ + Server: apiServerURL, + }, + CurrentContext: context, + }).ClientConfig() +} + // GetConfigOrDie creates a *rest.Config for talking to a Kubernetes apiserver. // If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running // in cluster and use the cluster provided kubeconfig. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go index 6c13af211..2965e5fa9 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go @@ -34,7 +34,7 @@ limitations under the License. // Many client operations in Kubernetes support options. These options are // represented as variadic arguments at the end of a given method call. // For instance, to use a label selector on list, you can call -// err := someReader.List(context.Background(), &podList, client.MatchingLabels(someLabelMap)) +// err := someReader.List(context.Background(), &podList, client.MatchingLabels{"somelabel": "someval"}) // // Indexing // diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go index e4ca8be85..00b43caee 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go @@ -58,24 +58,27 @@ type Reader interface { // List retrieves list of objects for a given namespace and list options. On a // successful call, Items field in the list will be populated with the // result returned from the server. - List(ctx context.Context, list runtime.Object, opts ...ListOptionFunc) error + List(ctx context.Context, list runtime.Object, opts ...ListOption) error } // Writer knows how to create, delete, and update Kubernetes objects. type Writer interface { // Create saves the object obj in the Kubernetes cluster. - Create(ctx context.Context, obj runtime.Object, opts ...CreateOptionFunc) error + Create(ctx context.Context, obj runtime.Object, opts ...CreateOption) error // Delete deletes the given obj from Kubernetes cluster. - Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOptionFunc) error + Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOption) error // Update updates the given obj in the Kubernetes cluster. obj must be a // struct pointer so that obj can be updated with the content returned by the Server. - Update(ctx context.Context, obj runtime.Object, opts ...UpdateOptionFunc) error + Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error // Patch patches the given obj in the Kubernetes cluster. obj must be a // struct pointer so that obj can be updated with the content returned by the Server. - Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOptionFunc) error + Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error + + // DeleteAllOf deletes all objects of the given type matching the given options. 
+ DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...DeleteAllOfOption) error } // StatusClient knows how to create a client which can update status subresource @@ -89,12 +92,12 @@ type StatusWriter interface { // Update updates the fields corresponding to the status subresource for the // given obj. obj must be a struct pointer so that obj can be updated // with the content returned by the Server. - Update(ctx context.Context, obj runtime.Object, opts ...UpdateOptionFunc) error + Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error // Patch patches the given object's subresource. obj must be a struct // pointer so that obj can be updated with the content returned by the // Server. - Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOptionFunc) error + Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error } // Client knows how to perform CRUD operations on Kubernetes objects. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go index 46ddf66f9..62b5a8624 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go @@ -22,6 +22,84 @@ import ( "k8s.io/apimachinery/pkg/labels" ) +// {{{ "Functional" Option Interfaces + +// CreateOption is some configuration that modifies options for a create request. +type CreateOption interface { + // ApplyToCreate applies this configuration to the given create options. + ApplyToCreate(*CreateOptions) +} + +// DeleteOption is some configuration that modifies options for a delete request. +type DeleteOption interface { + // ApplyToDelete applies this configuration to the given delete options. + ApplyToDelete(*DeleteOptions) +} + +// ListOption is some configuration that modifies options for a list request. +type ListOption interface { + // ApplyToList applies this configuration to the given list options. + ApplyToList(*ListOptions) +} + +// UpdateOption is some configuration that modifies options for a update request. +type UpdateOption interface { + // ApplyToUpdate applies this configuration to the given update options. + ApplyToUpdate(*UpdateOptions) +} + +// PatchOption is some configuration that modifies options for a patch request. +type PatchOption interface { + // ApplyToPatch applies this configuration to the given patch options. + ApplyToPatch(*PatchOptions) +} + +// DeleteAllOfOption is some configuration that modifies options for a delete request. +type DeleteAllOfOption interface { + // ApplyToDeleteAllOf applies this configuration to the given deletecollection options. + ApplyToDeleteAllOf(*DeleteAllOfOptions) +} + +// }}} + +// {{{ Multi-Type Options + +// DryRunAll sets the "dry run" option to "all", executing all +// validation, etc without persisting the change to storage. +var DryRunAll = dryRunAll{} + +type dryRunAll struct{} + +func (dryRunAll) ApplyToCreate(opts *CreateOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} +func (dryRunAll) ApplyToUpdate(opts *UpdateOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} +func (dryRunAll) ApplyToPatch(opts *PatchOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} +func (dryRunAll) ApplyToDelete(opts *DeleteOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// FieldOwner set the field manager name for the given server-side apply patch. 
+type FieldOwner string + +func (f FieldOwner) ApplyToPatch(opts *PatchOptions) { + opts.FieldManager = string(f) +} +func (f FieldOwner) ApplyToCreate(opts *CreateOptions) { + opts.FieldManager = string(f) +} +func (f FieldOwner) ApplyToUpdate(opts *UpdateOptions) { + opts.FieldManager = string(f) +} + +// }}} + +// {{{ Create Options + // CreateOptions contains options for create requests. It's generally a subset // of metav1.CreateOptions. type CreateOptions struct { @@ -32,6 +110,10 @@ type CreateOptions struct { // - All: all dry run stages will be processed DryRun []string + // FieldManager is the name of the user or component submitting + // this request. It must be set with server-side apply. + FieldManager string + // Raw represents raw CreateOptions, as passed to the API server. Raw *metav1.CreateOptions } @@ -39,7 +121,6 @@ type CreateOptions struct { // AsCreateOptions returns these options as a metav1.CreateOptions. // This may mutate the Raw field. func (o *CreateOptions) AsCreateOptions() *metav1.CreateOptions { - if o == nil { return &metav1.CreateOptions{} } @@ -48,29 +129,43 @@ func (o *CreateOptions) AsCreateOptions() *metav1.CreateOptions { } o.Raw.DryRun = o.DryRun + o.Raw.FieldManager = o.FieldManager return o.Raw } -// ApplyOptions executes the given CreateOptionFuncs and returns the mutated -// CreateOptions. -func (o *CreateOptions) ApplyOptions(optFuncs []CreateOptionFunc) *CreateOptions { - for _, optFunc := range optFuncs { - optFunc(o) +// ApplyOptions applies the given create options on these options, +// and then returns itself (for convenient chaining). +func (o *CreateOptions) ApplyOptions(opts []CreateOption) *CreateOptions { + for _, opt := range opts { + opt.ApplyToCreate(o) } return o } -// CreateOptionFunc is a function that mutates a CreateOptions struct. It implements -// the functional options pattern. See -// https://github.com/tmrts/go-patterns/blob/master/idiom/functional-options.md. -type CreateOptionFunc func(*CreateOptions) - -// CreateDryRunAll is a functional option that sets the DryRun -// field of a CreateOptions struct to metav1.DryRunAll. -var CreateDryRunAll CreateOptionFunc = func(opts *CreateOptions) { - opts.DryRun = []string{metav1.DryRunAll} +// ApplyToCreate implements CreateOption +func (o *CreateOptions) ApplyToCreate(co *CreateOptions) { + if o.DryRun != nil { + co.DryRun = o.DryRun + } + if o.FieldManager != "" { + co.FieldManager = o.FieldManager + } + if o.Raw != nil { + co.Raw = o.Raw + } } +var _ CreateOption = &CreateOptions{} + +// CreateDryRunAll sets the "dry run" option to "all". +// +// Deprecated: Use DryRunAll +var CreateDryRunAll = DryRunAll + +// }}} + +// {{{ Delete Options + // DeleteOptions contains options for delete requests. It's generally a subset // of metav1.DeleteOptions. type DeleteOptions struct { @@ -96,12 +191,18 @@ type DeleteOptions struct { // Raw represents raw DeleteOptions, as passed to the API server. Raw *metav1.DeleteOptions + + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string } // AsDeleteOptions returns these options as a metav1.DeleteOptions. // This may mutate the Raw field. 
func (o *DeleteOptions) AsDeleteOptions() *metav1.DeleteOptions { - if o == nil { return &metav1.DeleteOptions{} } @@ -112,47 +213,79 @@ func (o *DeleteOptions) AsDeleteOptions() *metav1.DeleteOptions { o.Raw.GracePeriodSeconds = o.GracePeriodSeconds o.Raw.Preconditions = o.Preconditions o.Raw.PropagationPolicy = o.PropagationPolicy + o.Raw.DryRun = o.DryRun return o.Raw } -// ApplyOptions executes the given DeleteOptionFuncs and returns the mutated -// DeleteOptions. -func (o *DeleteOptions) ApplyOptions(optFuncs []DeleteOptionFunc) *DeleteOptions { - for _, optFunc := range optFuncs { - optFunc(o) +// ApplyOptions applies the given delete options on these options, +// and then returns itself (for convenient chaining). +func (o *DeleteOptions) ApplyOptions(opts []DeleteOption) *DeleteOptions { + for _, opt := range opts { + opt.ApplyToDelete(o) } return o } -// DeleteOptionFunc is a function that mutates a DeleteOptions struct. It implements -// the functional options pattern. See -// https://github.com/tmrts/go-patterns/blob/master/idiom/functional-options.md. -type DeleteOptionFunc func(*DeleteOptions) +var _ DeleteOption = &DeleteOptions{} -// GracePeriodSeconds is a functional option that sets the GracePeriodSeconds -// field of a DeleteOptions struct. -func GracePeriodSeconds(gp int64) DeleteOptionFunc { - return func(opts *DeleteOptions) { - opts.GracePeriodSeconds = &gp +// ApplyToDelete implements DeleteOption +func (o *DeleteOptions) ApplyToDelete(do *DeleteOptions) { + if o.GracePeriodSeconds != nil { + do.GracePeriodSeconds = o.GracePeriodSeconds + } + if o.Preconditions != nil { + do.Preconditions = o.Preconditions + } + if o.PropagationPolicy != nil { + do.PropagationPolicy = o.PropagationPolicy + } + if o.Raw != nil { + do.Raw = o.Raw + } + if o.DryRun != nil { + do.DryRun = o.DryRun } } -// Preconditions is a functional option that sets the Preconditions field of a -// DeleteOptions struct. -func Preconditions(p *metav1.Preconditions) DeleteOptionFunc { - return func(opts *DeleteOptions) { - opts.Preconditions = p - } +// GracePeriodSeconds sets the grace period for the deletion +// to the given number of seconds. +type GracePeriodSeconds int64 + +func (s GracePeriodSeconds) ApplyToDelete(opts *DeleteOptions) { + secs := int64(s) + opts.GracePeriodSeconds = &secs } -// PropagationPolicy is a functional option that sets the PropagationPolicy -// field of a DeleteOptions struct. -func PropagationPolicy(p metav1.DeletionPropagation) DeleteOptionFunc { - return func(opts *DeleteOptions) { - opts.PropagationPolicy = &p - } +func (s GracePeriodSeconds) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + s.ApplyToDelete(&opts.DeleteOptions) } +type Preconditions metav1.Preconditions + +func (p Preconditions) ApplyToDelete(opts *DeleteOptions) { + preconds := metav1.Preconditions(p) + opts.Preconditions = &preconds +} + +func (p Preconditions) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + p.ApplyToDelete(&opts.DeleteOptions) +} + +type PropagationPolicy metav1.DeletionPropagation + +func (p PropagationPolicy) ApplyToDelete(opts *DeleteOptions) { + policy := metav1.DeletionPropagation(p) + opts.PropagationPolicy = &policy +} + +func (p PropagationPolicy) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + p.ApplyToDelete(&opts.DeleteOptions) +} + +// }}} + +// {{{ List Options + // ListOptions contains options for limiting or filtering results. 
// It's generally a subset of metav1.ListOptions, with support for // pre-parsed selectors (since generally, selectors will be executed @@ -170,32 +303,45 @@ type ListOptions struct { // non-namespaced objects, or to list across all namespaces. Namespace string + // Limit specifies the maximum number of results to return from the server. The server may + // not support this field on all resource types, but if it does and more results remain it + // will set the continue field on the returned list object. This field is not supported if watch + // is true in the Raw ListOptions. + Limit int64 + // Continue is a token returned by the server that lets a client retrieve chunks of results + // from the server by specifying limit. The server may reject requests for continuation tokens + // it does not recognize and will return a 410 error if the token can no longer be used because + // it has expired. This field is not supported if watch is true in the Raw ListOptions. + Continue string + // Raw represents raw ListOptions, as passed to the API server. Note // that these may not be respected by all implementations of interface, - // and the LabelSelector and FieldSelector fields are ignored. + // and the LabelSelector, FieldSelector, Limit and Continue fields are ignored. Raw *metav1.ListOptions } -// SetLabelSelector sets this the label selector of these options -// from a string form of the selector. -func (o *ListOptions) SetLabelSelector(selRaw string) error { - sel, err := labels.Parse(selRaw) - if err != nil { - return err - } - o.LabelSelector = sel - return nil -} +var _ ListOption = &ListOptions{} -// SetFieldSelector sets this the label selector of these options -// from a string form of the selector. -func (o *ListOptions) SetFieldSelector(selRaw string) error { - sel, err := fields.ParseSelector(selRaw) - if err != nil { - return err +// ApplyToList implements ListOption for ListOptions +func (o *ListOptions) ApplyToList(lo *ListOptions) { + if o.LabelSelector != nil { + lo.LabelSelector = o.LabelSelector + } + if o.FieldSelector != nil { + lo.FieldSelector = o.FieldSelector + } + if o.Namespace != "" { + lo.Namespace = o.Namespace + } + if o.Raw != nil { + lo.Raw = o.Raw + } + if o.Limit > 0 { + lo.Limit = o.Limit + } + if o.Continue != "" { + lo.Continue = o.Continue } - o.FieldSelector = sel - return nil } // AsListOptions returns these options as a flattened metav1.ListOptions. @@ -213,85 +359,120 @@ func (o *ListOptions) AsListOptions() *metav1.ListOptions { if o.FieldSelector != nil { o.Raw.FieldSelector = o.FieldSelector.String() } + if !o.Raw.Watch { + o.Raw.Limit = o.Limit + o.Raw.Continue = o.Continue + } return o.Raw } -// ApplyOptions executes the given ListOptionFuncs and returns the mutated -// ListOptions. -func (o *ListOptions) ApplyOptions(optFuncs []ListOptionFunc) *ListOptions { - for _, optFunc := range optFuncs { - optFunc(o) +// ApplyOptions applies the given list options on these options, +// and then returns itself (for convenient chaining). +func (o *ListOptions) ApplyOptions(opts []ListOption) *ListOptions { + for _, opt := range opts { + opt.ApplyToList(o) } return o } -// ListOptionFunc is a function that mutates a ListOptions struct. It implements -// the functional options pattern. See -// https://github.com/tmrts/go-patterns/blob/master/idiom/functional-options.md. -type ListOptionFunc func(*ListOptions) +// MatchingLabels filters the list/delete operation on the given set of labels. 
+type MatchingLabels map[string]string -// MatchingLabels is a convenience function that sets the label selector -// to match the given labels, and then returns the options. -// It mutates the list options. -func (o *ListOptions) MatchingLabels(lbls map[string]string) *ListOptions { - sel := labels.SelectorFromSet(lbls) - o.LabelSelector = sel - return o +func (m MatchingLabels) ApplyToList(opts *ListOptions) { + // TODO(directxman12): can we avoid reserializing this over and over? + sel := labels.SelectorFromSet(map[string]string(m)) + opts.LabelSelector = sel } -// MatchingField is a convenience function that sets the field selector -// to match the given field, and then returns the options. -// It mutates the list options. -func (o *ListOptions) MatchingField(name, val string) *ListOptions { - sel := fields.SelectorFromSet(fields.Set{name: val}) - o.FieldSelector = sel - return o +func (m MatchingLabels) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) } -// InNamespace is a convenience function that sets the namespace, -// and then returns the options. It mutates the list options. -func (o *ListOptions) InNamespace(ns string) *ListOptions { - o.Namespace = ns - return o +// MatchingLabelsSelector filters the list/delete operation on the given label +// selector (or index in the case of cached lists). A struct is used because +// labels.Selector is an interface, which cannot be aliased. +type MatchingLabelsSelector struct { + labels.Selector } -// MatchingLabels is a functional option that sets the LabelSelector field of -// a ListOptions struct. -func MatchingLabels(lbls map[string]string) ListOptionFunc { - sel := labels.SelectorFromSet(lbls) - return func(opts *ListOptions) { - opts.LabelSelector = sel - } +func (m MatchingLabelsSelector) ApplyToList(opts *ListOptions) { + opts.LabelSelector = m } -// MatchingField is a functional option that sets the FieldSelector field of -// a ListOptions struct. -func MatchingField(name, val string) ListOptionFunc { - sel := fields.SelectorFromSet(fields.Set{name: val}) - return func(opts *ListOptions) { - opts.FieldSelector = sel - } +func (m MatchingLabelsSelector) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) } -// InNamespace is a functional option that sets the Namespace field of -// a ListOptions struct. -func InNamespace(ns string) ListOptionFunc { - return func(opts *ListOptions) { - opts.Namespace = ns - } +// MatchingField filters the list operation on the given field selector +// (or index in the case of cached lists). +// +// Deprecated: Use MatchingFields +func MatchingField(name, val string) MatchingFields { + return MatchingFields{name: val} } -// UseListOptions is a functional option that replaces the fields of a -// ListOptions struct with those of a different ListOptions struct. -// -// Example: -// cl.List(ctx, list, client.UseListOptions(lo.InNamespace(ns).MatchingLabels(labels))) -func UseListOptions(newOpts *ListOptions) ListOptionFunc { - return func(opts *ListOptions) { - *opts = *newOpts - } +// MatchingField filters the list/delete operation on the given field Set +// (or index in the case of cached lists). +type MatchingFields fields.Set + +func (m MatchingFields) ApplyToList(opts *ListOptions) { + // TODO(directxman12): can we avoid re-serializing this? 
+ sel := fields.SelectorFromSet(fields.Set(m)) + opts.FieldSelector = sel +} + +func (m MatchingFields) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// MatchingFieldsSelector filters the list/delete operation on the given field +// selector (or index in the case of cached lists). A struct is used because +// fields.Selector is an interface, which cannot be aliased. +type MatchingFieldsSelector struct { + fields.Selector +} + +func (m MatchingFieldsSelector) ApplyToList(opts *ListOptions) { + opts.FieldSelector = m +} + +func (m MatchingFieldsSelector) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) } +// InNamespace restricts the list/delete operation to the given namespace. +type InNamespace string + +func (n InNamespace) ApplyToList(opts *ListOptions) { + opts.Namespace = string(n) +} + +func (n InNamespace) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + n.ApplyToList(&opts.ListOptions) +} + +// Limit specifies the maximum number of results to return from the server. +// Limit does not implement DeleteAllOfOption interface because the server +// does not support setting it for deletecollection operations. +type Limit int64 + +func (l Limit) ApplyToList(opts *ListOptions) { + opts.Limit = int64(l) +} + +// Continue sets a continuation token to retrieve chunks of results when using limit. +// Continue does not implement DeleteAllOfOption interface because the server +// does not support setting it for deletecollection operations. +type Continue string + +func (c Continue) ApplyToList(opts *ListOptions) { + opts.Continue = string(c) +} + +// }}} + +// {{{ Update Options + // UpdateOptions contains options for create requests. It's generally a subset // of metav1.UpdateOptions. type UpdateOptions struct { @@ -302,6 +483,10 @@ type UpdateOptions struct { // - All: all dry run stages will be processed DryRun []string + // FieldManager is the name of the user or component submitting + // this request. It must be set with server-side apply. + FieldManager string + // Raw represents raw UpdateOptions, as passed to the API server. Raw *metav1.UpdateOptions } @@ -309,7 +494,6 @@ type UpdateOptions struct { // AsUpdateOptions returns these options as a metav1.UpdateOptions. // This may mutate the Raw field. func (o *UpdateOptions) AsUpdateOptions() *metav1.UpdateOptions { - if o == nil { return &metav1.UpdateOptions{} } @@ -318,29 +502,43 @@ func (o *UpdateOptions) AsUpdateOptions() *metav1.UpdateOptions { } o.Raw.DryRun = o.DryRun + o.Raw.FieldManager = o.FieldManager return o.Raw } -// ApplyOptions executes the given UpdateOptionFuncs and returns the mutated -// UpdateOptions. -func (o *UpdateOptions) ApplyOptions(optFuncs []UpdateOptionFunc) *UpdateOptions { - for _, optFunc := range optFuncs { - optFunc(o) +// ApplyOptions applies the given update options on these options, +// and then returns itself (for convenient chaining). +func (o *UpdateOptions) ApplyOptions(opts []UpdateOption) *UpdateOptions { + for _, opt := range opts { + opt.ApplyToUpdate(o) } return o } -// UpdateOptionFunc is a function that mutates a UpdateOptions struct. It implements -// the functional options pattern. See -// https://github.com/tmrts/go-patterns/blob/master/idiom/functional-options.md. -type UpdateOptionFunc func(*UpdateOptions) +var _ UpdateOption = &UpdateOptions{} -// UpdateDryRunAll is a functional option that sets the DryRun -// field of a UpdateOptions struct to metav1.DryRunAll. 
-var UpdateDryRunAll UpdateOptionFunc = func(opts *UpdateOptions) { - opts.DryRun = []string{metav1.DryRunAll} +// ApplyToUpdate implements UpdateOption +func (o *UpdateOptions) ApplyToUpdate(uo *UpdateOptions) { + if o.DryRun != nil { + uo.DryRun = o.DryRun + } + if o.FieldManager != "" { + uo.FieldManager = o.FieldManager + } + if o.Raw != nil { + uo.Raw = o.Raw + } } +// UpdateDryRunAll sets the "dry run" option to "all". +// +// Deprecated: Use DryRunAll +var UpdateDryRunAll = DryRunAll + +// }}} + +// {{{ Patch Options + // PatchOptions contains options for patch requests. type PatchOptions struct { // When present, indicates that modifications should not be @@ -364,11 +562,11 @@ type PatchOptions struct { Raw *metav1.PatchOptions } -// ApplyOptions executes the given PatchOptionFuncs, mutating these PatchOptions. -// It returns the mutated PatchOptions for convenience. -func (o *PatchOptions) ApplyOptions(optFuncs []PatchOptionFunc) *PatchOptions { - for _, optFunc := range optFuncs { - optFunc(o) +// ApplyOptions applies the given patch options on these options, +// and then returns itself (for convenient chaining). +func (o *PatchOptions) ApplyOptions(opts []PatchOption) *PatchOptions { + for _, opt := range opts { + opt.ApplyToPatch(o) } return o } @@ -389,29 +587,69 @@ func (o *PatchOptions) AsPatchOptions() *metav1.PatchOptions { return o.Raw } -// PatchOptionFunc is a function that mutates a PatchOptions struct. It implements -// the functional options pattern. See -// https://github.com/tmrts/go-patterns/blob/master/idiom/functional-options.md. -type PatchOptionFunc func(*PatchOptions) +var _ PatchOption = &PatchOptions{} + +// ApplyToPatch implements PatchOptions +func (o *PatchOptions) ApplyToPatch(po *PatchOptions) { + if o.DryRun != nil { + po.DryRun = o.DryRun + } + if o.Force != nil { + po.Force = o.Force + } + if o.FieldManager != "" { + po.FieldManager = o.FieldManager + } + if o.Raw != nil { + po.Raw = o.Raw + } +} + +// ForceOwnership indicates that in case of conflicts with server-side apply, +// the client should acquire ownership of the conflicting field. Most +// controllers should use this. +var ForceOwnership = forceOwnership{} + +type forceOwnership struct{} -// ForceOwnership sets the Force option, indicating that -// in case of conflicts with server-side apply, the client should -// acquire ownership of the conflicting field. Most controllers -// should use this. -var ForceOwnership PatchOptionFunc = func(opts *PatchOptions) { +func (forceOwnership) ApplyToPatch(opts *PatchOptions) { definitelyTrue := true opts.Force = &definitelyTrue } -// PatchDryRunAll is a functional option that sets the DryRun -// field of a PatchOptions struct to metav1.DryRunAll. -var PatchDryRunAll PatchOptionFunc = func(opts *PatchOptions) { - opts.DryRun = []string{metav1.DryRunAll} +// PatchDryRunAll sets the "dry run" option to "all". +// +// Deprecated: Use DryRunAll +var PatchDryRunAll = DryRunAll + +// }}} + +// {{{ DeleteAllOf Options + +// these are all just delete options and list options + +// DeleteAllOfOptions contains options for deletecollection (deleteallof) requests. +// It's just list and delete options smooshed together. +type DeleteAllOfOptions struct { + ListOptions + DeleteOptions } -// FieldOwner set the field manager name for the given server-side apply patch. 
-func FieldOwner(name string) PatchOptionFunc { - return func(opts *PatchOptions) { - opts.FieldManager = name +// ApplyOptions applies the given deleteallof options on these options, +// and then returns itself (for convenient chaining). +func (o *DeleteAllOfOptions) ApplyOptions(opts []DeleteAllOfOption) *DeleteAllOfOptions { + for _, opt := range opts { + opt.ApplyToDeleteAllOf(o) } + return o } + +var _ DeleteAllOfOption = &DeleteAllOfOptions{} + +// ApplyToDeleteAllOf implements DeleteAllOfOption +func (o *DeleteAllOfOptions) ApplyToDeleteAllOf(do *DeleteAllOfOptions) { + o.ApplyToList(&do.ListOptions) + o.ApplyToDelete(&do.DeleteOptions) +} + +// }}} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go index ab7efc287..164de17f3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go @@ -87,7 +87,7 @@ func (p applyPatch) Type() types.PatchType { // Data implements Patch. func (p applyPatch) Data(obj runtime.Object) ([]byte, error) { - // NB(directxman12): we might techically want to be using an actual encoder + // NB(directxman12): we might technically want to be using an actual encoder // here (in case some more performant encoder is introduced) but this is // correct and sufficient for our uses (it's what the JSON serializer in // client-go does, more-or-less). diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go index db7f16a71..47cba9576 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go @@ -52,7 +52,7 @@ func (d *DelegatingReader) Get(ctx context.Context, key ObjectKey, obj runtime.O } // List retrieves list of objects for a given namespace and list options. -func (d *DelegatingReader) List(ctx context.Context, list runtime.Object, opts ...ListOptionFunc) error { +func (d *DelegatingReader) List(ctx context.Context, list runtime.Object, opts ...ListOption) error { _, isUnstructured := list.(*unstructured.UnstructuredList) if isUnstructured { return d.ClientReader.List(ctx, list, opts...) 
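The options.go changes above replace the old functional-option helpers (ListOptionFunc, MatchingLabels(...), InNamespace(...), UseListOptions, UpdateDryRunAll, PatchDryRunAll, FieldOwner, ...) with option value types that implement the ListOption, UpdateOption, PatchOption and new DeleteAllOfOption interfaces, and the clients below gain a DeleteAllOf method. A minimal usage sketch against those new interfaces; the function name, namespace and label set are illustrative only and not part of this patch:

```go
// Illustrative migration sketch, not part of the vendored patch.
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listAndPrune lists a bounded set of labelled pods and then removes the
// whole set with a single deletecollection call via the new DeleteAllOf.
func listAndPrune(ctx context.Context, cl client.Client) error {
	var pods corev1.PodList
	if err := cl.List(ctx, &pods,
		client.InNamespace("default"),        // InNamespace is now a string type implementing ListOption
		client.MatchingLabels{"app": "etcd"}, // MatchingLabels is now a map type, no helper call needed
		client.Limit(50),                     // Limit/Continue are new, List-only options
	); err != nil {
		return err
	}

	return cl.DeleteAllOf(ctx, &corev1.Pod{},
		client.InNamespace("default"),
		client.MatchingLabels{"app": "etcd"},
	)
}
```

Because Limit and Continue implement only ListOption, they can be passed to List but not to DeleteAllOf, which matches the comment above that the server does not support them for deletecollection requests.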
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go index 76f429b65..6ddc1171d 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go @@ -30,7 +30,7 @@ type typedClient struct { } // Create implements client.Client -func (c *typedClient) Create(ctx context.Context, obj runtime.Object, opts ...CreateOptionFunc) error { +func (c *typedClient) Create(ctx context.Context, obj runtime.Object, opts ...CreateOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { return err @@ -49,7 +49,7 @@ func (c *typedClient) Create(ctx context.Context, obj runtime.Object, opts ...Cr } // Update implements client.Client -func (c *typedClient) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOptionFunc) error { +func (c *typedClient) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { return err @@ -69,25 +69,47 @@ func (c *typedClient) Update(ctx context.Context, obj runtime.Object, opts ...Up } // Delete implements client.Client -func (c *typedClient) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOptionFunc) error { +func (c *typedClient) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { return err } deleteOpts := DeleteOptions{} + deleteOpts.ApplyOptions(opts) + return o.Delete(). NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). Resource(o.resource()). Name(o.GetName()). - Body(deleteOpts.ApplyOptions(opts).AsDeleteOptions()). + Body(deleteOpts.AsDeleteOptions()). + Context(ctx). + Do(). + Error() +} + +// DeleteAllOf implements client.Client +func (c *typedClient) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...DeleteAllOfOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteAllOfOpts := DeleteAllOfOptions{} + deleteAllOfOpts.ApplyOptions(opts) + + return o.Delete(). + NamespaceIfScoped(deleteAllOfOpts.ListOptions.Namespace, o.isNamespaced()). + Resource(o.resource()). + VersionedParams(deleteAllOfOpts.AsListOptions(), c.paramCodec). + Body(deleteAllOfOpts.AsDeleteOptions()). Context(ctx). Do(). Error() } // Patch implements client.Client -func (c *typedClient) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOptionFunc) error { +func (c *typedClient) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { return err @@ -124,7 +146,7 @@ func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj runtime.Object } // List implements client.Client -func (c *typedClient) List(ctx context.Context, obj runtime.Object, opts ...ListOptionFunc) error { +func (c *typedClient) List(ctx context.Context, obj runtime.Object, opts ...ListOption) error { r, err := c.cache.getResource(obj) if err != nil { return err @@ -141,7 +163,7 @@ func (c *typedClient) List(ctx context.Context, obj runtime.Object, opts ...List } // UpdateStatus used by StatusWriter to write status. 
-func (c *typedClient) UpdateStatus(ctx context.Context, obj runtime.Object, opts ...UpdateOptionFunc) error { +func (c *typedClient) UpdateStatus(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { return err @@ -163,7 +185,7 @@ func (c *typedClient) UpdateStatus(ctx context.Context, obj runtime.Object, opts } // PatchStatus used by StatusWriter to write status. -func (c *typedClient) PatchStatus(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOptionFunc) error { +func (c *typedClient) PatchStatus(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { return err diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go index f13dd1885..440ad2f97 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go @@ -37,7 +37,7 @@ type unstructuredClient struct { } // Create implements client.Client -func (uc *unstructuredClient) Create(_ context.Context, obj runtime.Object, opts ...CreateOptionFunc) error { +func (uc *unstructuredClient) Create(_ context.Context, obj runtime.Object, opts ...CreateOption) error { u, ok := obj.(*unstructured.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) @@ -57,7 +57,7 @@ func (uc *unstructuredClient) Create(_ context.Context, obj runtime.Object, opts } // Update implements client.Client -func (uc *unstructuredClient) Update(_ context.Context, obj runtime.Object, opts ...UpdateOptionFunc) error { +func (uc *unstructuredClient) Update(_ context.Context, obj runtime.Object, opts ...UpdateOption) error { u, ok := obj.(*unstructured.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) @@ -77,7 +77,7 @@ func (uc *unstructuredClient) Update(_ context.Context, obj runtime.Object, opts } // Delete implements client.Client -func (uc *unstructuredClient) Delete(_ context.Context, obj runtime.Object, opts ...DeleteOptionFunc) error { +func (uc *unstructuredClient) Delete(_ context.Context, obj runtime.Object, opts ...DeleteOption) error { u, ok := obj.(*unstructured.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) @@ -87,12 +87,30 @@ func (uc *unstructuredClient) Delete(_ context.Context, obj runtime.Object, opts return err } deleteOpts := DeleteOptions{} - err = r.Delete(u.GetName(), deleteOpts.ApplyOptions(opts).AsDeleteOptions()) + deleteOpts.ApplyOptions(opts) + err = r.Delete(u.GetName(), deleteOpts.AsDeleteOptions()) + return err +} + +// DeleteAllOf implements client.Client +func (uc *unstructuredClient) DeleteAllOf(_ context.Context, obj runtime.Object, opts ...DeleteAllOfOption) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + r, err := uc.getResourceInterface(u.GroupVersionKind(), u.GetNamespace()) + if err != nil { + return err + } + + deleteAllOfOpts := DeleteAllOfOptions{} + deleteAllOfOpts.ApplyOptions(opts) + err = r.DeleteCollection(deleteAllOfOpts.AsDeleteOptions(), *deleteAllOfOpts.AsListOptions()) return err } // Patch implements client.Client -func (uc *unstructuredClient) Patch(_ context.Context, obj runtime.Object, patch Patch, opts 
...PatchOptionFunc) error { +func (uc *unstructuredClient) Patch(_ context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { u, ok := obj.(*unstructured.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) @@ -135,7 +153,7 @@ func (uc *unstructuredClient) Get(_ context.Context, key ObjectKey, obj runtime. } // List implements client.Client -func (uc *unstructuredClient) List(_ context.Context, obj runtime.Object, opts ...ListOptionFunc) error { +func (uc *unstructuredClient) List(_ context.Context, obj runtime.Object, opts ...ListOption) error { u, ok := obj.(*unstructured.UnstructuredList) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) @@ -160,7 +178,7 @@ func (uc *unstructuredClient) List(_ context.Context, obj runtime.Object, opts . return nil } -func (uc *unstructuredClient) UpdateStatus(_ context.Context, obj runtime.Object, opts ...UpdateOptionFunc) error { +func (uc *unstructuredClient) UpdateStatus(_ context.Context, obj runtime.Object, opts ...UpdateOption) error { u, ok := obj.(*unstructured.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) @@ -177,7 +195,7 @@ func (uc *unstructuredClient) UpdateStatus(_ context.Context, obj runtime.Object return nil } -func (uc *unstructuredClient) PatchStatus(_ context.Context, obj runtime.Object, patch Patch, opts ...PatchOptionFunc) error { +func (uc *unstructuredClient) PatchStatus(_ context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { u, ok := obj.(*unstructured.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go index 3411e95f7..a4e2a982d 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go @@ -80,13 +80,15 @@ func New(name string, mgr manager.Manager, options Options) (Controller, error) // Create controller with dependencies set c := &controller.Controller{ - Do: options.Reconciler, - Cache: mgr.GetCache(), - Config: mgr.GetConfig(), - Scheme: mgr.GetScheme(), - Client: mgr.GetClient(), - Recorder: mgr.GetEventRecorderFor(name), - Queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name), + Do: options.Reconciler, + Cache: mgr.GetCache(), + Config: mgr.GetConfig(), + Scheme: mgr.GetScheme(), + Client: mgr.GetClient(), + Recorder: mgr.GetEventRecorderFor(name), + MakeQueue: func() workqueue.RateLimitingInterface { + return workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name) + }, MaxConcurrentReconciles: options.MaxConcurrentReconciles, Name: name, } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go index e9f44b65c..d0a576c03 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go @@ -22,6 +22,7 @@ import ( "reflect" "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -56,7 +57,18 @@ func 
newAlreadyOwnedError(Object metav1.Object, Owner metav1.OwnerReference) *Al func SetControllerReference(owner, object metav1.Object, scheme *runtime.Scheme) error { ro, ok := owner.(runtime.Object) if !ok { - return fmt.Errorf("is not a %T a runtime.Object, cannot call SetControllerReference", owner) + return fmt.Errorf("%T is not a runtime.Object, cannot call SetControllerReference", owner) + } + + ownerNs := owner.GetNamespace() + if ownerNs != "" { + objNs := object.GetNamespace() + if objNs == "" { + return fmt.Errorf("cluster-scoped resource must not have a namespace-scoped owner, owner's namespace %s", ownerNs) + } + if ownerNs != objNs { + return fmt.Errorf("cross-namespace owner references are disallowed, owner's namespace %s, obj's namespace %s", owner.GetNamespace(), object.GetNamespace()) + } } gvk, err := apiutil.GVKForObject(ro, scheme) @@ -168,3 +180,47 @@ func mutate(f MutateFn, key client.ObjectKey, obj runtime.Object) error { // MutateFn is a function which mutates the existing object into it's desired state. type MutateFn func() error + +// AddFinalizer accepts a metav1 object and adds the provided finalizer if not present. +func AddFinalizer(o metav1.Object, finalizer string) { + f := o.GetFinalizers() + for _, e := range f { + if e == finalizer { + return + } + } + o.SetFinalizers(append(f, finalizer)) +} + +// AddFinalizerWithError tries to convert a runtime object to a metav1 object and add the provided finalizer. +// It returns an error if the provided object cannot provide an accessor. +func AddFinalizerWithError(o runtime.Object, finalizer string) error { + m, err := meta.Accessor(o) + if err != nil { + return err + } + AddFinalizer(m, finalizer) + return nil +} + +// RemoveFinalizer accepts a metav1 object and removes the provided finalizer if present. +func RemoveFinalizer(o metav1.Object, finalizer string) { + f := o.GetFinalizers() + for i, e := range f { + if e == finalizer { + f = append(f[:i], f[i+1:]...) + } + } + o.SetFinalizers(f) +} + +// RemoveFinalizerWithError tries to convert a runtime object to a metav1 object and remove the provided finalizer. +// It returns an error if the provided object cannot provide an accessor. 
+func RemoveFinalizerWithError(o runtime.Object, finalizer string) error { + m, err := meta.Accessor(o) + if err != nil { + return err + } + RemoveFinalizer(m, finalizer) + return nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go index 375994ab5..c14c8688a 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go @@ -37,7 +37,7 @@ import ( // CRDInstallOptions are the options for installing CRDs type CRDInstallOptions struct { - // Paths is the path to the directory containing CRDs + // Paths is a list of paths to the directories containing CRDs Paths []string // CRDs is a list of CRDs to install @@ -46,11 +46,11 @@ type CRDInstallOptions struct { // ErrorIfPathMissing will cause an error if a Path does not exist ErrorIfPathMissing bool - // maxTime is the max time to wait - maxTime time.Duration + // MaxTime is the max time to wait + MaxTime time.Duration - // pollInterval is the interval to check - pollInterval time.Duration + // PollInterval is the interval to check + PollInterval time.Duration } const defaultPollInterval = 100 * time.Millisecond @@ -97,11 +97,11 @@ func readCRDFiles(options *CRDInstallOptions) error { // defaultCRDOptions sets the default values for CRDs func defaultCRDOptions(o *CRDInstallOptions) { - if o.maxTime == 0 { - o.maxTime = defaultMaxWait + if o.MaxTime == 0 { + o.MaxTime = defaultMaxWait } - if o.pollInterval == 0 { - o.pollInterval = defaultPollInterval + if o.PollInterval == 0 { + o.PollInterval = defaultPollInterval } } @@ -132,7 +132,7 @@ func WaitForCRDs(config *rest.Config, crds []*apiextensionsv1beta1.CustomResourc // Poll until all resources are found in discovery p := &poller{config: config, waitingFor: waitingFor} - return wait.PollImmediate(options.pollInterval, options.maxTime, p.poll) + return wait.PollImmediate(options.PollInterval, options.MaxTime, p.poll) } // poller checks if all the resources have been found in discovery, and returns false if not @@ -189,7 +189,7 @@ func CreateCRDs(config *rest.Config, crds []*apiextensionsv1beta1.CustomResource // Create each CRD for _, crd := range crds { - log.V(1).Info("installing CRD", "crd", crd) + log.V(1).Info("installing CRD", "crd", crd.Name) if _, err := cs.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd); err != nil { return err } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/helper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/helper.go new file mode 100644 index 000000000..33613d954 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/helper.go @@ -0,0 +1,41 @@ +package envtest + +import apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + +// mergePaths merges two string slices containing paths. +// This function makes no guarantees about order of the merged slice. +func mergePaths(s1, s2 []string) []string { + m := make(map[string]struct{}) + for _, s := range s1 { + m[s] = struct{}{} + } + for _, s := range s2 { + m[s] = struct{}{} + } + merged := make([]string, len(m)) + i := 0 + for key := range m { + merged[i] = key + i++ + } + return merged +} + +// mergeCRDs merges two CRD slices using their names. +// This function makes no guarantees about order of the merged slice. 
+func mergeCRDs(s1, s2 []*apiextensionsv1beta1.CustomResourceDefinition) []*apiextensionsv1beta1.CustomResourceDefinition { + m := make(map[string]*apiextensionsv1beta1.CustomResourceDefinition) + for _, crd := range s1 { + m[crd.Name] = crd + } + for _, crd := range s2 { + m[crd.Name] = crd + } + merged := make([]*apiextensionsv1beta1.CustomResourceDefinition, len(m)) + i := 0 + for _, crd := range m { + merged[i] = crd + i++ + } + return merged +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go index 2cfedd1e0..ead8972d9 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go @@ -33,7 +33,18 @@ import ( var log = logf.RuntimeLog.WithName("test-env") -// Default binary path for test framework +/* +It's possible to override some defaults, by setting the following environment variables: + USE_EXISTING_CLUSTER (boolean): if set to true, envtest will use an existing cluster + TEST_ASSET_KUBE_APISERVER (string): path to the api-server binary to use + TEST_ASSET_ETCD (string): path to the etcd binary to use + TEST_ASSET_KUBECTL (string): path to the kubectl binary to use + KUBEBUILDER_ASSETS (string): directory containing the binaries to use (api-server, etcd and kubectl). Defaults to /usr/local/kubebuilder/bin. + KUBEBUILDER_CONTROLPLANE_START_TIMEOUT (string supported by time.ParseDuration): timeout for test control plane to start. Defaults to 20s. + KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT (string supported by time.ParseDuration): timeout for test control plane to start. Defaults to 20s. + KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT (boolean): if set to true, the control plane's stdout and stderr are attached to os.Stdout and os.Stderr + +*/ const ( envUseExistingCluster = "USE_EXISTING_CLUSTER" envKubeAPIServerBin = "TEST_ASSET_KUBE_APISERVER" @@ -51,6 +62,7 @@ const ( defaultKubebuilderControlPlaneStopTimeout = 20 * time.Second ) +// Default binary path for test framework func defaultAssetPath(binary string) string { assetPath := os.Getenv(envKubebuilderPath) if assetPath == "" { @@ -62,12 +74,16 @@ func defaultAssetPath(binary string) string { // DefaultKubeAPIServerFlags are default flags necessary to bring up apiserver. var DefaultKubeAPIServerFlags = []string{ + // Allow tests to run offline, by preventing API server from attempting to + // use default route to determine its --advertise-address + "--advertise-address=127.0.0.1", "--etcd-servers={{ if .EtcdURL }}{{ .EtcdURL.String }}{{ end }}", "--cert-dir={{ .CertDir }}", "--insecure-port={{ if .URL }}{{ .URL.Port }}{{ end }}", "--insecure-bind-address={{ if .URL }}{{ .URL.Hostname }}{{ end }}", "--secure-port={{ if .SecurePort }}{{ .SecurePort }}{{ end }}", "--admission-control=AlwaysAdmit", + "--service-cluster-ip-range=10.0.0.0/24", } // Environment creates a Kubernetes test environment that will start / stop the Kubernetes control plane and @@ -81,16 +97,23 @@ type Environment struct { // loading. Config *rest.Config - // CRDs is a list of CRDs to install + // CRDInstallOptions are the options for installing CRDs. + CRDInstallOptions CRDInstallOptions + + // CRDs is a list of CRDs to install. + // If both this field and CRDs field in CRDInstallOptions are specified, the + // values are merged. CRDs []*apiextensionsv1beta1.CustomResourceDefinition // CRDDirectoryPaths is a list of paths containing CRD yaml or json configs. 
+ // If both this field and Paths field in CRDInstallOptions are specified, the + // values are merged. CRDDirectoryPaths []string // UseExisting indicates that this environments should use an // existing kubeconfig, instead of trying to stand up a new control plane. // This is useful in cases that need aggregated API servers and the like. - UseExistingCluster bool + UseExistingCluster *bool // ControlPlaneStartTimeout is the maximum duration each controlplane component // may take to start. It defaults to the KUBEBUILDER_CONTROLPLANE_START_TIMEOUT @@ -111,9 +134,10 @@ type Environment struct { AttachControlPlaneOutput bool } -// Stop stops a running server +// Stop stops a running server. +// If USE_EXISTING_CLUSTER is set to true, this method is a no-op. func (te *Environment) Stop() error { - if te.UseExistingCluster { + if te.useExistingCluster() { return nil } return te.ControlPlane.Stop() @@ -125,16 +149,23 @@ func (te Environment) getAPIServerFlags() []string { if len(te.KubeAPIServerFlags) == 0 { return DefaultKubeAPIServerFlags } + // Check KubeAPIServerFlags contains service-cluster-ip-range, if not, set default value to service-cluster-ip-range + containServiceClusterIPRange := false + for _, flag := range te.KubeAPIServerFlags { + if strings.Contains(flag, "service-cluster-ip-range") { + containServiceClusterIPRange = true + break + } + } + if !containServiceClusterIPRange { + te.KubeAPIServerFlags = append(te.KubeAPIServerFlags, "--service-cluster-ip-range=10.0.0.0/24") + } return te.KubeAPIServerFlags } // Start starts a local Kubernetes server and updates te.ApiserverPort with the port it is listening on func (te *Environment) Start() (*rest.Config, error) { - if !te.UseExistingCluster { - // Check USE_EXISTING_CLUSTER env then - te.UseExistingCluster = strings.ToLower(os.Getenv(envUseExistingCluster)) == "true" - } - if te.UseExistingCluster { + if te.useExistingCluster() { log.V(1).Info("using existing cluster") if te.Config == nil { // we want to allow people to pass in their own config, so @@ -207,10 +238,9 @@ func (te *Environment) Start() (*rest.Config, error) { } log.V(1).Info("installing CRDs") - _, err := InstallCRDs(te.Config, CRDInstallOptions{ - Paths: te.CRDDirectoryPaths, - CRDs: te.CRDs, - }) + te.CRDInstallOptions.CRDs = mergeCRDs(te.CRDInstallOptions.CRDs, te.CRDs) + te.CRDInstallOptions.Paths = mergePaths(te.CRDInstallOptions.Paths, te.CRDDirectoryPaths) + _, err := InstallCRDs(te.Config, te.CRDInstallOptions) return te.Config, err } @@ -256,3 +286,10 @@ func (te *Environment) defaultTimeouts() error { } return nil } + +func (te *Environment) useExistingCluster() bool { + if te.UseExistingCluster == nil { + return strings.ToLower(os.Getenv(envUseExistingCluster)) == "true" + } + return *te.UseExistingCluster +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go index 840cf5b89..c9b93f8b9 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go @@ -33,7 +33,7 @@ import ( // * Use EnqueueRequestForOwner to reconcile the owner of the object the event is for // - do this for events for the types the Controller creates. (e.g. 
ReplicaSets created by a Deployment Controller) // -// * Use EnqueueRequestFromMapFunc to transform an event for an object to a reconcile of an object +// * Use EnqueueRequestsFromMapFunc to transform an event for an object to a reconcile of an object // of a different type - do this for events for types the Controller may be interested in, but doesn't create. // (e.g. If Foo responds to cluster size events, map Node events to Foo objects.) // diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/doc.go new file mode 100644 index 000000000..9827eeafe --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/doc.go @@ -0,0 +1,32 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package healthz contains helpers from supporting liveness and readiness endpoints. +// (often referred to as healthz and readyz, respectively). +// +// This package draws heavily from the apiserver's healthz package +// ( https://github.com/kubernetes/apiserver/tree/master/pkg/server/healthz ) +// but has some changes to bring it in line with controller-runtime's style. +// +// The main entrypoint is the Handler -- this serves both aggregated health status +// and individual health check endpoints. +package healthz + +import ( + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("healthz") diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/healthz.go b/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/healthz.go new file mode 100644 index 000000000..e7f4b1c27 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/healthz.go @@ -0,0 +1,207 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package healthz + +import ( + "fmt" + "net/http" + "path" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// Handler is an http.Handler that aggregates the results of the given +// checkers to the root path, and supports calling individual checkers on +// subpaths of the name of the checker. +// +// Adding checks on the fly is *not* threadsafe -- use a wrapper. 
+type Handler struct { + Checks map[string]Checker +} + +// checkStatus holds the output of a particular check +type checkStatus struct { + name string + healthy bool + excluded bool +} + +func (h *Handler) serveAggregated(resp http.ResponseWriter, req *http.Request) { + failed := false + excluded := getExcludedChecks(req) + + parts := make([]checkStatus, 0, len(h.Checks)) + + // calculate the results... + for checkName, check := range h.Checks { + // no-op the check if we've specified we want to exclude the check + if excluded.Has(checkName) { + excluded.Delete(checkName) + parts = append(parts, checkStatus{name: checkName, healthy: true, excluded: true}) + continue + } + if err := check(req); err != nil { + log.V(1).Info("healthz check failed", "checker", checkName, "error", err) + parts = append(parts, checkStatus{name: checkName, healthy: false}) + failed = true + } else { + parts = append(parts, checkStatus{name: checkName, healthy: true}) + } + } + + // ...default a check if none is present... + if len(h.Checks) == 0 { + parts = append(parts, checkStatus{name: "ping", healthy: true}) + } + + for _, c := range excluded.List() { + log.V(1).Info("cannot exclude health check, no matches for it", "checker", c) + } + + // ...sort to be consistent... + sort.Slice(parts, func(i, j int) bool { return parts[i].name < parts[j].name }) + + // ...and write out the result + // TODO(directxman12): this should also accept a request for JSON content (via a accept header) + _, forceVerbose := req.URL.Query()["verbose"] + writeStatusesAsText(resp, parts, excluded, failed, forceVerbose) +} + +// writeStatusAsText writes out the given check statuses in some semi-arbitrary +// bespoke text format that we copied from Kubernetes. unknownExcludes lists +// any checks that the user requested to have excluded, but weren't actually +// known checks. writeStatusAsText is always verbose on failure, and can be +// forced to be verbose on success using the given argument. +func writeStatusesAsText(resp http.ResponseWriter, parts []checkStatus, unknownExcludes sets.String, failed, forceVerbose bool) { + resp.Header().Set("Content-Type", "text/plain; charset=utf-8") + resp.Header().Set("X-Content-Type-Options", "nosniff") + + // always write status code first + if failed { + resp.WriteHeader(http.StatusInternalServerError) + } else { + resp.WriteHeader(http.StatusOK) + } + + // shortcut for easy non-verbose success + if !failed && !forceVerbose { + fmt.Fprint(resp, "ok") + return + } + + // we're always verbose on failure, so from this point on we're guaranteed to be verbose + + for _, checkOut := range parts { + switch { + case checkOut.excluded: + fmt.Fprintf(resp, "[+]%s excluded: ok\n", checkOut.name) + case checkOut.healthy: + fmt.Fprintf(resp, "[+]%s ok\n", checkOut.name) + default: + // don't include the error since this endpoint is public. If someone wants more detail + // they should have explicit permission to the detailed checks. 
+ fmt.Fprintf(resp, "[-]%s failed: reason withheld\n", checkOut.name) + } + } + + if unknownExcludes.Len() > 0 { + fmt.Fprintf(resp, "warn: some health checks cannot be excluded: no matches for %s\n", formatQuoted(unknownExcludes.List()...)) + } + + if failed { + log.Info("healthz check failed", "statuses", parts) + fmt.Fprintf(resp, "healthz check failed\n") + } else { + fmt.Fprint(resp, "healthz check passed\n") + } +} + +func (h *Handler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + // clean up the request (duplicating the internal logic of http.ServeMux a bit) + // clean up the path a bit + reqPath := req.URL.Path + if reqPath == "" || reqPath[0] != '/' { + reqPath = "/" + reqPath + } + // path.Clean removes the trailing slash except for root for us + // (which is fine, since we're only serving one layer of sub-paths) + reqPath = path.Clean(reqPath) + + // either serve the root endpoint... + if reqPath == "/" { + h.serveAggregated(resp, req) + return + } + + // ...the default check (if nothing else is present)... + if len(h.Checks) == 0 && reqPath[1:] == "ping" { + CheckHandler{Checker: Ping}.ServeHTTP(resp, req) + return + } + + // ...or an individual checker + checkName := reqPath[1:] // ignore the leading slash + checker, known := h.Checks[checkName] + if !known { + http.NotFoundHandler().ServeHTTP(resp, req) + return + } + + CheckHandler{Checker: checker}.ServeHTTP(resp, req) +} + +// CheckHandler is an http.Handler that serves a health check endpoint at the root path, +// based on its checker. +type CheckHandler struct { + Checker +} + +func (h CheckHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + err := h.Checker(req) + if err != nil { + http.Error(resp, fmt.Sprintf("internal server error: %v", err), http.StatusInternalServerError) + } else { + fmt.Fprint(resp, "ok") + } +} + +// Checker knows how to perform a health check. +type Checker func(req *http.Request) error + +// Ping returns true automatically when checked +var Ping Checker = func(_ *http.Request) error { return nil } + +// getExcludedChecks extracts the health check names to be excluded from the query param +func getExcludedChecks(r *http.Request) sets.String { + checks, found := r.URL.Query()["exclude"] + if found { + return sets.NewString(checks...) + } + return sets.NewString() +} + +// formatQuoted returns a formatted string of the health check names, +// preserving the order passed in. +func formatQuoted(names ...string) string { + quoted := make([]string, 0, len(names)) + for _, name := range names { + quoted = append(quoted, fmt.Sprintf("%q", name)) + } + return strings.Join(quoted, ",") +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go index 7f40a32a5..0938fd4ec 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go @@ -68,6 +68,11 @@ type Controller struct { // specified, or the ~/.kube/Config. Config *rest.Config + // MakeQueue constructs the queue for this controller once the controller is ready to start. + // This exists because the standard Kubernetes workqueues start themselves immediately, which + // leads to goroutine leaks if something calls controller.New repeatedly. 
+ MakeQueue func() workqueue.RateLimitingInterface + // Queue is an listeningQueue that listens for events from Informers and adds object keys to // the Queue for processing Queue workqueue.RateLimitingInterface @@ -93,6 +98,16 @@ type Controller struct { Recorder record.EventRecorder // TODO(community): Consider initializing a logger with the Controller Name as the tag + + // watches maintains a list of sources, handlers, and predicates to start when the controller is started. + watches []watchDescription +} + +// watchDescription contains all the information necessary to start a watch. +type watchDescription struct { + src source.Source + handler handler.EventHandler + predicates []predicate.Predicate } // Reconcile implements reconcile.Reconciler @@ -118,47 +133,72 @@ func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prc } } - log.Info("Starting EventSource", "controller", c.Name, "source", src) - return src.Start(evthdler, c.Queue, prct...) + c.watches = append(c.watches, watchDescription{src: src, handler: evthdler, predicates: prct}) + if c.Started { + log.Info("Starting EventSource", "controller", c.Name, "source", src) + return src.Start(evthdler, c.Queue, prct...) + } + + return nil } // Start implements controller.Controller func (c *Controller) Start(stop <-chan struct{}) error { + // use an IIFE to get proper lock handling + // but lock outside to get proper handling of the queue shutdown c.mu.Lock() - // TODO(pwittrock): Reconsider HandleCrash - defer utilruntime.HandleCrash() - defer c.Queue.ShutDown() + c.Queue = c.MakeQueue() + defer c.Queue.ShutDown() // needs to be outside the iife so that we shutdown after the stop channel is closed - // Start the SharedIndexInformer factories to begin populating the SharedIndexInformer caches - log.Info("Starting Controller", "controller", c.Name) + err := func() error { + defer c.mu.Unlock() - // Wait for the caches to be synced before starting workers - if c.WaitForCacheSync == nil { - c.WaitForCacheSync = c.Cache.WaitForCacheSync - } - if ok := c.WaitForCacheSync(stop); !ok { - // This code is unreachable right now since WaitForCacheSync will never return an error - // Leaving it here because that could happen in the future - err := fmt.Errorf("failed to wait for %s caches to sync", c.Name) - log.Error(err, "Could not wait for Cache to sync", "controller", c.Name) - c.mu.Unlock() - return err - } + // TODO(pwittrock): Reconsider HandleCrash + defer utilruntime.HandleCrash() - if c.JitterPeriod == 0 { - c.JitterPeriod = 1 * time.Second - } + // NB(directxman12): launch the sources *before* trying to wait for the + // caches to sync so that they have a chance to register their intendeded + // caches. 
+ for _, watch := range c.watches { + log.Info("Starting EventSource", "controller", c.Name, "source", watch.src) + if err := watch.src.Start(watch.handler, c.Queue, watch.predicates...); err != nil { + return err + } + } - // Launch workers to process resources - log.Info("Starting workers", "controller", c.Name, "worker count", c.MaxConcurrentReconciles) - for i := 0; i < c.MaxConcurrentReconciles; i++ { - // Process work items - go wait.Until(c.worker, c.JitterPeriod, stop) - } + // Start the SharedIndexInformer factories to begin populating the SharedIndexInformer caches + log.Info("Starting Controller", "controller", c.Name) + + // Wait for the caches to be synced before starting workers + if c.WaitForCacheSync == nil { + c.WaitForCacheSync = c.Cache.WaitForCacheSync + } + if ok := c.WaitForCacheSync(stop); !ok { + // This code is unreachable right now since WaitForCacheSync will never return an error + // Leaving it here because that could happen in the future + err := fmt.Errorf("failed to wait for %s caches to sync", c.Name) + log.Error(err, "Could not wait for Cache to sync", "controller", c.Name) + return err + } + + if c.JitterPeriod == 0 { + c.JitterPeriod = 1 * time.Second + } + + // Launch workers to process resources + log.Info("Starting workers", "controller", c.Name, "worker count", c.MaxConcurrentReconciles) + for i := 0; i < c.MaxConcurrentReconciles; i++ { + // Process work items + go wait.Until(c.worker, c.JitterPeriod, stop) + } - c.Started = true - c.mu.Unlock() + c.Started = true + return nil + }() + if err != nil { + return err + } <-stop log.Info("Stopping workers", "controller", c.Name) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/filter.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/filter.go deleted file mode 100644 index 8513846e2..000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/filter.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package objectutil - -import ( - apimeta "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" -) - -// FilterWithLabels returns a copy of the items in objs matching labelSel -func FilterWithLabels(objs []runtime.Object, labelSel labels.Selector) ([]runtime.Object, error) { - outItems := make([]runtime.Object, 0, len(objs)) - for _, obj := range objs { - meta, err := apimeta.Accessor(obj) - if err != nil { - return nil, err - } - if labelSel != nil { - lbls := labels.Set(meta.GetLabels()) - if !labelSel.Matches(lbls) { - continue - } - } - outItems = append(outItems, obj.DeepCopyObject()) - } - return outItems, nil -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go index 18e150594..af230ad50 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go @@ -39,14 +39,13 @@ type provider struct { } // NewProvider create a new Provider instance. -func NewProvider(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger) (recorder.Provider, error) { +func NewProvider(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, broadcaster record.EventBroadcaster) (recorder.Provider, error) { clientSet, err := kubernetes.NewForConfig(config) if err != nil { return nil, fmt.Errorf("failed to init clientSet: %v", err) } - p := &provider{scheme: scheme, logger: logger} - p.eventBroadcaster = record.NewBroadcaster() + p := &provider{scheme: scheme, logger: logger, eventBroadcaster: broadcaster} p.eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: clientSet.CoreV1().Events("")}) p.eventBroadcaster.StartEventWatcher( func(e *corev1.Event) { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go b/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go index 9e13fd796..b3003ea5e 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go @@ -101,7 +101,7 @@ func getInClusterNamespace() (string, error) { return "", fmt.Errorf("error checking namespace file: %v", err) } - // Load the namespace file and return itss content + // Load the namespace file and return its content namespace, err := ioutil.ReadFile(inClusterNamespacePath) if err != nil { return "", fmt.Errorf("error reading namespace file: %v", err) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/zap.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/zap.go index 5812e85bf..36ef186a8 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/zap.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/zap.go @@ -29,10 +29,21 @@ import ( "go.uber.org/zap/zapcore" ) +// New returns a brand new Logger configured with Opts. It +// uses KubeAwareEncoder which adds Type information and +// Namespace/Name to the log. +func New(opts ...Opts) logr.Logger { + return zapr.NewLogger(NewRaw(opts...)) +} + // Logger is a Logger implementation. // If development is true, a Zap development config will be used // (stacktraces on warnings, no sampling), otherwise a Zap production // config will be used (stacktraces on errors, sampling). 
+// +// Deprecated: use New() and the functional opts pattern instead: +// +// New(UseDevMode(development)) func Logger(development bool) logr.Logger { return LoggerTo(os.Stderr, development) } @@ -40,34 +51,159 @@ func Logger(development bool) logr.Logger { // LoggerTo returns a new Logger implementation using Zap which logs // to the given destination, instead of stderr. It otherwise behaves like // ZapLogger. +// +// Deprecated: use New() and the functional opts pattern instead: +// +// New(UseDevMode(development), WriteTo(writer)) func LoggerTo(destWriter io.Writer, development bool) logr.Logger { return zapr.NewLogger(RawLoggerTo(destWriter, development)) } // RawLoggerTo returns a new zap.Logger configured with KubeAwareEncoder // which logs to a given destination +// +// Deprecated: use NewRaw() and the functional opts pattern instead: +// +// NewRaw(UseDevMode(development), WriteTo(destWriter), RawZapOpts(opts...)) func RawLoggerTo(destWriter io.Writer, development bool, opts ...zap.Option) *zap.Logger { - // this basically mimics NewConfig, but with a custom sink - sink := zapcore.AddSync(destWriter) - - var enc zapcore.Encoder - var lvl zap.AtomicLevel - if development { - encCfg := zap.NewDevelopmentEncoderConfig() - enc = zapcore.NewConsoleEncoder(encCfg) - lvl = zap.NewAtomicLevelAt(zap.DebugLevel) - opts = append(opts, zap.Development(), zap.AddStacktrace(zap.ErrorLevel)) + return NewRaw(UseDevMode(development), WriteTo(destWriter), RawZapOpts(opts...)) +} + +// Opts allows to manipulate Options +type Opts func(*Options) + +// UseDevMode sets the logger to use (or not use) development mode (more +// human-readable output, extra stack traces and logging information, etc). +// See Options.Development +func UseDevMode(enabled bool) Opts { + return func(o *Options) { + o.Development = enabled + } +} + +// WriteTo configures the logger to write to the given io.Writer, instead of standard error. +// See Options.DestWritter +func WriteTo(out io.Writer) Opts { + return func(o *Options) { + o.DestWritter = out + } +} + +// Encoder configures how the logger will encode the output e.g JSON or console. +// See Options.Encoder +func Encoder(encoder zapcore.Encoder) func(o *Options) { + return func(o *Options) { + o.Encoder = encoder + } +} + +// Level sets the the minimum enabled logging level e.g Debug, Info +// See Options.Level +func Level(level *zap.AtomicLevel) func(o *Options) { + return func(o *Options) { + o.Level = level + } +} + +// StacktraceLevel configures the logger to record a stack trace for all messages at +// or above a given level. +// See Options.StacktraceLevel +func StacktraceLevel(stacktraceLevel *zap.AtomicLevel) func(o *Options) { + return func(o *Options) { + o.StacktraceLevel = stacktraceLevel + } +} + +// RawZapOpts allows appending arbitrary zap.Options to configure the underlying zap logger. +// See Options.ZapOpts +func RawZapOpts(zapOpts ...zap.Option) func(o *Options) { + return func(o *Options) { + o.ZapOpts = append(o.ZapOpts, zapOpts...) + } +} + +// Options contains all possible settings +type Options struct { + // Development configures the logger to use a Zap development config + // (stacktraces on warnings, no sampling), otherwise a Zap production + // config will be used (stacktraces on errors, sampling). + Development bool + // Encoder configures how Zap will encode the output. Defaults to + // console when Development is true and JSON otherwise + Encoder zapcore.Encoder + // DestWritter controls the destination of the log output. 
Defaults to + // os.Stderr. + DestWritter io.Writer + // Level configures the verbosity of the logging. Defaults to Debug when + // Development is true and Info otherwise + Level *zap.AtomicLevel + // StacktraceLevel is the level at and above which stacktraces will + // be recorded for all messages. Defaults to Warn when Development + // is true and Error otherwise + StacktraceLevel *zap.AtomicLevel + // ZapOpts allows passing arbitrary zap.Options to configure on the + // underlying Zap logger. + ZapOpts []zap.Option +} + +// addDefaults adds defaults to the Options +func (o *Options) addDefaults() { + if o.DestWritter == nil { + o.DestWritter = os.Stderr + } + + if o.Development { + if o.Encoder == nil { + encCfg := zap.NewDevelopmentEncoderConfig() + o.Encoder = zapcore.NewConsoleEncoder(encCfg) + } + if o.Level == nil { + lvl := zap.NewAtomicLevelAt(zap.DebugLevel) + o.Level = &lvl + } + if o.StacktraceLevel == nil { + lvl := zap.NewAtomicLevelAt(zap.WarnLevel) + o.StacktraceLevel = &lvl + } + o.ZapOpts = append(o.ZapOpts, zap.Development()) + } else { - encCfg := zap.NewProductionEncoderConfig() - enc = zapcore.NewJSONEncoder(encCfg) - lvl = zap.NewAtomicLevelAt(zap.InfoLevel) - opts = append(opts, zap.AddStacktrace(zap.WarnLevel), + if o.Encoder == nil { + encCfg := zap.NewProductionEncoderConfig() + o.Encoder = zapcore.NewJSONEncoder(encCfg) + } + if o.Level == nil { + lvl := zap.NewAtomicLevelAt(zap.InfoLevel) + o.Level = &lvl + } + if o.StacktraceLevel == nil { + lvl := zap.NewAtomicLevelAt(zap.ErrorLevel) + o.StacktraceLevel = &lvl + } + o.ZapOpts = append(o.ZapOpts, zap.WrapCore(func(core zapcore.Core) zapcore.Core { return zapcore.NewSampler(core, time.Second, 100, 100) })) } - opts = append(opts, zap.AddCallerSkip(1), zap.ErrorOutput(sink)) - log := zap.New(zapcore.NewCore(&KubeAwareEncoder{Encoder: enc, Verbose: development}, sink, lvl)) - log = log.WithOptions(opts...) + + o.ZapOpts = append(o.ZapOpts, zap.AddStacktrace(o.StacktraceLevel)) +} + +// NewRaw returns a new zap.Logger configured with the passed Opts +// or their defaults. It uses KubeAwareEncoder which adds Type +// information and Namespace/Name to the log. +func NewRaw(opts ...Opts) *zap.Logger { + o := &Options{} + for _, opt := range opts { + opt(o) + } + o.addDefaults() + + // this basically mimics NewConfig, but with a custom sink + sink := zapcore.AddSync(o.DestWritter) + + o.ZapOpts = append(o.ZapOpts, zap.AddCallerSkip(1), zap.ErrorOutput(sink)) + log := zap.New(zapcore.NewCore(&KubeAwareEncoder{Encoder: o.Encoder, Verbose: o.Development}, sink, *o.Level)) + log = log.WithOptions(o.ZapOpts...) 
return log } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go index 45803ef32..5ece5ef7e 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go @@ -31,14 +31,15 @@ import ( "k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/leaderelection/resourcelock" "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/healthz" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" "sigs.k8s.io/controller-runtime/pkg/metrics" "sigs.k8s.io/controller-runtime/pkg/recorder" "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/webhook" - "sigs.k8s.io/controller-runtime/pkg/webhook/conversion" ) const ( @@ -46,6 +47,9 @@ const ( defaultLeaseDuration = 15 * time.Second defaultRenewDeadline = 10 * time.Second defaultRetryPeriod = 2 * time.Second + + defaultReadinessEndpoint = "/readyz" + defaultLivenessEndpoint = "/healthz" ) var log = logf.RuntimeLog.WithName("manager") @@ -91,9 +95,30 @@ type controllerManager struct { // metricsListener is used to serve prometheus metrics metricsListener net.Listener - mu sync.Mutex - started bool - errChan chan error + // healthProbeListener is used to serve liveness probe + healthProbeListener net.Listener + + // Readiness probe endpoint name + readinessEndpointName string + + // Liveness probe endpoint name + livenessEndpointName string + + // Readyz probe handler + readyzHandler *healthz.Handler + + // Healthz probe handler + healthzHandler *healthz.Handler + + mu sync.Mutex + started bool + startedLeader bool + healthzStarted bool + + // NB(directxman12): we don't just use an error channel here to avoid the situation where the + // error channel is too small and we end up blocking some goroutines waiting to report their errors. + // errSignal lets us track when we should stop because an error occurred + errSignal *errSignaler // internalStop is the stop channel *actually* used by everything involved // with the manager as a stop channel, so that we can pass a stop channel @@ -111,6 +136,10 @@ type controllerManager struct { port int // host is the hostname that the webhook server binds to. host string + // CertDir is the directory that contains the server key and certificate. + // if not set, webhook server would look up the server key and certificate in + // {TempDir}/k8s-webhook-server/serving-certs + certDir string webhookServer *webhook.Server @@ -125,6 +154,51 @@ type controllerManager struct { retryPeriod time.Duration } +type errSignaler struct { + // errSignal indicates that an error occurred, when closed. It shouldn't + // be written to. 
+ errSignal chan struct{} + + // err is the received error + err error + + mu sync.Mutex +} + +func (r *errSignaler) SignalError(err error) { + r.mu.Lock() + defer r.mu.Unlock() + + if err == nil { + // non-error, ignore + log.Error(nil, "SignalError called without an (with a nil) error, which should never happen, ignoring") + return + } + + if r.err != nil { + // we already have an error, don't try again + return + } + + // save the error and report it + r.err = err + close(r.errSignal) +} + +func (r *errSignaler) Error() error { + r.mu.Lock() + defer r.mu.Unlock() + + return r.err +} + +func (r *errSignaler) GotError() chan struct{} { + r.mu.Lock() + defer r.mu.Unlock() + + return r.errSignal +} + // Add sets dependencies on i, and adds it to the list of Runnables to start. func (cm *controllerManager) Add(r Runnable) error { cm.mu.Lock() @@ -135,17 +209,23 @@ func (cm *controllerManager) Add(r Runnable) error { return err } + var shouldStart bool + // Add the runnable to the leader election or the non-leaderelection list if leRunnable, ok := r.(LeaderElectionRunnable); ok && !leRunnable.NeedLeaderElection() { + shouldStart = cm.started cm.nonLeaderElectionRunnables = append(cm.nonLeaderElectionRunnables, r) } else { + shouldStart = cm.startedLeader cm.leaderElectionRunnables = append(cm.leaderElectionRunnables, r) } - if cm.started { + if shouldStart { // If already started, start the controller go func() { - cm.errChan <- r.Start(cm.internalStop) + if err := r.Start(cm.internalStop); err != nil { + cm.errSignal.SignalError(err) + } }() } @@ -180,6 +260,40 @@ func (cm *controllerManager) SetFields(i interface{}) error { return nil } +// AddHealthzCheck allows you to add Healthz checker +func (cm *controllerManager) AddHealthzCheck(name string, check healthz.Checker) error { + cm.mu.Lock() + defer cm.mu.Unlock() + + if cm.healthzStarted { + return fmt.Errorf("unable to add new checker because healthz endpoint has already been created") + } + + if cm.healthzHandler == nil { + cm.healthzHandler = &healthz.Handler{Checks: map[string]healthz.Checker{}} + } + + cm.healthzHandler.Checks[name] = check + return nil +} + +// AddReadyzCheck allows you to add Readyz checker +func (cm *controllerManager) AddReadyzCheck(name string, check healthz.Checker) error { + cm.mu.Lock() + defer cm.mu.Unlock() + + if cm.healthzStarted { + return fmt.Errorf("unable to add new checker because readyz endpoint has already been created") + } + + if cm.readyzHandler == nil { + cm.readyzHandler = &healthz.Handler{Checks: map[string]healthz.Checker{}} + } + + cm.readyzHandler.Checks[name] = check + return nil +} + func (cm *controllerManager) GetConfig() *rest.Config { return cm.config } @@ -215,10 +329,10 @@ func (cm *controllerManager) GetAPIReader() client.Reader { func (cm *controllerManager) GetWebhookServer() *webhook.Server { if cm.webhookServer == nil { cm.webhookServer = &webhook.Server{ - Port: cm.port, - Host: cm.host, + Port: cm.port, + Host: cm.host, + CertDir: cm.certDir, } - cm.webhookServer.Register("/convert", &conversion.Webhook{}) if err := cm.Add(cm.webhookServer); err != nil { panic("unable to add webhookServer to the controller manager") } @@ -227,19 +341,21 @@ func (cm *controllerManager) GetWebhookServer() *webhook.Server { } func (cm *controllerManager) serveMetrics(stop <-chan struct{}) { + var metricsPath = "/metrics" handler := promhttp.HandlerFor(metrics.Registry, promhttp.HandlerOpts{ ErrorHandling: promhttp.HTTPErrorOnError, }) // TODO(JoelSpeed): Use existing Kubernetes machinery for 
serving metrics mux := http.NewServeMux() - mux.Handle("/metrics", handler) + mux.Handle(metricsPath, handler) server := http.Server{ Handler: mux, } // Run the server go func() { + log.Info("starting metrics server", "path", metricsPath) if err := server.Serve(cm.metricsListener); err != nil && err != http.ErrServerClosed { - cm.errChan <- err + cm.errSignal.SignalError(err) } }() @@ -247,7 +363,39 @@ func (cm *controllerManager) serveMetrics(stop <-chan struct{}) { select { case <-stop: if err := server.Shutdown(context.Background()); err != nil { - cm.errChan <- err + cm.errSignal.SignalError(err) + } + } +} + +func (cm *controllerManager) serveHealthProbes(stop <-chan struct{}) { + cm.mu.Lock() + mux := http.NewServeMux() + + if cm.readyzHandler != nil { + mux.Handle(cm.readinessEndpointName, http.StripPrefix(cm.readinessEndpointName, cm.readyzHandler)) + } + if cm.healthzHandler != nil { + mux.Handle(cm.livenessEndpointName, http.StripPrefix(cm.livenessEndpointName, cm.healthzHandler)) + } + + server := http.Server{ + Handler: mux, + } + // Run server + go func() { + if err := server.Serve(cm.healthProbeListener); err != nil && err != http.ErrServerClosed { + cm.errSignal.SignalError(err) + } + }() + cm.healthzStarted = true + cm.mu.Unlock() + + // Shutdown the server when stop is closed + select { + case <-stop: + if err := server.Shutdown(context.Background()); err != nil { + cm.errSignal.SignalError(err) } } } @@ -256,6 +404,9 @@ func (cm *controllerManager) Start(stop <-chan struct{}) error { // join the passed-in stop channel as an upstream feeding into cm.internalStopper defer close(cm.internalStopper) + // initialize this here so that we reset the signal channel state on every start + cm.errSignal = &errSignaler{errSignal: make(chan struct{})} + // Metrics should be served whether the controller is leader or not. 
// (If we don't serve metrics for non-leaders, prometheus will still scrape // the pod but will get a connection refused) @@ -263,6 +414,11 @@ func (cm *controllerManager) Start(stop <-chan struct{}) error { go cm.serveMetrics(cm.internalStop) } + // Serve health probes + if cm.healthProbeListener != nil { + go cm.serveHealthProbes(cm.internalStop) + } + go cm.startNonLeaderElectionRunnables() if cm.resourceLock != nil { @@ -278,9 +434,9 @@ func (cm *controllerManager) Start(stop <-chan struct{}) error { case <-stop: // We are done return nil - case err := <-cm.errChan: + case <-cm.errSignal.GotError(): // Error starting a controller - return err + return cm.errSignal.Error() } } @@ -296,7 +452,12 @@ func (cm *controllerManager) startNonLeaderElectionRunnables() { // Write any Start errors to a channel so we can return them ctrl := c go func() { - cm.errChan <- ctrl.Start(cm.internalStop) + if err := ctrl.Start(cm.internalStop); err != nil { + cm.errSignal.SignalError(err) + } + // we use %T here because we don't have a good stand-in for "name", + // and the full runnable might not serialize (mutexes, etc) + log.V(1).Info("non-leader-election runnable finished", "runnable type", fmt.Sprintf("%T", ctrl)) }() } } @@ -313,9 +474,16 @@ func (cm *controllerManager) startLeaderElectionRunnables() { // Write any Start errors to a channel so we can return them ctrl := c go func() { - cm.errChan <- ctrl.Start(cm.internalStop) + if err := ctrl.Start(cm.internalStop); err != nil { + cm.errSignal.SignalError(err) + } + // we use %T here because we don't have a good stand-in for "name", + // and the full runnable might not serialize (mutexes, etc) + log.V(1).Info("leader-election runnable finished", "runnable type", fmt.Sprintf("%T", ctrl)) }() } + + cm.startedLeader = true } func (cm *controllerManager) waitForCache() { @@ -329,7 +497,7 @@ func (cm *controllerManager) waitForCache() { } go func() { if err := cm.startCache(cm.internalStop); err != nil { - cm.errChan <- err + cm.errSignal.SignalError(err) } }() @@ -353,7 +521,7 @@ func (cm *controllerManager) startLeaderElection() (err error) { // Most implementations of leader election log.Fatal() here. // Since Start is wrapped in log.Fatal when called, we can just return // an error here which will cause the program to exit. - cm.errChan <- fmt.Errorf("leader election lost") + cm.errSignal.SignalError(fmt.Errorf("leader election lost")) }, }, }) @@ -361,7 +529,16 @@ func (cm *controllerManager) startLeaderElection() (err error) { return err } + ctx, cancel := context.WithCancel(context.Background()) + go func() { + select { + case <-cm.internalStop: + cancel() + case <-ctx.Done(): + } + }() + // Start the leader elector process - go l.Run(context.Background()) + go l.Run(ctx) return nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go index 670df79d0..d30c4f0d3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go @@ -32,6 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/healthz" internalrecorder "sigs.k8s.io/controller-runtime/pkg/internal/recorder" "sigs.k8s.io/controller-runtime/pkg/leaderelection" "sigs.k8s.io/controller-runtime/pkg/metrics" @@ -53,6 +54,12 @@ type Manager interface { // interface - e.g. inject.Client. 
SetFields(interface{}) error + // AddHealthzCheck allows you to add Healthz checker + AddHealthzCheck(name string, check healthz.Checker) error + + // AddReadyzCheck allows you to add Readyz checker + AddReadyzCheck(name string, check healthz.Checker) error + // Start starts all registered Controllers and blocks until the Stop channel is closed. // Returns an error if there is an error starting any controller. Start(<-chan struct{}) error @@ -63,7 +70,10 @@ type Manager interface { // GetScheme returns an initialized Scheme GetScheme() *runtime.Scheme - // GetClient returns a client configured with the Config + // GetClient returns a client configured with the Config. This client may + // not be a fully "direct" client -- it may read from a cache, for + // instance. See Options.NewClient for more information on how the default + // implementation works. GetClient() client.Client // GetFieldIndexer returns a client.FieldIndexer configured with the client @@ -135,9 +145,20 @@ type Options struct { Namespace string // MetricsBindAddress is the TCP address that the controller should bind to - // for serving prometheus metrics + // for serving prometheus metrics. + // It can be set to "0" to disable the metrics serving. MetricsBindAddress string + // HealthProbeBindAddress is the TCP address that the controller should bind to + // for serving health probes + HealthProbeBindAddress string + + // Readiness probe endpoint name, defaults to "readyz" + ReadinessEndpointName string + + // Liveness probe endpoint name, defaults to "healthz" + LivenessEndpointName string + // Port is the port that the webhook server serves at. // It is used to set webhook.Server.Port. Port int @@ -145,6 +166,10 @@ type Options struct { // It is used to set webhook.Server.Host. Host string + // CertDir is the directory that contains the server key and certificate. + // if not set, webhook server would look up the server key and certificate in + // {TempDir}/k8s-webhook-server/serving-certs + CertDir string // Functions to all for a user to customize the values that will be injected. // NewCache is the function that will create the cache to be used @@ -156,10 +181,15 @@ type Options struct { // use the cache for reads and the client for writes. NewClient NewClientFunc + // EventBroadcaster records Events emitted by the manager and sends them to the Kubernetes API + // Use this to customize the event correlator and spam filter + EventBroadcaster record.EventBroadcaster + // Dependency injection for testing - newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger) (recorder.Provider, error) - newResourceLock func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error) - newMetricsListener func(addr string) (net.Listener, error) + newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, broadcaster record.EventBroadcaster) (recorder.Provider, error) + newResourceLock func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error) + newMetricsListener func(addr string) (net.Listener, error) + newHealthProbeListener func(addr string) (net.Listener, error) } // NewClientFunc allows a user to define how to create a client @@ -227,7 +257,7 @@ func New(config *rest.Config, options Options) (Manager, error) { // Create the recorder provider to inject event recorders for the components. 
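With the manager now exposing AddHealthzCheck/AddReadyzCheck and the new HealthProbeBindAddress option (plus the /healthz and /readyz default endpoint names added above), a controller binary can serve liveness and readiness probes alongside metrics. A sketch of that wiring; GetConfigOrDie and SetupSignalHandler are standard controller-runtime entry points outside this patch:

```go
package main

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/healthz"
)

func main() {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		MetricsBindAddress:     ":8080", // "0" disables the metrics endpoint entirely
		HealthProbeBindAddress: ":8081", // serves the default /healthz and /readyz endpoints
	})
	if err != nil {
		panic(err)
	}

	// healthz.Ping is a trivial Checker that always succeeds; real controllers
	// would register checks against their own dependencies instead.
	if err := mgr.AddHealthzCheck("ping", healthz.Ping); err != nil {
		panic(err)
	}
	if err := mgr.AddReadyzCheck("ping", healthz.Ping); err != nil {
		panic(err)
	}

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		panic(err)
	}
}
```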
// TODO(directxman12): the log for the event provider should have a context (name, tags, etc) specific // to the particular controller that it's being injected into, rather than a generic one like is here. - recorderProvider, err := options.newRecorderProvider(config, options.Scheme, log.WithName("events")) + recorderProvider, err := options.newRecorderProvider(config, options.Scheme, log.WithName("events"), options.EventBroadcaster) if err != nil { return nil, err } @@ -242,34 +272,44 @@ func New(config *rest.Config, options Options) (Manager, error) { return nil, err } - // Create the mertics listener. This will throw an error if the metrics bind + // Create the metrics listener. This will throw an error if the metrics bind // address is invalid or already in use. metricsListener, err := options.newMetricsListener(options.MetricsBindAddress) if err != nil { return nil, err } + // Create health probes listener. This will throw an error if the bind + // address is invalid or already in use. + healthProbeListener, err := options.newHealthProbeListener(options.HealthProbeBindAddress) + if err != nil { + return nil, err + } + stop := make(chan struct{}) return &controllerManager{ - config: config, - scheme: options.Scheme, - errChan: make(chan error), - cache: cache, - fieldIndexes: cache, - client: writeObj, - apiReader: apiReader, - recorderProvider: recorderProvider, - resourceLock: resourceLock, - mapper: mapper, - metricsListener: metricsListener, - internalStop: stop, - internalStopper: stop, - port: options.Port, - host: options.Host, - leaseDuration: *options.LeaseDuration, - renewDeadline: *options.RenewDeadline, - retryPeriod: *options.RetryPeriod, + config: config, + scheme: options.Scheme, + cache: cache, + fieldIndexes: cache, + client: writeObj, + apiReader: apiReader, + recorderProvider: recorderProvider, + resourceLock: resourceLock, + mapper: mapper, + metricsListener: metricsListener, + internalStop: stop, + internalStopper: stop, + port: options.Port, + host: options.Host, + certDir: options.CertDir, + leaseDuration: *options.LeaseDuration, + renewDeadline: *options.RenewDeadline, + retryPeriod: *options.RetryPeriod, + healthProbeListener: healthProbeListener, + readinessEndpointName: options.ReadinessEndpointName, + livenessEndpointName: options.LivenessEndpointName, }, nil } @@ -291,6 +331,19 @@ func defaultNewClient(cache cache.Cache, config *rest.Config, options client.Opt }, nil } +// defaultHealthProbeListener creates the default health probes listener bound to the given address +func defaultHealthProbeListener(addr string) (net.Listener, error) { + if addr == "" || addr == "0" { + return nil, nil + } + + ln, err := net.Listen("tcp", addr) + if err != nil { + return nil, fmt.Errorf("error listening on %s: %v", addr, err) + } + return ln, nil +} + // setOptionsDefaults set default values for Options fields func setOptionsDefaults(options Options) Options { // Use the Kubernetes client-go scheme if none is specified @@ -299,7 +352,9 @@ func setOptionsDefaults(options Options) Options { } if options.MapperProvider == nil { - options.MapperProvider = apiutil.NewDiscoveryRESTMapper + options.MapperProvider = func(c *rest.Config) (meta.RESTMapper, error) { + return apiutil.NewDynamicRESTMapper(c) + } } // Allow newClient to be mocked @@ -338,5 +393,21 @@ func setOptionsDefaults(options Options) Options { options.RetryPeriod = &retryPeriod } + if options.EventBroadcaster == nil { + options.EventBroadcaster = record.NewBroadcaster() + } + + if options.ReadinessEndpointName == 
"" { + options.ReadinessEndpointName = defaultReadinessEndpoint + } + + if options.LivenessEndpointName == "" { + options.LivenessEndpointName = defaultLivenessEndpoint + } + + if options.newHealthProbeListener == nil { + options.newHealthProbeListener = defaultHealthProbeListener + } + return options } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go index 3b2c31628..505e5ff89 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go @@ -20,23 +20,9 @@ import ( "net/url" "time" - "k8s.io/apimachinery/pkg/util/runtime" - "github.com/prometheus/client_golang/prometheus" reflectormetrics "k8s.io/client-go/tools/cache" clientmetrics "k8s.io/client-go/tools/metrics" - workqueuemetrics "k8s.io/client-go/util/workqueue" -) - -const ( - workQueueSubsystem = "workqueue" - depthKey = "depth" - addsKey = "adds_total" - queueLatencyKey = "queue_duration_seconds" - workDurationKey = "work_duration_seconds" - unfinishedWorkKey = "unfinished_work_seconds" - longestRunningProcessorKey = "longest_running_processor_seconds" - retriesKey = "retries_total" ) // this file contains setup logic to initialize the myriad of places @@ -117,62 +103,11 @@ var ( Name: "last_resource_version", Help: "Last resource version seen for the reflectors", }, []string{"name"}) - - // workqueue metrics - - depth = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Subsystem: workQueueSubsystem, - Name: depthKey, - Help: "Current depth of workqueue", - }, []string{"name"}) - - adds = prometheus.NewCounterVec(prometheus.CounterOpts{ - Subsystem: workQueueSubsystem, - Name: addsKey, - Help: "Total number of adds handled by workqueue", - }, []string{"name"}) - - latency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Subsystem: workQueueSubsystem, - Name: queueLatencyKey, - Help: "How long in seconds an item stays in workqueue before being requested.", - Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), - }, []string{"name"}) - - workDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Subsystem: workQueueSubsystem, - Name: workDurationKey, - Help: "How long in seconds processing an item from workqueue takes.", - Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), - }, []string{"name"}) - - unfinishedWork = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Subsystem: workQueueSubsystem, - Name: unfinishedWorkKey, - Help: "How many seconds of work has done that " + - "is in progress and hasn't been observed by work_duration. Large " + - "values indicate stuck threads. 
One can deduce the number of stuck " + - "threads by observing the rate at which this increases.", - }, []string{"name"}) - - longestRunning = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Subsystem: workQueueSubsystem, - Name: longestRunningProcessorKey, - Help: "How many seconds has the longest running " + - "processor for workqueue been running.", - }, []string{"name"}) - - retries = prometheus.NewCounterVec(prometheus.CounterOpts{ - Subsystem: workQueueSubsystem, - Name: retriesKey, - Help: "Total number of retries handled by workqueue", - }, []string{"name"}) ) func init() { registerClientMetrics() registerReflectorMetrics() - registerWorkqueueMetrics() } // registerClientMetrics sets up the client latency metrics from client-go @@ -185,7 +120,7 @@ func registerClientMetrics() { clientmetrics.Register(&latencyAdapter{metric: requestLatency}, &resultAdapter{metric: requestResult}) } -// registerReflectorMetrics sets up reflector (reconile) loop metrics +// registerReflectorMetrics sets up reflector (reconcile) loop metrics func registerReflectorMetrics() { Registry.MustRegister(listsTotal) Registry.MustRegister(listsDuration) @@ -199,20 +134,7 @@ func registerReflectorMetrics() { reflectormetrics.SetReflectorMetricsProvider(reflectorMetricsProvider{}) } -// registerWorkQueueMetrics sets up workqueue (other reconcile) metrics -func registerWorkqueueMetrics() { - Registry.MustRegister(depth) - Registry.MustRegister(adds) - Registry.MustRegister(latency) - Registry.MustRegister(workDuration) - Registry.MustRegister(retries) - Registry.MustRegister(longestRunning) - Registry.MustRegister(unfinishedWork) - - workqueuemetrics.SetProvider(workqueueMetricsProvider{}) -} - -// this section contains adapters, implementations, and other sundry organic, artisinally +// this section contains adapters, implementations, and other sundry organic, artisanally // hand-crafted syntax trees required to convince client-go that it actually wants to let // someone use its metrics. @@ -273,114 +195,3 @@ func (reflectorMetricsProvider) NewItemsInWatchMetric(name string) reflectormetr func (reflectorMetricsProvider) NewLastResourceVersionMetric(name string) reflectormetrics.GaugeMetric { return lastResourceVersion.WithLabelValues(name) } - -// Workqueue metrics (method #3 for client-go metrics), -// copied (more-or-less directly) from k8s.io/kubernetes setup code -// (which isn't anywhere in an easily-importable place). 
-// TODO(directxman12): stop "cheating" and calling histograms summaries when we pull in the latest deps - -type workqueueMetricsProvider struct{} - -func (workqueueMetricsProvider) NewDepthMetric(name string) workqueuemetrics.GaugeMetric { - return depth.WithLabelValues(name) -} - -func (workqueueMetricsProvider) NewAddsMetric(name string) workqueuemetrics.CounterMetric { - return adds.WithLabelValues(name) -} - -func (workqueueMetricsProvider) NewLatencyMetric(name string) workqueuemetrics.HistogramMetric { - return latency.WithLabelValues(name) -} - -func (workqueueMetricsProvider) NewWorkDurationMetric(name string) workqueuemetrics.HistogramMetric { - return workDuration.WithLabelValues(name) -} - -func (workqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueuemetrics.SettableGaugeMetric { - return unfinishedWork.WithLabelValues(name) -} - -func (workqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueuemetrics.SettableGaugeMetric { - return longestRunning.WithLabelValues(name) -} - -func (workqueueMetricsProvider) NewRetriesMetric(name string) workqueuemetrics.CounterMetric { - return retries.WithLabelValues(name) -} - -func (workqueueMetricsProvider) NewDeprecatedDepthMetric(name string) workqueuemetrics.GaugeMetric { - depth := prometheus.NewGauge(prometheus.GaugeOpts{ - Subsystem: name, - Name: "depth", - Help: "Current depth of workqueue: " + name, - }) - runtime.HandleError(Registry.Register(depth)) - return depth -} - -func (workqueueMetricsProvider) NewDeprecatedAddsMetric(name string) workqueuemetrics.CounterMetric { - adds := prometheus.NewCounter(prometheus.CounterOpts{ - Subsystem: name, - Name: "adds", - Help: "Total number of adds handled by workqueue: " + name, - }) - runtime.HandleError(Registry.Register(adds)) - return adds -} - -func (workqueueMetricsProvider) NewDeprecatedLatencyMetric(name string) workqueuemetrics.SummaryMetric { - latency := prometheus.NewSummary(prometheus.SummaryOpts{ - Subsystem: name, - Name: "queue_latency", - Help: "How long an item stays in workqueue" + name + " before being requested.", - ConstLabels: prometheus.Labels{"name": name}, - }) - runtime.HandleError(Registry.Register(latency)) - return latency -} - -func (workqueueMetricsProvider) NewDeprecatedWorkDurationMetric(name string) workqueuemetrics.SummaryMetric { - workDuration := prometheus.NewSummary(prometheus.SummaryOpts{ - Subsystem: name, - Name: "work_duration", - Help: "How long processing an item from workqueue" + name + " takes.", - ConstLabels: prometheus.Labels{"name": name}, - }) - runtime.HandleError(Registry.Register(workDuration)) - return workDuration -} - -func (workqueueMetricsProvider) NewDeprecatedUnfinishedWorkSecondsMetric(name string) workqueuemetrics.SettableGaugeMetric { - unfinishedWork := prometheus.NewGauge(prometheus.GaugeOpts{ - Subsystem: name, - Name: "unfinished_work_seconds", - Help: "How many seconds of work " + name + " has done that " + - "is in progress and hasn't been observed by work_duration. Large " + - "values indicate stuck threads. 
One can deduce the number of stuck " + - "threads by observing the rate at which this increases.", - }) - runtime.HandleError(Registry.Register(unfinishedWork)) - return unfinishedWork -} - -func (workqueueMetricsProvider) NewDeprecatedLongestRunningProcessorMicrosecondsMetric(name string) workqueuemetrics.SettableGaugeMetric { - longestRunning := prometheus.NewGauge(prometheus.GaugeOpts{ - Subsystem: name, - Name: "longest_running_processor_microseconds", - Help: "How many microseconds has the longest running " + - "processor for " + name + " been running.", - }) - runtime.HandleError(Registry.Register(longestRunning)) - return longestRunning -} - -func (workqueueMetricsProvider) NewDeprecatedRetriesMetric(name string) workqueuemetrics.CounterMetric { - retries := prometheus.NewCounter(prometheus.CounterOpts{ - Subsystem: name, - Name: "retries", - Help: "Total number of retries handled by workqueue: " + name, - }) - runtime.HandleError(Registry.Register(retries)) - return retries -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go index e9db3a110..693bff841 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go @@ -21,11 +21,9 @@ import ( "net" ) -// DefaultBindAddress sets the default bind address for the metrics -// listener -// The metrics is off by default. -// TODO: Flip the default by changing DefaultBindAddress back to ":8080" in the v0.2.0. -var DefaultBindAddress = "0" +// DefaultBindAddress sets the default bind address for the metrics listener +// The metrics is on by default. +var DefaultBindAddress = ":8080" // NewListener creates a new TCP listener bound to the given address. func NewListener(addr string) (net.Listener, error) { @@ -39,9 +37,12 @@ func NewListener(addr string) (net.Listener, error) { return nil, nil } + log.Info("metrics server is starting to listen", "addr", addr) ln, err := net.Listen("tcp", addr) if err != nil { - return nil, fmt.Errorf("error listening on %s: %v", addr, err) + er := fmt.Errorf("error listening on %s: %v", addr, err) + log.Error(er, "metrics server failed to listen. You may want to disable the metrics server or use another port if it is due to conflicts") + return nil, er } return ln, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go new file mode 100644 index 000000000..4baf81700 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go @@ -0,0 +1,124 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "k8s.io/client-go/util/workqueue" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("metrics") + +// This file is copied and adapted from k8s.io/kubernetes/pkg/util/workqueue/prometheus +// which registers metrics to the default prometheus Registry. We require very +// similar functionality, but must register metrics to a different Registry. + +func init() { + workqueue.SetProvider(workqueueMetricsProvider{}) +} + +func registerWorkqueueMetric(c prometheus.Collector, name, queue string) { + if err := Registry.Register(c); err != nil { + log.Error(err, "failed to register metric", "name", name, "queue", queue) + } +} + +type workqueueMetricsProvider struct{} + +func (workqueueMetricsProvider) NewDepthMetric(queue string) workqueue.GaugeMetric { + const name = "workqueue_depth" + m := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: name, + Help: "Current depth of workqueue", + ConstLabels: prometheus.Labels{"name": queue}, + }) + registerWorkqueueMetric(m, name, queue) + return m +} + +func (workqueueMetricsProvider) NewAddsMetric(queue string) workqueue.CounterMetric { + const name = "workqueue_adds_total" + m := prometheus.NewCounter(prometheus.CounterOpts{ + Name: name, + Help: "Total number of adds handled by workqueue", + ConstLabels: prometheus.Labels{"name": queue}, + }) + registerWorkqueueMetric(m, name, queue) + return m +} + +func (workqueueMetricsProvider) NewLatencyMetric(queue string) workqueue.HistogramMetric { + const name = "workqueue_queue_duration_seconds" + m := prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: name, + Help: "How long in seconds an item stays in workqueue before being requested.", + ConstLabels: prometheus.Labels{"name": queue}, + Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), + }) + registerWorkqueueMetric(m, name, queue) + return m +} + +func (workqueueMetricsProvider) NewWorkDurationMetric(queue string) workqueue.HistogramMetric { + const name = "workqueue_work_duration_seconds" + m := prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: name, + Help: "How long in seconds processing an item from workqueue takes.", + ConstLabels: prometheus.Labels{"name": queue}, + Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), + }) + registerWorkqueueMetric(m, name, queue) + return m +} + +func (workqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(queue string) workqueue.SettableGaugeMetric { + const name = "workqueue_unfinished_work_seconds" + m := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: name, + Help: "How many seconds of work has done that " + + "is in progress and hasn't been observed by work_duration. Large " + + "values indicate stuck threads. 
One can deduce the number of stuck " + + "threads by observing the rate at which this increases.", + ConstLabels: prometheus.Labels{"name": queue}, + }) + registerWorkqueueMetric(m, name, queue) + return m +} + +func (workqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(queue string) workqueue.SettableGaugeMetric { + const name = "workqueue_longest_running_processor_seconds" + m := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: name, + Help: "How many seconds has the longest running " + + "processor for workqueue been running.", + ConstLabels: prometheus.Labels{"name": queue}, + }) + registerWorkqueueMetric(m, name, queue) + return m +} + +func (workqueueMetricsProvider) NewRetriesMetric(queue string) workqueue.CounterMetric { + const name = "workqueue_retries_total" + m := prometheus.NewCounter(prometheus.CounterOpts{ + Name: name, + Help: "Total number of retries handled by workqueue", + ConstLabels: prometheus.Labels{"name": queue}, + }) + registerWorkqueueMetric(m, name, queue) + return m +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go b/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go index 9542d4a1c..b5e8d014d 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go @@ -40,6 +40,7 @@ type Predicate interface { var _ Predicate = Funcs{} var _ Predicate = ResourceVersionChangedPredicate{} +var _ Predicate = GenerationChangedPredicate{} // Funcs is a function that implements Predicate. type Funcs struct { @@ -116,3 +117,47 @@ func (ResourceVersionChangedPredicate) Update(e event.UpdateEvent) bool { } return true } + +// GenerationChangedPredicate implements a default update predicate function on Generation change. +// +// This predicate will skip update events that have no change in the object's metadata.generation field. +// The metadata.generation field of an object is incremented by the API server when writes are made to the spec field of an object. +// This allows a controller to ignore update events where the spec is unchanged, and only the metadata and/or status fields are changed. +// +// For CustomResource objects the Generation is only incremented when the status subresource is enabled. +// +// Caveats: +// +// * The assumption that the Generation is incremented only on writing to the spec does not hold for all APIs. +// E.g For Deployment objects the Generation is also incremented on writes to the metadata.annotations field. +// For object types other than CustomResources be sure to verify which fields will trigger a Generation increment when they are written to. +// +// * With this predicate, any update events with writes only to the status field will not be reconciled. +// So in the event that the status block is overwritten or wiped by someone else the controller will not self-correct to restore the correct status. 
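GenerationChangedPredicate, defined immediately below, can be dropped into a controller's watch to skip status-only updates. A sketch using the plain controller/source/handler APIs; the Etcd type is this repository's CRD, while the function name is illustrative:

```go
package controllers

import (
	druidv1alpha1 "github.com/gardener/etcd-druid/api/v1alpha1"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// setupEtcdWatch wires a watch on Etcd resources that ignores update events
// which do not bump metadata.generation (i.e. pure status updates).
func setupEtcdWatch(mgr manager.Manager, r reconcile.Reconciler) error {
	c, err := controller.New("etcd-controller", mgr, controller.Options{Reconciler: r})
	if err != nil {
		return err
	}
	return c.Watch(
		&source.Kind{Type: &druidv1alpha1.Etcd{}},
		&handler.EnqueueRequestForObject{},
		predicate.GenerationChangedPredicate{},
	)
}
```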
+type GenerationChangedPredicate struct { + Funcs +} + +// Update implements default UpdateEvent filter for validating generation change +func (GenerationChangedPredicate) Update(e event.UpdateEvent) bool { + if e.MetaOld == nil { + log.Error(nil, "Update event has no old metadata", "event", e) + return false + } + if e.ObjectOld == nil { + log.Error(nil, "Update event has no old runtime object to update", "event", e) + return false + } + if e.ObjectNew == nil { + log.Error(nil, "Update event has no new runtime object for update", "event", e) + return false + } + if e.MetaNew == nil { + log.Error(nil, "Update event has no new metadata", "event", e) + return false + } + if e.MetaNew.GetGeneration() == e.MetaOld.GetGeneration() { + return false + } + return true +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go index e3257a107..9583b5e9a 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go @@ -17,6 +17,8 @@ limitations under the License. package admission import ( + "fmt" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -36,11 +38,17 @@ func NewDecoder(scheme *runtime.Scheme) (*Decoder, error) { // Decode decodes the inlined object in the AdmissionRequest into the passed-in runtime.Object. // If you want decode the OldObject in the AdmissionRequest, use DecodeRaw. +// It errors out if req.Object.Raw is empty i.e. containing 0 raw bytes. func (d *Decoder) Decode(req Request, into runtime.Object) error { + // we error out if rawObj is an empty object. + if len(req.Object.Raw) == 0 { + return fmt.Errorf("there is no content to decode") + } return d.DecodeRaw(req.Object, into) } // DecodeRaw decodes a RawExtension object into the passed-in runtime.Object. +// It errors out if rawObj is empty i.e. containing 0 raw bytes. func (d *Decoder) DecodeRaw(rawObj runtime.RawExtension, into runtime.Object) error { // NB(directxman12): there's a bug/weird interaction between decoders and // the API server where the API server doesn't send a GVK on the embedded @@ -49,6 +57,11 @@ func (d *Decoder) DecodeRaw(rawObj runtime.RawExtension, into runtime.Object) er // and call unstructured's special Unmarshal implementation, which calls // back into that same decoder :-/ // See kubernetes/kubernetes#74373. + + // we error out if rawObj is an empty object. + if len(rawObj.Raw) == 0 { + return fmt.Errorf("there is no content to decode") + } if unstructuredInto, isUnstructured := into.(*unstructured.Unstructured); isUnstructured { // unmarshal into unstructured's underlying object to avoid calling the decoder if err := json.Unmarshal(rawObj.Raw, &unstructuredInto.Object); err != nil { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go index fa60302ad..4b02e2170 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go @@ -17,7 +17,6 @@ limitations under the License. 
package admission import ( - "context" "encoding/json" "errors" "fmt" @@ -82,9 +81,10 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { wh.writeResponse(w, reviewResponse) return } + wh.log.V(1).Info("received request", "UID", req.UID, "kind", req.Kind, "resource", req.Resource) // TODO: add panic-recovery for Handle - reviewResponse = wh.Handle(context.Background(), req) + reviewResponse = wh.Handle(r.Context(), req) wh.writeResponse(w, reviewResponse) } @@ -97,5 +97,8 @@ func (wh *Webhook) writeResponse(w io.Writer, response Response) { if err != nil { wh.log.Error(err, "unable to encode the response") wh.writeResponse(w, Errored(http.StatusInternalServerError, err)) + } else { + res := responseAdmissionReview.Response + wh.log.V(1).Info("wrote response", "UID", res.UID, "allowed", res.Allowed, "result", res.Result) } } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go index a65be69f6..2ffdb57b0 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go @@ -22,7 +22,7 @@ import ( "fmt" "net/http" - "github.com/appscode/jsonpatch" + "gomodules.xyz/jsonpatch/v2" admissionv1beta1 "k8s.io/api/admission/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/runtime/inject" diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go index bb625ed0d..da8e33678 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go @@ -19,7 +19,7 @@ package admission import ( "net/http" - "github.com/appscode/jsonpatch" + "gomodules.xyz/jsonpatch/v2" admissionv1beta1 "k8s.io/api/admission/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go index 282bb9b47..6defcfe92 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go @@ -29,6 +29,7 @@ type Validator interface { runtime.Object ValidateCreate() error ValidateUpdate(old runtime.Object) error + ValidateDelete() error } // ValidatingWebhookFor creates a new Webhook for validating the provided type. 
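With ValidateDelete added to the Validator interface above, every webhook type has to provide a third method; the hunk below makes the validating handler decode req.OldObject and call it for Delete operations. A sketch of an implementing type, where the Etcd type and package are illustrative and assumed to already satisfy runtime.Object:

```go
package v1alpha1

import "k8s.io/apimachinery/pkg/runtime"

func (e *Etcd) ValidateCreate() error {
	// reject invalid spec values at creation time
	return nil
}

func (e *Etcd) ValidateUpdate(old runtime.Object) error {
	// compare against old to guard immutable fields
	return nil
}

// ValidateDelete takes no arguments: for Delete requests the handler decodes
// the object being deleted from req.OldObject and then calls this method;
// returning an error results in a Denied admission response.
func (e *Etcd) ValidateDelete() error {
	return nil
}
```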
@@ -89,5 +90,19 @@ func (h *validatingHandler) Handle(ctx context.Context, req Request) Response { } } + if req.Operation == v1beta1.Delete { + // In reference to PR: https://github.com/kubernetes/kubernetes/pull/76346 + // OldObject contains the object being deleted + err := h.decoder.DecodeRaw(req.OldObject, obj) + if err != nil { + return Errored(http.StatusBadRequest, err) + } + + err = obj.ValidateDelete() + if err != nil { + return Denied(err.Error()) + } + } + return Allowed("") } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go index 2f6cd9a88..8f3430efb 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go @@ -21,8 +21,8 @@ import ( "errors" "net/http" - "github.com/appscode/jsonpatch" "github.com/go-logr/logr" + "gomodules.xyz/jsonpatch/v2" admissionv1beta1 "k8s.io/api/admission/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -92,6 +92,10 @@ func (r *Response) Complete(req Request) error { // Handler can handle an AdmissionRequest. type Handler interface { + // Handle yields a response to an AdmissionRequest. + // + // The supplied context is extracted from the received http.Request, allowing wrapping + // http.Handlers to inject values into and control cancelation of downstream request processing. Handle(context.Context, Request) Response } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go index 27ed60ad1..276784efb 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go @@ -17,7 +17,7 @@ limitations under the License. package webhook import ( - "github.com/appscode/jsonpatch" + "gomodules.xyz/jsonpatch/v2" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go index 7a74a1489..05760cbbc 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go @@ -174,23 +174,27 @@ func (wh *Webhook) convertViaHub(src, dst conversion.Convertible) error { // getHub returns an instance of the Hub for passed-in object's group/kind. 
func (wh *Webhook) getHub(obj runtime.Object) (conversion.Hub, error) { - gvks, _, err := wh.scheme.ObjectKinds(obj) + gvks, err := objectGVKs(wh.scheme, obj) if err != nil { - return nil, fmt.Errorf("error retriving object kinds for given object : %v", err) + return nil, err + } + if len(gvks) == 0 { + return nil, fmt.Errorf("error retrieving gvks for object : %v", obj) } var hub conversion.Hub - var isHub, hubFoundAlready bool + var hubFoundAlready bool for _, gvk := range gvks { instance, err := wh.scheme.New(gvk) if err != nil { return nil, fmt.Errorf("failed to allocate an instance for gvk %v %v", gvk, err) } - if hub, isHub = instance.(conversion.Hub); isHub { + if val, isHub := instance.(conversion.Hub); isHub { if hubFoundAlready { return nil, fmt.Errorf("multiple hub version defined for %T", obj) } hubFoundAlready = true + hub = val } } return hub, nil @@ -216,21 +220,24 @@ func (wh *Webhook) allocateDstObject(apiVersion, kind string) (runtime.Object, e return obj, nil } -// CheckConvertibility determines if given type is convertible or not. For a type +// IsConvertible determines if given type is convertible or not. For a type // to be convertible, the group-kind needs to have a Hub type defined and all // non-hub types must be able to convert to/from Hub. -func CheckConvertibility(scheme *runtime.Scheme, obj runtime.Object) error { +func IsConvertible(scheme *runtime.Scheme, obj runtime.Object) (bool, error) { var hubs, spokes, nonSpokes []runtime.Object - gvks, _, err := scheme.ObjectKinds(obj) + gvks, err := objectGVKs(scheme, obj) if err != nil { - return fmt.Errorf("error retriving object kinds for given object : %v", err) + return false, err + } + if len(gvks) == 0 { + return false, fmt.Errorf("error retrieving gvks for object : %v", obj) } for _, gvk := range gvks { instance, err := scheme.New(gvk) if err != nil { - return fmt.Errorf("failed to allocate an instance for gvk %v %v", gvk, err) + return false, fmt.Errorf("failed to allocate an instance for gvk %v %v", gvk, err) } if isHub(instance) { @@ -247,13 +254,13 @@ func CheckConvertibility(scheme *runtime.Scheme, obj runtime.Object) error { } if len(gvks) == 1 { - return nil // single version + return false, nil // single version } if len(hubs) == 0 && len(spokes) == 0 { // multiple version detected with no conversion implementation. This is // true for multi-version built-in types. - return nil + return false, nil } if len(hubs) == 1 && len(nonSpokes) == 0 { // convertible @@ -261,18 +268,40 @@ func CheckConvertibility(scheme *runtime.Scheme, obj runtime.Object) error { for _, sp := range spokes { spokeVersions = append(spokeVersions, sp.GetObjectKind().GroupVersionKind().String()) } - log.V(1).Info("conversion enabled for kind", "kind", - gvks[0].GroupKind(), "hub", hubs[0], "spokes", spokeVersions) - return nil + return true, nil } - return PartialImplementationError{ + return false, PartialImplementationError{ hubs: hubs, nonSpokes: nonSpokes, spokes: spokes, } } +// objectGVKs returns all (Group,Version,Kind) for the Group/Kind of given object. +func objectGVKs(scheme *runtime.Scheme, obj runtime.Object) ([]schema.GroupVersionKind, error) { + // NB: we should not use `obj.GetObjectKind().GroupVersionKind()` to get the + // GVK here, since it is parsed from apiVersion and kind fields and it may + // return empty GVK if obj is an uninitialized object. 
+ objGVKs, _, err := scheme.ObjectKinds(obj) + if err != nil { + return nil, err + } + if len(objGVKs) != 1 { + return nil, fmt.Errorf("expect to get only one GVK for %v", obj) + } + objGVK := objGVKs[0] + knownTypes := scheme.AllKnownTypes() + + var gvks []schema.GroupVersionKind + for gvk := range knownTypes { + if objGVK.GroupKind() == gvk.GroupKind() { + gvks = append(gvks, gvk) + } + } + return gvks, nil +} + // PartialImplementationError represents an error due to partial conversion // implementation such as hub without spokes, multiple hubs or spokes without hub. type PartialImplementationError struct { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/certwatcher/certwatcher.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/certwatcher/certwatcher.go index 4a20319b5..fbe757cc7 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/certwatcher/certwatcher.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/certwatcher/certwatcher.go @@ -124,7 +124,7 @@ func (cw *CertWatcher) ReadCertificate() error { cw.currentCert = &cert cw.Unlock() - log.Info("Updated current TLS certiface") + log.Info("Updated current TLS certificate") return nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go index 68a8e7dd6..2dcddded4 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go @@ -22,7 +22,7 @@ import ( "fmt" "net" "net/http" - "path" + "os" "path/filepath" "strconv" "sync" @@ -85,7 +85,7 @@ func (s *Server) setDefaults() { } if len(s.CertDir) == 0 { - s.CertDir = path.Join("/tmp", "k8s-webhook-server", "serving-certs") + s.CertDir = filepath.Join(os.TempDir(), "k8s-webhook-server", "serving-certs") } } @@ -106,6 +106,7 @@ func (s *Server) Register(path string, hook http.Handler) { // TODO(directxman12): call setfields if we've already started the server s.webhooks[path] = hook s.WebhookMux.Handle(path, instrumentedHook(path, hook)) + log.Info("registering webhook", "path", path) } // instrumentedHook adds some instrumentation on top of the given webhook. @@ -125,6 +126,8 @@ func (s *Server) Start(stop <-chan struct{}) error { s.defaultingOnce.Do(s.setDefaults) baseHookLog := log.WithName("webhooks") + baseHookLog.Info("starting webhook server") + // inject fields here as opposed to in Register so that we're certain to have our setFields // function available. for hookPath, webhook := range s.webhooks { @@ -164,6 +167,8 @@ func (s *Server) Start(stop <-chan struct{}) error { return err } + log.Info("serving webhook server", "host", s.Host, "port", s.Port) + srv := &http.Server{ Handler: s.WebhookMux, } @@ -171,6 +176,7 @@ func (s *Server) Start(stop <-chan struct{}) error { idleConnsClosed := make(chan struct{}) go func() { <-stop + log.Info("shutting down webhook server") // TODO: use a context with reasonable timeout if err := srv.Shutdown(context.Background()); err != nil { diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/apiserver.go b/vendor/sigs.k8s.io/testing_frameworks/integration/apiserver.go index 4f1e28267..dc912ffc0 100644 --- a/vendor/sigs.k8s.io/testing_frameworks/integration/apiserver.go +++ b/vendor/sigs.k8s.io/testing_frameworks/integration/apiserver.go @@ -71,6 +71,15 @@ type APIServer struct { // Start starts the apiserver, waits for it to come up, and returns an error, // if occurred. 
func (s *APIServer) Start() error { + if s.processState == nil { + if err := s.setProcessState(); err != nil { + return err + } + } + return s.processState.Start(s.Out, s.Err) +} + +func (s *APIServer) setProcessState() error { if s.EtcdURL == nil { return fmt.Errorf("expected EtcdURL to be configured") } @@ -110,11 +119,7 @@ func (s *APIServer) Start() error { s.processState.Args, err = internal.RenderTemplates( internal.DoAPIServerArgDefaulting(s.Args), s, ) - if err != nil { - return err - } - - return s.processState.Start(s.Out, s.Err) + return err } // Stop stops this process gracefully, waits for its termination, and cleans up diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/etcd.go b/vendor/sigs.k8s.io/testing_frameworks/integration/etcd.go index 43cb314d7..ded0c0fb1 100644 --- a/vendor/sigs.k8s.io/testing_frameworks/integration/etcd.go +++ b/vendor/sigs.k8s.io/testing_frameworks/integration/etcd.go @@ -61,6 +61,15 @@ type Etcd struct { // Start starts the etcd, waits for it to come up, and returns an error, if one // occoured. func (e *Etcd) Start() error { + if e.processState == nil { + if err := e.setProcessState(); err != nil { + return err + } + } + return e.processState.Start(e.Out, e.Err) +} + +func (e *Etcd) setProcessState() error { var err error e.processState = &internal.ProcessState{} @@ -88,11 +97,7 @@ func (e *Etcd) Start() error { e.processState.Args, err = internal.RenderTemplates( internal.DoEtcdArgDefaulting(e.Args), e, ) - if err != nil { - return err - } - - return e.processState.Start(e.Out, e.Err) + return err } // Stop stops this process gracefully, waits for its termination, and cleans up diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/internal/etcd.go b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/etcd.go index 4c948cfd1..1fb093ade 100644 --- a/vendor/sigs.k8s.io/testing_frameworks/integration/internal/etcd.go +++ b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/etcd.go @@ -1,6 +1,8 @@ package internal -import "net/url" +import ( + "net/url" +) var EtcdDefaultArgs = []string{ "--listen-peer-urls=http://localhost:0", @@ -28,9 +30,9 @@ func isSecureScheme(scheme string) bool { func GetEtcdStartMessage(listenUrl url.URL) string { if isSecureScheme(listenUrl.Scheme) { // https://github.com/coreos/etcd/blob/a7f1fbe00ec216fcb3a1919397a103b41dca8413/embed/serve.go#L167 - return "serving client requests on " + listenUrl.Hostname() + return "serving client requests on " } // https://github.com/coreos/etcd/blob/a7f1fbe00ec216fcb3a1919397a103b41dca8413/embed/serve.go#L124 - return "serving insecure client requests on " + listenUrl.Hostname() + return "serving insecure client requests on " } diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/internal/process.go b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/process.go index d7a088566..f6817976f 100644 --- a/vendor/sigs.k8s.io/testing_frameworks/integration/internal/process.go +++ b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/process.go @@ -4,11 +4,13 @@ import ( "fmt" "io" "io/ioutil" + "net" "net/http" "net/url" "os" "os/exec" "path" + "strconv" "time" "github.com/onsi/gomega/gbytes" @@ -38,6 +40,10 @@ type ProcessState struct { // Deprecated: Use HealthCheckEndpoint in favour of StartMessage StartMessage string Args []string + + // ready holds wether the process is currently in ready state (hit the ready condition) or not. 
+ // It will be set to true on a successful `Start()` and set to false on a successful `Stop()` + ready bool } type DefaultedProcessInput struct { @@ -71,7 +77,7 @@ func DoDefaulting( } defaults.URL = url.URL{ Scheme: "http", - Host: fmt.Sprintf("%s:%d", host, port), + Host: net.JoinHostPort(host, strconv.Itoa(port)), } } else { defaults.URL = *listenUrl @@ -107,6 +113,10 @@ func DoDefaulting( type stopChannel chan struct{} func (ps *ProcessState) Start(stdout, stderr io.Writer) (err error) { + if ps.ready { + return nil + } + command := exec.Command(ps.Path, ps.Args...) ready := make(chan bool) @@ -131,6 +141,7 @@ func (ps *ProcessState) Start(stdout, stderr io.Writer) (err error) { select { case <-ready: + ps.ready = true return nil case <-timedOut: if pollerStopCh != nil { @@ -194,7 +205,7 @@ func (ps *ProcessState) Stop() error { case <-timedOut: return fmt.Errorf("timeout waiting for process %s to stop", path.Base(ps.Path)) } - + ps.ready = false if ps.DirNeedsCleaning { return os.RemoveAll(ps.Dir) }
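The testing_frameworks changes split process-state setup out of Start and track a ready flag, so starting an already-running etcd or apiserver becomes a no-op. These binaries are typically driven through controller-runtime's envtest; a bootstrap sketch, with the CRD directory path being illustrative:

```go
package controllers_test

import (
	"os"
	"path/filepath"
	"testing"

	"sigs.k8s.io/controller-runtime/pkg/envtest"
)

func TestMain(m *testing.M) {
	testEnv := &envtest.Environment{
		// Install the CRDs served by the test apiserver; the path is illustrative.
		CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")},
	}

	// Start brings up etcd and kube-apiserver via the integration framework
	// patched above; the returned rest.Config is used to build clients/managers
	// in real tests.
	cfg, err := testEnv.Start()
	if err != nil {
		panic(err)
	}
	_ = cfg

	code := m.Run()

	if err := testEnv.Stop(); err != nil {
		panic(err)
	}
	os.Exit(code)
}
```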